repo_name | hexsha | file_path | code | apis | possible_versions
---|---|---|---|---|---|
tdchaitanya/kornia | [
"6dd16563f66f979c7a95846ef86678894b7d54fd",
"6dd16563f66f979c7a95846ef86678894b7d54fd"
] | [
"kornia/filters/filter.py",
"test/color/test_normalize.py"
] | [
"from typing import Tuple, List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom kornia.filters.kernels import normalize_kernel2d\n\n\ndef compute_padding(kernel_size: Tuple[int, int]) -> List[int]:\n \"\"\"Computes padding tuple.\"\"\"\n # 4 ints: (padding_left, padding_right,padding_top,padding_bottom)\n # https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad\n assert len(kernel_size) == 2, kernel_size\n computed = [(k - 1) // 2 for k in kernel_size]\n return [computed[1], computed[1], computed[0], computed[0]]\n\n\ndef filter2D(input: torch.Tensor, kernel: torch.Tensor,\n border_type: str = 'reflect',\n normalized: bool = False) -> torch.Tensor:\n r\"\"\"Function that convolves a tensor with a kernel.\n\n The function applies a given kernel to a tensor. The kernel is applied\n independently at each depth channel of the tensor. Before applying the\n kernel, the function applies padding according to the specified mode so\n that the output remains in the same shape.\n\n Args:\n input (torch.Tensor): the input tensor with shape of\n :math:`(B, C, H, W)`.\n kernel (torch.Tensor): the kernel to be convolved with the input\n tensor. The kernel shape must be :math:`(B, kH, kW)`.\n border_type (str): the padding mode to be applied before convolving.\n The expected modes are: ``'constant'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. Default: ``'reflect'``.\n normalized (bool): If True, kernel will be L1 normalized.\n\n Return:\n torch.Tensor: the convolved tensor of same size and numbers of channels\n as the input.\n \"\"\"\n if not isinstance(input, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\"\n .format(type(input)))\n\n if not isinstance(kernel, torch.Tensor):\n raise TypeError(\"Input kernel type is not a torch.Tensor. Got {}\"\n .format(type(kernel)))\n\n if not isinstance(border_type, str):\n raise TypeError(\"Input border_type is not string. Got {}\"\n .format(type(kernel)))\n\n if not len(input.shape) == 4:\n raise ValueError(\"Invalid input shape, we expect BxCxHxW. Got: {}\"\n .format(input.shape))\n\n if not len(kernel.shape) == 3:\n raise ValueError(\"Invalid kernel shape, we expect BxHxW. Got: {}\"\n .format(kernel.shape))\n\n borders_list: List[str] = ['constant', 'reflect', 'replicate', 'circular']\n if border_type not in borders_list:\n raise ValueError(\"Invalid border_type, we expect the following: {0}.\"\n \"Got: {1}\".format(borders_list, border_type))\n\n # prepare kernel\n b, c, h, w = input.shape\n tmp_kernel: torch.Tensor = kernel.to(input.device).to(input.dtype)\n tmp_kernel = tmp_kernel.repeat(c, 1, 1, 1)\n if normalized:\n tmp_kernel = normalize_kernel2d(tmp_kernel)\n # pad the input tensor\n height, width = tmp_kernel.shape[-2:]\n padding_shape: List[int] = compute_padding((height, width))\n input_pad: torch.Tensor = F.pad(input, padding_shape, mode=border_type)\n\n # convolve the tensor with the kernel\n return F.conv2d(input_pad, tmp_kernel, padding=0, stride=1, groups=c)\n",
"import pytest\n\nimport kornia\nimport kornia.testing as utils # test utils\nfrom test.common import device_type\n\nimport torch\nfrom torch.autograd import gradcheck\nfrom torch.testing import assert_allclose\n\n\nclass TestNormalize:\n def test_smoke(self):\n mean = [0.5]\n std = [0.1]\n repr = \"Normalize(mean=[0.5], std=[0.1])\"\n assert str(kornia.color.Normalize(mean, std)) == repr\n\n def test_normalize(self):\n\n # prepare input data\n data = torch.ones(1, 2, 2)\n mean = torch.tensor([0.5])\n std = torch.tensor([2.0])\n\n # expected output\n expected = torch.tensor([0.25]).repeat(1, 2, 2).view_as(data)\n\n f = kornia.color.Normalize(mean, std)\n assert_allclose(f(data), expected)\n\n def test_broadcast_normalize(self):\n\n # prepare input data\n data = torch.ones(2, 3, 1, 1)\n data += 2\n\n mean = torch.tensor([2.0])\n std = torch.tensor([0.5])\n\n # expected output\n expected = torch.ones_like(data) + 1\n\n f = kornia.color.Normalize(mean, std)\n assert_allclose(f(data), expected)\n\n def test_float_input(self):\n\n data = torch.ones(2, 3, 1, 1)\n data += 2\n\n mean = 2.0\n std = 0.5\n\n # expected output\n expected = torch.ones_like(data) + 1\n\n f = kornia.color.Normalize(mean, std)\n assert_allclose(f(data), expected)\n\n def test_batch_normalize(self):\n\n # prepare input data\n data = torch.ones(2, 3, 1, 1)\n data += 2\n\n mean = torch.tensor([0.5, 1.0, 2.0]).repeat(2, 1)\n std = torch.tensor([2.0, 2.0, 2.0]).repeat(2, 1)\n # expected output\n expected = torch.tensor([1.25, 1, 0.5]).repeat(2, 1, 1).view_as(data)\n\n f = kornia.color.Normalize(mean, std)\n assert_allclose(f(data), expected)\n\n @pytest.mark.skip(reason=\"turn off all jit for a while\")\n def test_jit(self):\n @torch.jit.script\n def op_script(data: torch.Tensor, mean: torch.Tensor, std: torch.Tensor) -> torch.Tensor:\n return kornia.normalize(data, mean, std)\n\n data = torch.ones(2, 3, 1, 1)\n data += 2\n\n mean = torch.tensor([0.5, 1.0, 2.0]).repeat(2, 1)\n std = torch.tensor([2.0, 2.0, 2.0]).repeat(2, 1)\n\n actual = op_script(data, mean, std)\n expected = image.normalize(data, mean, std)\n assert_allclose(actual, expected)\n\n def test_gradcheck(self):\n\n # prepare input data\n data = torch.ones(2, 3, 1, 1)\n data += 2\n mean = torch.tensor([0.5, 1.0, 2.0]).double()\n std = torch.tensor([2.0, 2.0, 2.0]).double()\n\n data = utils.tensor_to_gradcheck_var(data) # to var\n\n assert gradcheck(kornia.color.Normalize(mean, std), (data,), raise_exception=True)\n"
] | [
[
"torch.nn.functional.conv2d",
"torch.nn.functional.pad"
],
[
"torch.ones",
"torch.ones_like",
"torch.testing.assert_allclose",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rryan/transformers | [
"f382a8decda82062bb6911f05b646f404eacfdd4"
] | [
"examples/run_glue.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa).\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept:\n from tensorboardX import SummaryWriter\n\nfrom tqdm import tqdm, trange\n\nfrom transformers import (WEIGHTS_NAME, BertConfig,\n BertForSequenceClassification, BertTokenizer,\n RobertaConfig,\n RobertaForSequenceClassification,\n RobertaTokenizer,\n XLMConfig, XLMForSequenceClassification,\n XLMTokenizer, XLNetConfig,\n XLNetForSequenceClassification,\n XLNetTokenizer,\n DistilBertConfig,\n DistilBertForSequenceClassification,\n DistilBertTokenizer)\n\nfrom transformers import AdamW, WarmupLinearSchedule\n\nfrom transformers import glue_compute_metrics as compute_metrics\nfrom transformers import glue_output_modes as output_modes\nfrom transformers import glue_processors as processors\nfrom transformers import glue_convert_examples_to_features as convert_examples_to_features\n\nlogger = logging.getLogger(__name__)\n\nALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig, \n RobertaConfig, DistilBertConfig)), ())\n\nMODEL_CLASSES = {\n 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),\n 'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),\n 'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),\n 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),\n 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer)\n}\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef train(args, train_dataset, model, tokenizer):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear 
warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0])\n set_seed(args) # Added here for reproductibility (even between python 2 and 3)\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if args.model_type != 'distilbert':\n inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single 
GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n tb_writer.add_scalar('eval_{}'.format(key), value, global_step)\n tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n torch.save(args, os.path.join(output_dir, 'training_args.bin'))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, prefix=\"\"):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\n eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == \"mnli\" else (args.output_dir,)\n\n results = {}\n for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):\n eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if args.model_type != 'distilbert':\n inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs['labels'].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n if args.output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif args.output_mode == \"regression\":\n preds = np.squeeze(preds)\n result = compute_metrics(eval_task, preds, out_label_ids)\n results.update(result)\n\n 
output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n return results\n\n\ndef load_and_cache_examples(args, task, tokenizer, evaluate=False):\n if args.local_rank not in [-1, 0] and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n processor = processors[task]()\n output_mode = output_modes[task]\n # Load data features from cache or dataset file\n cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(\n 'dev' if evaluate else 'train',\n list(filter(None, args.model_name_or_path.split('/'))).pop(),\n str(args.max_seq_length),\n str(task)))\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n else:\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n label_list = processor.get_labels()\n if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']:\n # HACK(label indices are swapped in RoBERTa pretrained model)\n label_list[1], label_list[2] = label_list[2], label_list[1] \n examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)\n features = convert_examples_to_features(examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=output_mode,\n pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,\n )\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save(features, cached_features_file)\n\n if args.local_rank == 0 and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n if output_mode == \"classification\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)\n return dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\", default=None, type=str, required=True,\n help=\"The input data dir. 
Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--model_type\", default=None, type=str, required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()))\n parser.add_argument(\"--model_name_or_path\", default=None, type=str, required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS))\n parser.add_argument(\"--task_name\", default=None, type=str, required=True,\n help=\"The name of the task to train selected in the list: \" + \", \".join(processors.keys()))\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Pretrained config name or path if not the same as model_name\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\")\n parser.add_argument(\"--cache_dir\", default=\"\", type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n parser.add_argument(\"--max_seq_length\", default=128, type=int,\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n parser.add_argument(\"--do_train\", action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--evaluate_during_training\", action='store_true',\n help=\"Rul evaluation during training at each logging step.\")\n parser.add_argument(\"--do_lower_case\", action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--per_gpu_eval_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--num_train_epochs\", default=3.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--max_steps\", default=-1, type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int,\n help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument('--logging_steps', type=int, default=50,\n help=\"Log every X updates steps.\")\n parser.add_argument('--save_steps', type=int, default=50,\n help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\"--eval_all_checkpoints\", action='store_true',\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\")\n parser.add_argument(\"--no_cuda\", action='store_true',\n help=\"Avoid using CUDA when available\")\n parser.add_argument('--overwrite_output_dir', action='store_true',\n help=\"Overwrite the content of the output directory\")\n parser.add_argument('--overwrite_cache', action='store_true',\n help=\"Overwrite the cached training and evaluation sets\")\n parser.add_argument('--seed', type=int, default=42,\n help=\"random seed for initialization\")\n\n parser.add_argument('--fp16', action='store_true',\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\")\n parser.add_argument('--fp16_opt_level', type=str, default='O1',\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\")\n parser.add_argument(\"--local_rank\", type=int, default=-1,\n help=\"For distributed training: local_rank\")\n parser.add_argument('--server_ip', type=str, default='', help=\"For distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"For distant debugging.\")\n args = parser.parse_args()\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:\n raise ValueError(\"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(args.output_dir))\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)\n\n # Set seed\n set_seed(args)\n\n # Prepare GLUE task\n args.task_name = args.task_name.lower()\n if args.task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (args.task_name))\n processor = processors[args.task_name]()\n args.output_mode = output_modes[args.task_name]\n label_list = processor.get_labels()\n num_labels = len(label_list)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)\n model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n model.to(args.device)\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n\n # Training\n if args.do_train:\n train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = model.module if 
hasattr(model, 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir)\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n model.to(args.device)\n\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else \"\"\n \n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n result = evaluate(args, model, tokenizer, prefix=prefix)\n result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())\n results.update(result)\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.load",
"numpy.squeeze",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.get_rank",
"torch.save",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.utils.data.TensorDataset",
"torch.distributed.barrier",
"torch.tensor",
"numpy.argmax",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
amanaster2/landlab | [
"ea17f8314eb12e3fc76df66c9b6ff32078caa75c",
"ea17f8314eb12e3fc76df66c9b6ff32078caa75c",
"ea17f8314eb12e3fc76df66c9b6ff32078caa75c",
"ea17f8314eb12e3fc76df66c9b6ff32078caa75c",
"ea17f8314eb12e3fc76df66c9b6ff32078caa75c",
"ea17f8314eb12e3fc76df66c9b6ff32078caa75c",
"ea17f8314eb12e3fc76df66c9b6ff32078caa75c",
"ea17f8314eb12e3fc76df66c9b6ff32078caa75c",
"ea17f8314eb12e3fc76df66c9b6ff32078caa75c",
"ea17f8314eb12e3fc76df66c9b6ff32078caa75c",
"ea17f8314eb12e3fc76df66c9b6ff32078caa75c"
] | [
"tests/components/erosion_deposition/test_general_erodep.py",
"landlab/components/profiler/base_profiler.py",
"tests/components/erosion_deposition/test_ero_dep_mass_conservation.py",
"tests/components/hand_calculator/test_hand.py",
"landlab/components/fracture_grid/fracture_grid.py",
"landlab/graph/dual.py",
"tests/components/flow_director/test_dinf.py",
"landlab/components/profiler/trickle_down_profiler.py",
"tests/graph/test_graph.py",
"landlab/components/pet/potential_evapotranspiration_field.py",
"tests/components/network_sediment_transporter/test_pulse_sediment.py"
] | [
"import numpy as np\nimport pytest\nfrom numpy import testing\n\nfrom landlab import RasterModelGrid\nfrom landlab.components import ErosionDeposition, FlowAccumulator\n\n\ndef test_Ff_too_high_vals():\n \"\"\"\n Test that instantiating ErosionDeposition with a F_f value > 1 throws a\n ValueError.\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n mg.add_zeros(\"topographic__elevation\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n\n # Create a D8 flow handler\n FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Instantiate the ErosionDeposition component...\n with pytest.raises(ValueError):\n ErosionDeposition(\n mg,\n K=0.01,\n F_f=2.0,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit=0.0,\n solver=\"basic\",\n )\n\n\ndef test_Ff_too_low_vals():\n \"\"\"\n Test that instantiating ErosionDeposition with a F_f value < 0 throws a\n ValueError.\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n mg.add_zeros(\"topographic__elevation\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n\n # Create a D8 flow handler\n FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Instantiate the ErosionDeposition component...\n with pytest.raises(ValueError):\n ErosionDeposition(\n mg,\n K=0.01,\n F_f=-0.5,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit=0.0,\n solver=\"basic\",\n )\n\n\ndef test_q_as_field():\n \"\"\"\n Test that passing in water discharge as a grid field results in self.q\n holding correct values\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n mg.add_zeros(\"topographic__elevation\", at=\"node\")\n q = mg.add_zeros(\"user_imposed_discharge\", at=\"node\")\n q[:] += 1.0 # add 1.0 m3/yr of water\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n\n # Create a D8 flow handler\n FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Instantiate the ErosionDeposition component...\n ed = ErosionDeposition(\n mg,\n K=0.01,\n F_f=0.0,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit=0.0,\n discharge_field=\"user_imposed_discharge\",\n solver=\"basic\",\n )\n\n # ensure that ed._q is everywhere equal to 1.0 
m3/yr.\n testing.assert_array_equal(\n np.ones(mg.number_of_nodes),\n ed._q,\n err_msg=\"E/D discharge field test failed\",\n verbose=True,\n )\n\n\ndef test_q_as_array():\n \"\"\"\n Test that passing in water discharge as an array results in self.q\n holding correct values\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n mg.add_zeros(\"topographic__elevation\", at=\"node\")\n q = np.zeros(mg.number_of_nodes)\n q[:] += 1.0 # add 1.0 m3/yr of water\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n\n # Create a D8 flow handler\n FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Instantiate the ErosionDeposition component...\n ed = ErosionDeposition(\n mg,\n K=0.01,\n F_f=0.0,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit=0.0,\n discharge_field=q,\n solver=\"basic\",\n )\n\n # ensure that ed._q is everywhere equal to 1.0 m3/yr.\n testing.assert_array_equal(\n np.ones(mg.number_of_nodes),\n ed._q,\n err_msg=\"E/D discharge array test failed\",\n verbose=True,\n )\n\n\ndef test_sediment__flux_already_created():\n \"\"\"\n Test that an existing sediment flux grid field is not changed by\n instantiating ErosionDeposition.\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n mg.add_zeros(\"topographic__elevation\", at=\"node\")\n qs = mg.add_zeros(\"sediment__flux\", at=\"node\")\n qs[:] += 1.0 # add 1.0 m3/yr of flux\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n\n # Create a D8 flow handler\n FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Instantiate the ErosionDeposition component...\n ed = ErosionDeposition(\n mg,\n K=0.01,\n F_f=0.0,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit=0.0,\n solver=\"basic\",\n )\n\n # ensure that 'sediment__flux' field is everywhere equal to 1.0 m3/yr.\n testing.assert_array_equal(\n np.ones(mg.number_of_nodes),\n ed._qs,\n err_msg=\"E/D sediment flux field test failed\",\n verbose=True,\n )\n",
"# coding: utf8\n# ! /usr/env/python\n\"\"\"Base class for profile constructors.\"\"\"\n\nfrom abc import ABC, abstractmethod\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\n\nfrom landlab import Component\nfrom landlab.plot import imshow_grid\nfrom landlab.utils.return_array import return_array_at_node\n\n\ndef _recursive_max(jagged):\n \"\"\"\n Examples\n --------\n from landlab.components.profiler.base_profiler import _recursive_max\n >>> struct = [[1, 2, 3, 4],\n ... [[2, 3, 4, 5],\n ... [3, 4, 5, 6]],\n ... [4, 5, 6, 7]]\n >>> _recursive_max(struct)\n 7\n >>> _recursive_max([100])\n 100\n \"\"\"\n return max(_recursive_max(j) if hasattr(j, \"__iter__\") else j for j in jagged)\n\n\ndef _recursive_min(jagged):\n \"\"\"\n Examples\n --------\n from landlab.components.profiler.base_profiler import _recursive_min\n >>> struct = [[1, 2, 3, 4],\n ... [[2, 3, 4, 5],\n ... [3, 4, 5, 6]],\n ... [4, 5, 6, 7]]\n >>> _recursive_min(struct)\n 1\n >>> _recursive_min([100])\n 100\n \"\"\"\n return min(_recursive_min(j) if hasattr(j, \"__iter__\") else j for j in jagged)\n\n\nclass _BaseProfiler(ABC, Component):\n \"\"\"Base class to handle profilers.\n\n Primarily exists to handle plotting.\n \"\"\"\n\n _name = \"_BaseProfiler\"\n\n _unit_agnostic = True\n\n _info = {}\n\n def __init__(self, grid):\n super().__init__(grid)\n\n def run_one_step(self):\n \"\"\"Calculate the profile data structure and distances along it.\"\"\"\n # calculate the profile IDs data structure.\n self._create_profile_structure()\n\n @abstractmethod\n def _create_profile_structure(self):\n \"\"\"Private class for creating profile structure.\n\n Expectation is that this will be overridden to create the following\n three private attributes:\n\n self._nodes\n self._distance_along_profile\n\n are each lists of numpy arrays, one array per segment.\n\n self._colors\n\n is a list of RGBA tuples, one tuple per segment.\n\n The order of segments is expected to be consistent between each of the\n three data structures.\n \"\"\"\n ... # pragma: no cover\n\n @property\n def distance_along_profile(self):\n \"\"\"List of distances along profile for each segment.\n\n Examples\n --------\n >>> import numpy as np\n >>> from landlab import RasterModelGrid\n >>> from landlab.components import (\n ... FastscapeEroder,\n ... FlowAccumulator,\n ... ChannelProfiler\n ... )\n >>> mg = RasterModelGrid((10, 10), xy_spacing=10)\n >>> np.random.seed(42)\n >>> z = mg.add_zeros('topographic__elevation', at='node')\n >>> z[mg.core_nodes] += np.random.randn(mg.core_nodes.size)\n >>> fa = FlowAccumulator(mg)\n >>> sp = FastscapeEroder(mg, K_sp=0.0001)\n >>> dt = 1000\n >>> for i in range(200):\n ... fa.run_one_step()\n ... sp.run_one_step(dt=dt)\n ... z[mg.core_nodes] += 0.001 * dt\n >>> profiler = ChannelProfiler(mg)\n >>> profiler.run_one_step()\n >>> profiler.distance_along_profile\n [array([ 0., 10., 20., 30., 40., 50.])]\n \"\"\"\n return self._distance_along_profile\n\n @property\n def nodes(self):\n \"\"\"List of node ids for each segment.\n\n Examples\n --------\n >>> import numpy as np\n >>> from landlab import RasterModelGrid\n >>> from landlab.components import (\n ... FastscapeEroder,\n ... FlowAccumulator,\n ... ChannelProfiler\n ... 
)\n >>> mg = RasterModelGrid((10, 10), xy_spacing=10)\n >>> np.random.seed(42)\n >>> z = mg.add_zeros('topographic__elevation', at='node')\n >>> z[mg.core_nodes] += np.random.randn(mg.core_nodes.size)\n >>> fa = FlowAccumulator(mg)\n >>> sp = FastscapeEroder(mg, K_sp=0.0001)\n >>> dt = 1000\n >>> for i in range(200):\n ... fa.run_one_step()\n ... sp.run_one_step(dt=dt)\n ... z[mg.core_nodes] += 0.001 * dt\n >>> profiler = ChannelProfiler(mg)\n >>> profiler.run_one_step()\n >>> profiler.nodes\n [array([59, 58, 57, 56, 46, 45])]\n \"\"\"\n return self._nodes\n\n @property\n def colors(self):\n \"\"\"List of colors for each segment.\n\n Examples\n --------\n >>> import numpy as np\n >>> from landlab import RasterModelGrid\n >>> from landlab.components import (\n ... FastscapeEroder,\n ... FlowAccumulator,\n ... ChannelProfiler\n ... )\n >>> mg = RasterModelGrid((10, 10), xy_spacing=10)\n >>> np.random.seed(42)\n >>> z = mg.add_zeros('topographic__elevation', at='node')\n >>> z[mg.core_nodes] += np.random.randn(mg.core_nodes.size)\n >>> fa = FlowAccumulator(mg)\n >>> sp = FastscapeEroder(mg, K_sp=0.0001)\n >>> dt = 1000\n >>> for i in range(200):\n ... fa.run_one_step()\n ... sp.run_one_step(dt=dt)\n ... z[mg.core_nodes] += 0.001 * dt\n >>> profiler = ChannelProfiler(mg)\n >>> profiler.run_one_step()\n >>> np.round(profiler.colors, decimals=2)\n array([[ 0.27, 0. , 0.33, 1. ]])\n \"\"\"\n return self._colors\n\n def plot_profiles(\n self,\n field=\"topographic__elevation\",\n xlabel=\"Distance Along Profile\",\n ylabel=\"Plotted Quantity\",\n title=\"Extracted Profiles\",\n color=None,\n ):\n \"\"\"Plot distance-upstream vs at at-node or size (nnodes,) quantity.\n\n Parameters\n ----------\n field : field name or nnode array\n Array of the at-node-field to plot against distance upstream.\n Default value is the at-node field 'topographic__elevation'.\n xlabel : str, optional\n X-axis label, default is \"Distance Along Profile\".\n ylabel : str, optional\n Y-axis label, default value is \"Plotted Quantity\".\n title : str, optional\n Plot title, default value is \"Extracted Profiles\".\n color : RGBA tuple or color string\n Color to use in order to plot all profiles the same color. 
Default\n is None, and the colors assigned to each profile are used.\n \"\"\"\n quantity = return_array_at_node(self._grid, field)\n\n # create segments the way that line collection likes them.\n segments = []\n qmin = []\n qmax = []\n for idx, nodes in enumerate(self._nodes):\n segments.append(\n list(zip(self._distance_along_profile[idx], quantity[nodes]))\n )\n qmin.append(min(quantity[nodes]))\n qmax.append(max(quantity[nodes]))\n\n # We need to set the plot limits.\n ax = plt.gca()\n ax.set_xlim(\n _recursive_min(self._distance_along_profile),\n _recursive_max(self._distance_along_profile),\n )\n ax.set_ylim(min(qmin), max(qmax))\n\n line_segments = LineCollection(segments)\n colors = color or self._colors\n line_segments.set_color(colors)\n ax.add_collection(line_segments)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_title(title)\n\n def plot_profiles_in_map_view(\n self, field=\"topographic__elevation\", endpoints_only=False, color=None, **kwds\n ):\n \"\"\"Plot profile locations in map view.\n\n Parameters\n ----------\n field : field name or nnode array\n Array of the at-node-field to plot as the 2D map values.\n Default value is the at-node field 'topographic__elevation'.\n endpoints_only : boolean\n Boolean where False (default) indicates every node along the\n profile is plotted, or True indicating only segment endpoints are\n plotted.\n color : RGBA tuple or color string\n Color to use in order to plot all profiles the same color. Default\n is None, and the colors assigned to each profile are used.\n **kwds : dictionary\n Keyword arguments to pass to imshow_grid.\n \"\"\"\n # make imshow_grid background\n imshow_grid(self._grid, field, **kwds)\n ax = plt.gca()\n\n # create segments the way that line collection likes them.\n segments = []\n for idx, nodes in enumerate(self._nodes):\n if endpoints_only:\n select_nodes = [nodes[0], nodes[-1]]\n segments.append(\n list(\n zip(\n self._grid.x_of_node[select_nodes],\n self._grid.y_of_node[select_nodes],\n )\n )\n )\n\n else:\n segments.append(\n list(zip(self._grid.x_of_node[nodes], self._grid.y_of_node[nodes]))\n )\n\n line_segments = LineCollection(segments)\n colors = color or self._colors\n line_segments.set_color(colors)\n ax.add_collection(line_segments)\n",
"import pytest\nfrom numpy.testing import assert_array_almost_equal\n\nfrom landlab import RasterModelGrid\nfrom landlab.components import ErosionDeposition, FlowAccumulator, Space\n\n\[email protected]\ndef grid():\n grid = RasterModelGrid((10, 10), xy_spacing=10.0)\n grid.set_closed_boundaries_at_grid_edges(True, True, True, True)\n z = grid.add_zeros(\"node\", \"topographic__elevation\")\n grid.add_zeros(\"node\", \"soil__depth\")\n z += grid.x_of_node.copy() + grid.y_of_node.copy()\n z[25] -= 40\n z[35] -= 40\n z[26] -= 40\n z[36] -= 40\n z[24] -= 40\n z[34] -= 40\n\n return grid\n\n\n# consider full combinitorics of solver, two phi, ED and Space, and (if space)\n# initial soil depth of very large and zero.\[email protected](\"solver\", [\"basic\", \"adaptive\"])\[email protected](\"v_s\", [1.5])\[email protected](\"dt\", [2])\ndef test_mass_conserve_all_closed_ErosionDeposition(grid, solver, v_s, dt):\n z_init = grid.at_node[\"topographic__elevation\"].copy()\n\n fa = FlowAccumulator(grid)\n fa.run_one_step()\n\n ed = ErosionDeposition(grid, solver=solver, v_s=v_s)\n ed.run_one_step(dt)\n\n dz = z_init - grid.at_node[\"topographic__elevation\"]\n\n # For Erosion Deposition, porosity should not have any effect, because\n # the component operates in terms of bulk-equivalent sediment flux,\n # erosion, and deposition.\n\n assert_array_almost_equal(dz.sum(), 0.0, decimal=10)\n\n\[email protected](\"phi\", [0.0, 0.3])\[email protected](\"solver\", [\"basic\", \"adaptive\"])\[email protected](\"H\", [0, 1, 1000])\[email protected](\"v_s\", [1.5])\[email protected](\"H_star\", [0.1])\[email protected](\"dt\", [2])\ndef test_mass_conserve_all_closed_Space(grid, H, solver, phi, v_s, H_star, dt):\n grid.at_node[\"soil__depth\"][:] = H\n\n z_init = grid.at_node[\"topographic__elevation\"].copy()\n\n fa = FlowAccumulator(grid)\n fa.run_one_step()\n\n ed = Space(grid, solver=solver, phi=phi, v_s=v_s, H_star=H_star)\n ed.run_one_step(dt)\n\n # in space, everything is either bedrock or sediment. check for\n # conservation.\n dH = grid.at_node[\"soil__depth\"][:] - H\n\n # sediment is defined as having a porosity so all changes (up or down )\n # must be adjusted to mass.\n dH *= 1 - phi\n\n dBr = grid.at_node[\"bedrock__elevation\"] - (z_init - H)\n mass_change = dH + dBr\n\n assert_array_almost_equal(mass_change.sum(), 0.0, decimal=10)\n\n\n# Note that we can't make an equivalent test for with a depression finder yet\n# because the depression finder can't handle no outlet on the grid.\n# but what we can do is make an example in which there is a big sink in which\n# almost all sediment is trapped. 
We can then assert that all sediment is\n# either trapped OR that it is sent out of the one outlet node.\[email protected]()\ndef grid2(grid):\n grid.status_at_node[1] = grid.BC_NODE_IS_FIXED_VALUE\n return grid\n\n\n# consider full combinitorics of solver, two phi, depression finding or not,\n# ED and Space, and (if space) initial soil depth of very large and zero.\n\n\[email protected](\"depression_finder\", [None, \"DepressionFinderAndRouter\"])\[email protected](\"solver\", [\"basic\", \"adaptive\"])\[email protected](\"v_s\", [1.5])\[email protected](\"dt\", [2])\ndef test_mass_conserve_with_depression_finder_ErosionDeposition(\n grid2, solver, depression_finder, v_s, dt\n):\n assert grid2.status_at_node[1] == grid2.BC_NODE_IS_FIXED_VALUE\n\n z_init = grid2.at_node[\"topographic__elevation\"].copy()\n\n if depression_finder is None:\n fa = FlowAccumulator(grid2)\n else:\n fa = FlowAccumulator(grid2, depression_finder=depression_finder, routing=\"D4\")\n fa.run_one_step()\n\n ed = ErosionDeposition(grid2, solver=solver, v_s=v_s)\n ed.run_one_step(dt)\n\n dz = grid2.at_node[\"topographic__elevation\"] - z_init\n\n # assert that the mass loss over the surface is exported through the one\n # outlet.\n net_change = dz[grid2.core_nodes].sum() + (\n ed._qs_in[1] * dt / grid2.cell_area_at_node[11]\n )\n assert_array_almost_equal(net_change, 0.0, decimal=10)\n\n\[email protected](\"depression_finder\", [None, \"DepressionFinderAndRouter\"])\[email protected](\"phi\", [0.0, 0.3])\[email protected](\"solver\", [\"basic\", \"adaptive\"])\[email protected](\"H\", [0, 1000])\[email protected](\"v_s\", [1.5])\[email protected](\"H_star\", [0.1])\[email protected](\"dt\", [2])\ndef test_mass_conserve_with_depression_finder_Space(\n grid2, H, solver, depression_finder, phi, v_s, H_star, dt\n):\n grid2.at_node[\"soil__depth\"][:] = H\n assert grid2.status_at_node[1] == grid2.BC_NODE_IS_FIXED_VALUE\n\n z_init = grid2.at_node[\"topographic__elevation\"].copy()\n\n if depression_finder is None:\n fa = FlowAccumulator(grid2)\n else:\n fa = FlowAccumulator(grid2, depression_finder=depression_finder, routing=\"D4\")\n fa.run_one_step()\n\n ed = Space(grid2, solver=solver, phi=phi, v_s=v_s, H_star=H_star)\n ed.run_one_step(dt)\n\n # see above test for notes.\n dH = grid2.at_node[\"soil__depth\"][:] - H\n dH *= 1 - phi\n dBr = grid2.at_node[\"bedrock__elevation\"] - (z_init - H)\n mass_change = dH + dBr\n\n # assert that the mass loss over the surface is exported through the one\n # outlet.\n net_change = mass_change[grid2.core_nodes].sum() + (\n ed._qs_in[1] * dt / grid2.cell_area_at_node[11]\n )\n assert_array_almost_equal(net_change, 0.0, decimal=10)\n",
"import numpy as np\nimport pytest\n\nfrom landlab import RasterModelGrid\nfrom landlab.components import FlowAccumulator, HeightAboveDrainageCalculator\n\n\ndef test_route_to_multiple_error_raised():\n mg = RasterModelGrid((10, 10))\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n z += mg.x_of_node + mg.y_of_node\n fa = FlowAccumulator(mg, flow_director=\"MFD\")\n fa.run_one_step()\n\n channel__mask = mg.zeros(at=\"node\")\n\n with pytest.raises(NotImplementedError):\n HeightAboveDrainageCalculator(mg, channel_mask=channel__mask)\n\n\ndef test_warn_drainage_pits():\n mg = RasterModelGrid((4, 4))\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n elev = np.array([[2, 1, 1, 2], [3, 2, 2, 3], [4, 1, 3, 4], [5, 3, 4, 4]])\n z[:] = elev.reshape(len(z))\n\n fa = FlowAccumulator(mg, flow_director=\"D8\")\n fa.run_one_step()\n\n channel__mask = mg.zeros(at=\"node\")\n channel__mask[[2, 6]] = 1\n hd = HeightAboveDrainageCalculator(mg, channel_mask=channel__mask)\n\n with pytest.warns(UserWarning):\n hd.run_one_step()\n",
"#! /usr/env/python\n\n\"\"\"Create 2D grid with randomly generated fractures.\n\nCreated: September 2013 by Greg Tucker\nLast significant modification: conversion to proper component 7/2019 GT\n\"\"\"\n\nimport numpy as np\n\nfrom landlab import Component\n\n\ndef _calc_fracture_starting_position(shape, seed):\n \"\"\"Choose a random starting position along the x or y axis (random choice).\n\n Parameters\n ----------\n shape : tuple of int\n Number of rows and columns in the grid\n seed : int\n Seeds the random number generator, so that a particular random\n sequence can be recreated.\n\n Returns\n -------\n (y, x) : tuple of int\n Fracture starting coordinates\n \"\"\"\n np.random.seed(seed)\n\n if np.random.randint(0, 1) == 0:\n x = 0\n y = np.random.randint(0, shape[0] - 1)\n else:\n x = np.random.randint(0, shape[1] - 1)\n y = 0\n return (y, x)\n\n\ndef _calc_fracture_orientation(coords, seed):\n \"\"\"Choose a random orientation for the fracture.\n\n Parameters\n ----------\n coords : tuple of int\n Starting coordinates (one of which should be zero) as *y*, *x*.\n seed : int\n Seed value for random number generator\n\n Returns\n -------\n ang : float\n Fracture angle relative to horizontal\n\n Notes\n -----\n If the fracture starts along the bottom of the grid (y=0), then the\n angle will be between 45 and 135 degrees from horizontal\n (counter-clockwise). Otherwise, it will be between -45 and 45 degrees.\n \"\"\"\n y, x = coords\n\n np.random.seed(seed)\n ang = (np.pi / 2) * np.random.rand()\n if y == 0:\n ang += np.pi / 4\n else:\n ang -= np.pi / 4\n\n return ang\n\n\ndef _calc_fracture_step_sizes(start_yx, ang):\n \"\"\"Calculate the sizes of steps dx and dy to be used when \"drawing\" the\n fracture onto the grid.\n\n Parameters\n ----------\n start_yx : tuple of int\n Starting grid coordinates\n ang : float\n Fracture angle relative to horizontal (radians)\n\n Returns\n -------\n (dy, dx) : tuple of float\n Step sizes in y and x directions. One will always be unity, and the\n other will always be <1.\n \"\"\"\n starty, startx = start_yx\n if startx == 0: # frac starts on left side\n dx = 1\n dy = np.tan(ang)\n else: # frac starts on bottom side\n dy = 1\n dx = -np.tan(ang - np.pi / 2)\n\n return (dy, dx)\n\n\ndef _trace_fracture_through_grid(m, start_yx, spacing):\n \"\"\"Create a 2D fracture in a grid.\n\n Creates a \"fracture\" in a 2D grid, m, by setting cell values to unity\n along the trace of the fracture (i.e., \"drawing\" a line throuh the\n grid).\n\n Parameters\n ----------\n m : 2D Numpy array\n Array that represents the grid\n start_yx : tuple of int\n Starting grid coordinates for fracture\n spacing : tuple of float\n Step sizes in y and x directions\n\n Returns\n -------\n None, but changes contents of m\n \"\"\"\n y0, x0 = start_yx\n dy, dx = spacing\n\n x = x0\n y = y0\n\n while (\n round(x) < np.size(m, 1)\n and round(y) < np.size(m, 0)\n and round(x) >= 0\n and round(y) >= 0\n ):\n m[int(y + 0.5)][int(x + 0.5)] = 1\n x += dx\n y += dy\n\n\nclass FractureGridGenerator(Component):\n\n \"\"\"Create a 2D grid with randomly generated fractures.\n\n The grid contains the value 1 where fractures (one cell wide) exist, and\n 0 elsewhere. 
The idea is to use this for simulations based on weathering\n and erosion of, and/or flow within, fracture networks.\n\n Examples\n --------\n >>> from landlab import RasterModelGrid\n >>> grid = RasterModelGrid((5, 5))\n >>> fg = FractureGridGenerator(grid=grid, frac_spacing=3)\n >>> fg.run_one_step()\n >>> grid.at_node['fracture_at_node']\n array([1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], dtype=int8)\n\n Notes\n -----\n Potential improvements:\n\n - Fractures could be defined by links rather than nodes (i.e., return a\n link array with a code indicating whether the link crosses a fracture\n or not)\n - Fractures could have a finite length rather than extending all the way\n across the grid\n - Use of starting position along either x or y axis makes fracture net\n somewhat asymmetric. One would need a different algorithm to make it\n fully (statistically) symmetric.\n\n References\n ----------\n **Required Software Citation(s) Specific to this Component**\n\n None Listed\n\n **Additional References**\n\n None Listed\n\n \"\"\"\n\n _name = \"FractureGridGenerator\"\n\n _unit_agnostic = True\n\n _info = {\n \"fracture_at_node\": {\n \"dtype\": np.int8,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"-\",\n \"mapping\": \"node\",\n \"doc\": \"presence (1) or absence (0) of fracture\",\n }\n }\n\n def __init__(self, grid, frac_spacing=10.0, seed=0):\n \"\"\"Initialize the FractureGridGenerator.\n\n Parameters\n ----------\n frac_spacing : int, optional\n Average spacing of fractures (in grid cells) (default = 10)\n seed : int, optional\n Seed used for random number generator (default = 0)\n\n \"\"\"\n\n self._frac_spacing = frac_spacing\n self._seed = seed\n super().__init__(grid)\n\n if \"fracture_at_node\" not in grid.at_node:\n grid.add_zeros(\"node\", \"fracture_at_node\", dtype=np.int8)\n\n def run_one_step(self):\n \"\"\"Run FractureGridGenerator and create a random fracture grid.\"\"\"\n self._make_frac_grid(self._frac_spacing, self._seed)\n\n def _make_frac_grid(self, frac_spacing, seed):\n \"\"\"Create a grid that contains a network of random fractures.\n\n Creates a grid containing a network of random fractures, which are\n represented as 1's embedded in a grid of 0's. The grid is stored in\n the \"fracture_at_node\" field.\n\n Parameters\n ----------\n frac_spacing : int\n Average spacing of fractures (in grid cells)\n seed : int\n Seed used for random number generator\n \"\"\"\n # Make an initial grid of all zeros. If user specified a model grid,\n # use that. Otherwise, use the given dimensions.\n nr = self._grid.number_of_node_rows\n nc = self._grid.number_of_node_columns\n m = self._grid.at_node[\"fracture_at_node\"].reshape((nr, nc))\n\n # Add fractures to grid\n nfracs = (nr + nc) // frac_spacing\n for i in range(nfracs):\n\n (y, x) = _calc_fracture_starting_position((nr, nc), seed + i)\n ang = _calc_fracture_orientation((y, x), seed + i)\n (dy, dx) = _calc_fracture_step_sizes((y, x), ang)\n\n _trace_fracture_through_grid(m, (y, x), (dy, dx))\n",
"\"\"\"Define a graph of nodes-links-patches and its dual.\n\nThis class should not be used directly. Instead, it should be used as a\nbase class when defining other types of graphs.\n\"\"\"\nimport inspect\nfrom functools import lru_cache\n\nimport numpy as np\n\nfrom ..core.utils import as_id_array\nfrom .graph import Graph\nfrom .graph_convention import ConventionConverter\nfrom .sort.sort import reverse_one_to_one\n\n\nclass DualGraphMeta(type):\n def __init__(cls, name, bases, dct):\n type.__init__(cls, name, bases, dct)\n\n converter = ConventionConverter(\"cfc\")\n for name, prop in inspect.getmembers(cls, inspect.isdatadescriptor):\n new_name = converter.conform(name, \"nlp\")\n if hasattr(cls, new_name):\n continue\n\n fdoc = inspect.getdoc(prop)\n if fdoc:\n fdoc = inspect.cleandoc(\n \"\"\"{0}\n\n See Also\n --------\n Graph.{1}\n \"\"\".format(\n converter.conform(fdoc.splitlines()[0], \"nlp\"), name\n )\n )\n\n setattr(\n cls,\n new_name,\n property(lambda x, name=name: getattr(x._dual, name), None, None, fdoc),\n )\n\n\nclass DualGraph(metaclass=DualGraphMeta):\n @property\n def dual(self):\n return self._dual\n\n @property\n def node_at_cell(self):\n return self.ds[\"node_at_cell\"].values\n\n @property\n def nodes_at_face(self):\n return self.ds[\"nodes_at_face\"].values\n\n @property\n @lru_cache()\n def cell_at_node(self):\n return reverse_one_to_one(self.node_at_cell, minlength=self.number_of_nodes)\n\n @property\n @lru_cache()\n def link_at_face(self):\n return self._create_link_at_face()\n\n def _create_link_at_face(self):\n\n link_at_nodes = {}\n for link, pair in enumerate(self.nodes_at_link):\n # pair.sort()\n link_at_nodes[tuple(np.sort(pair))] = link\n\n link_at_face = np.full((self.number_of_faces,), -1, dtype=int)\n # for face, pair in enumerate(self._nodes_at_face):\n for face, pair in enumerate(self.nodes_at_face):\n # pair.sort()\n link_at_face[face] = link_at_nodes[tuple(np.sort(pair))]\n self._link_at_face = link_at_face\n\n return self._link_at_face\n\n @property\n @lru_cache()\n def face_at_link(self):\n return reverse_one_to_one(self.link_at_face, minlength=self.number_of_links)\n\n def sort(self):\n from .sort.ext.remap_element import remap_graph_element\n\n sorted_nodes, sorted_links, sorted_patches = Graph.sort(self)\n sorted_corners, sorted_faces, sorted_cells = self.dual.sort()\n\n with self.thawed():\n self.node_at_cell[:] = self.node_at_cell[sorted_cells]\n self.nodes_at_face[:] = self.nodes_at_face[sorted_faces]\n\n remap_graph_element(\n as_id_array(self.node_at_cell), as_id_array(np.argsort(sorted_nodes))\n )\n remap_graph_element(\n as_id_array(self.nodes_at_face).reshape((-1,)),\n as_id_array(np.argsort(sorted_nodes)),\n )\n",
"import numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nfrom landlab import RasterModelGrid, VoronoiDelaunayGrid\nfrom landlab.components import FlowAccumulator, FlowDirectorDINF\nfrom landlab.components.flow_director import flow_direction_dinf\n\n\ndef test_not_implemented_voroni():\n x = [0, 0.1, 0.2, 0.3, 1, 1.1, 1.2, 1.3, 2, 2.1, 2.2, 2.3]\n y = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]\n vmg = VoronoiDelaunayGrid(x, y)\n with pytest.raises(NotImplementedError):\n flow_direction_dinf.flow_directions_dinf(vmg)\n\n\ndef test_D_infinity_low_closed_boundary_conditions():\n mg = RasterModelGrid((5, 4), xy_spacing=(1, 1))\n z = np.array(\n [[0, 0, 0, 0], [0, 21, 10, 0], [0, 31, 20, 0], [0, 32, 30, 0], [0, 0, 0, 0]],\n dtype=\"float64\",\n )\n mg.add_field(\"topographic__elevation\", z, at=\"node\")\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n\n fd = FlowDirectorDINF(mg)\n fd.run_one_step()\n\n true_receivers = np.array(\n [\n [0, -1],\n [1, -1],\n [2, -1],\n [3, -1],\n [4, -1],\n [6, -1],\n [6, -1],\n [7, -1],\n [8, -1],\n [10, 6],\n [6, -1],\n [11, -1],\n [12, -1],\n [10, -1],\n [10, -1],\n [15, -1],\n [16, -1],\n [17, -1],\n [18, -1],\n [19, -1],\n ]\n )\n\n true_proportions = np.array(\n [\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [0.06058469, 0.93941531],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n ]\n )\n assert_array_equal(fd._receivers, true_receivers)\n assert_array_equal(\n np.round(fd._proportions, decimals=6), np.round(true_proportions, decimals=6)\n )\n\n\ndef test_D_infinity_open_boundary_conditions():\n mg = RasterModelGrid((5, 4), xy_spacing=(1, 1))\n z = mg.x_of_node + 2.0 * mg.y_of_node\n mg.add_field(\"topographic__elevation\", z, at=\"node\")\n\n fd = FlowDirectorDINF(mg)\n fd.run_one_step()\n\n true_receivers = np.array(\n [\n [0, -1],\n [1, -1],\n [2, -1],\n [3, -1],\n [4, -1],\n [1, 0],\n [2, 1],\n [7, -1],\n [8, -1],\n [5, 4],\n [6, 5],\n [11, -1],\n [12, -1],\n [9, 8],\n [10, 9],\n [15, -1],\n [16, -1],\n [17, -1],\n [18, -1],\n [19, -1],\n ]\n )\n\n true_proportions = np.array(\n [\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [0.40966553, 0.59033447],\n [0.40966553, 0.59033447],\n [1.0, 0.0],\n [1.0, 0.0],\n [0.40966553, 0.59033447],\n [0.40966553, 0.59033447],\n [1.0, 0.0],\n [1.0, 0.0],\n [0.40966553, 0.59033447],\n [0.40966553, 0.59033447],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n [1.0, 0.0],\n ]\n )\n assert_array_equal(fd._receivers, true_receivers)\n assert_array_equal(\n np.round(fd._proportions, decimals=6), np.round(true_proportions, decimals=6)\n )\n\n\ndef test_D_infinity_flat():\n mg = RasterModelGrid((5, 4), xy_spacing=(1, 1))\n mg.add_zeros(\"topographic__elevation\", at=\"node\")\n\n fd = FlowDirectorDINF(mg)\n fd.run_one_step()\n\n node_ids = np.arange(mg.number_of_nodes)\n true_receivers = -1 * np.ones(fd._receivers.shape)\n true_receivers[:, 0] = node_ids\n\n true_proportions = np.zeros(fd._proportions.shape)\n true_proportions[:, 0] = 1\n\n assert_array_equal(fd._receivers, true_receivers)\n assert_array_equal(\n np.round(fd._proportions, decimals=6), np.round(true_proportions, decimals=6)\n )\n\n\ndef test_D_infinity_flat_closed_lower():\n mg = RasterModelGrid((5, 4), xy_spacing=(1, 1))\n z = 
mg.add_zeros(\"topographic__elevation\", at=\"node\")\n z[mg.core_nodes] += 1\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n\n fd = FlowDirectorDINF(mg)\n fd.run_one_step()\n\n node_ids = np.arange(mg.number_of_nodes)\n true_receivers = -1 * np.ones(fd._receivers.shape)\n true_receivers[:, 0] = node_ids\n\n true_proportions = np.zeros(fd._proportions.shape)\n true_proportions[:, 0] = 1\n\n assert_array_equal(fd._receivers, true_receivers)\n assert_array_equal(\n np.round(fd._proportions, decimals=6), np.round(true_proportions, decimals=6)\n )\n\n\ndef test_D_infinity_flat_closed_upper():\n mg = RasterModelGrid((5, 4), xy_spacing=(1, 1))\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n z[mg.core_nodes] -= 1\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n\n fd = FlowDirectorDINF(mg)\n fd.run_one_step()\n\n node_ids = np.arange(mg.number_of_nodes)\n true_receivers = -1 * np.ones(fd._receivers.shape)\n true_receivers[:, 0] = node_ids\n\n true_proportions = np.zeros(fd._proportions.shape)\n true_proportions[:, 0] = 1\n\n assert_array_equal(fd._receivers, true_receivers)\n assert_array_equal(\n np.round(fd._proportions, decimals=6), np.round(true_proportions, decimals=6)\n )\n\n\ndef test_D_infinity_SW_slope():\n mg = RasterModelGrid((10, 10))\n mg.add_field(\"topographic__elevation\", mg.node_y + mg.node_x, at=\"node\")\n fa = FlowAccumulator(mg, flow_director=\"FlowDirectorDINF\")\n fa.run_one_step()\n\n # this one should all flow to the soutwest (third column of diagonal neighbors at node)\n node_ids = np.arange(mg.number_of_nodes)\n sw_diags = mg.diagonal_adjacent_nodes_at_node[:, 2]\n true_receivers = -1 * np.ones(fa.flow_director._receivers.shape)\n true_receivers[:, 0] = sw_diags\n true_receivers[mg.boundary_nodes, 0] = node_ids[mg.boundary_nodes]\n\n true_proportions = np.zeros(fa.flow_director._proportions.shape)\n true_proportions[:, 0] = 1\n\n assert_array_equal(true_receivers, fa.flow_director._receivers)\n assert_array_equal(true_proportions, fa.flow_director._proportions)\n\n\ndef test_D_infinity_WSW_slope():\n mg = RasterModelGrid((10, 10))\n mg.add_field(\n \"topographic__elevation\", mg.node_y * (2 ** 0.5 - 1.0) + mg.node_x, at=\"node\"\n )\n fa = FlowAccumulator(mg, flow_director=\"FlowDirectorDINF\")\n fa.run_one_step()\n\n # this one should flow equally to west and southwest.\n node_ids = np.arange(mg.number_of_nodes)\n sw_diags = mg.diagonal_adjacent_nodes_at_node[:, 2]\n w_links = mg.adjacent_nodes_at_node[:, 2]\n true_receivers = -1 * np.ones(fa.flow_director._receivers.shape)\n true_receivers[mg.core_nodes, 0] = w_links[mg.core_nodes]\n true_receivers[mg.core_nodes, 1] = sw_diags[mg.core_nodes]\n true_receivers[mg.boundary_nodes, 0] = node_ids[mg.boundary_nodes]\n\n true_proportions = np.zeros(fa.flow_director._proportions.shape)\n true_proportions[mg.boundary_nodes, 0] = 1\n true_proportions[mg.core_nodes, 0] = 0.5\n true_proportions[mg.core_nodes, 1] = 0.5\n\n assert_array_equal(true_receivers, fa.flow_director._receivers)\n",
"# coding: utf8\n# ! /usr/env/python\n\"\"\"trickle_down_profiler.py component to create channel profiles.\"\"\"\nfrom collections import OrderedDict\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\nfrom landlab.components.profiler.base_profiler import _BaseProfiler\nfrom landlab.core.utils import as_id_array\nfrom landlab.utils.flow__distance import calculate_flow__distance\n\n\nclass TrickleDownProfiler(_BaseProfiler):\n \"\"\"Extract and a profile from one or more node IDs to their downstream termini.\n\n The TrickleDownProfiler extracts channel networks from a landlab grid.\n Unlike the ChannelProfiler which starts at one or more watershed outlets\n and works upstream until it reaches the end of the channel (based on\n a specified threshold, such as drainage area) the TrickleDownProfiler\n starts at a *starting node* and works its way downhill until it reaches\n an outlet or sink.\n\n In order to follow the channel network, the flow connectivity across the\n grid must already be identified. This is typically done with the\n FlowAccumulator component. However, this component does not require that the\n FlowAccumulator was used. Instead it expects that the following at-node\n grid fields will be present:\n ::\n\n 'flow__receiver_node'\n 'flow__link_to_receiver_node'\n\n The TrickleDownProfiler can work on grids that have used route-to-one or\n route-to-multiple flow directing.\n\n To understand how this component works it is useful to define the following\n terms: *outlet*, *starting node*, and *segment*.\n\n Consider the following grid with 10 columns and 7 rows. ``@`` represents\n the *starting node*, ``.`` represents the nodes downstream, and the\n watershed outlet node is indicated by ``o``.\n\n In this and the following examples, we will use only D4 connectivity. The\n ChannelProfiler, however, knows nothing of connectivity other than what is\n implied by the two required grid fields.\n ::\n\n X X X X X X X X X X\n X X X X X X X X X X\n X X X X X X . . @ X\n X X X X X X . X X X\n X X X . . . . X X X\n X X X . X X X X X X\n X X X o X X X X X X\n\n For each starting node, the TrickleDownProfiler follows the network\n downstream until it reaches the outlet or sink. One or more starting nodes\n can be used, depending on a user's needs.\n\n The node IDs and distances upstream of the channel network are stored in\n ``data_structure``. It is a dictionary with keys indicating the starting\n node.\n\n For each starting node, the value in the ``data_structure`` is itself\n a dictionary with keys that are a segment ID tuple of the\n ``(dowstream, upstream)`` nodes IDs of each channel segment.\n\n For our simple example, these are the node IDs:\n ::\n\n X X X X X X X X X X\n X X X X X X X X X X\n X X X X X X 46 47 48 X\n X X X X X X 36 X X X\n X X X 23 24 25 26 X X X\n X X X 13 X X X X X X\n X X X 3 X X X X X X\n\n The starting node is 48 and the outlet node is 3.\n\n The value associated with the segment ID tuple ``(3, 48)`` is itself a\n dictionary. It has three key-value pairs. First, ``\"ids\"`` contains a list\n of the segment node ids ordered from downstream to upstream. It includes\n the endpoints. Second, ``\"distances\"`` contains a list of distances\n upstream that mirrors the list in ``\"ids\"``. Finally, ``\"color\"`` is an\n RGBA tuple indicating the color for the segment.\n\n By default a unique color will be assigned to each starting node. 
To change\n the color, a user can change values stored in ``data_structure``.\n Additionally, a ``cmap`` keyword argument can provide some user control\n over the color at the instantiation of the component.\n\n For example with a starting node of 48, the data structure will look as\n follows:\n\n .. code-block:: python\n\n {48: {\n (3, 48) : {\n \"ids\": [3, 13, 23, 24, 25, 26, 36, 46, 47, 48],\n \"distances\": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n \"color\": (1, 0, 1, 1),\n }\n }\n }\n\n Note that the distances upstream are relative to the outlet.\n\n Next consider an example with two starting nodes, each noted with an ``@``.\n ::\n\n X X X X X X X X X X\n X X X X X X @ X X X\n X X X X @ X . X X X\n o . . . . X . X X X\n X X X X X X . X X X\n X X X X X X . X X X\n X X X X X X . . . X\n X X X X X X X X o X\n\n And the following node IDs.\n ::\n\n X X X X X X X X X X\n X X X X X X 66 X X X\n X X X X 54 X 56 X X X\n 40 41 42 43 44 X 46 X X X\n X X X X X X 36 X X X\n X X X X X X 26 X X X\n X X X X X X 16 17 18 X\n X X X X X X X X 8 X\n\n With our starting nodes of 54 and 66 our data structure will look like.\n\n .. code-block:: python\n\n {54: {\n (40, 54) : {\n \"ids\": [40, 41, 42, 43, 44, 54],\n \"distances\": [0, 1, 3, 4, 5, 6],\n \"color\": [ 0.27, 0. , 0.33, 1. ],\n },\n }\n 66: {\n (8, 66) : {\n \"ids\": [8, 18, 17, 16, 26, 36, 46, 56, 66],\n \"distances\": [0, 1, 2, 3, 4, 5, 6, 7, 8],\n \"color\": [ 0.13, 0.57, 0.55, 1. ],\n },\n }\n\n Examples\n --------\n\n Start by importing necessary modules\n\n >>> import numpy as np\n >>> from landlab import RasterModelGrid\n >>> from landlab.components import FlowAccumulator, TrickleDownProfiler\n\n Create the second example grid we showed above. Note that in order to do\n this we need to enter the elevations starting from the lower left so the\n elevation order may seem upside-down. In addition, in this example,\n elevation is only provided along the profiles. The third line of code below\n sets all nodes with a value of zero to closed, such that these nodes are\n igored.\n >>> z = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,\n ... 0, 0, 0, 0, 0, 0, 4, 3, 2, 0,\n ... 0, 0, 0, 8, 7, 6, 5, 0, 0, 0,\n ... 0, 0, 0, 0, 0, 0, 6, 0, 0, 0,\n ... 1, 3, 4, 5, 6, 0, 7, 0, 0, 0,\n ... 0, 4, 0, 0, 7, 0, 8, 0, 0, 0,\n ... 0, 5, 6, 0, 0, 0, 9, 0, 0, 0,\n ... 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,], dtype=float)\n\n >>> mg = RasterModelGrid((8, 10))\n >>> z = mg.add_field(\"topographic__elevation\", z, at=\"node\")\n >>> mg.set_nodata_nodes_to_closed(z, 0)\n >>> fa = FlowAccumulator(mg, flow_director='D4')\n >>> fa.run_one_step()\n >>> fa.node_drainage_area.reshape(mg.shape)\n array([[ 0., 0., 0., 0., 0., 0., 0., 0., 11., 0.],\n [ 0., 0., 0., 0., 0., 0., 9., 10., 11., 0.],\n [ 0., 0., 0., 1., 2., 3., 8., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 4., 0., 0., 0.],\n [ 8., 8., 4., 3., 2., 0., 3., 0., 0., 0.],\n [ 0., 3., 0., 0., 1., 0., 2., 0., 0., 0.],\n [ 0., 2., 1., 0., 0., 0., 1., 0., 0., 0.],\n [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])\n\n >>> profiler = TrickleDownProfiler(\n ... mg,\n ... 
starting_nodes=[54, 66])\n >>> profiler.run_one_step()\n\n The keys of the property ``data_structure`` are the IDs of the two outlet\n nodes.\n\n >>> profiler.data_structure.keys()\n odict_keys([54, 66])\n\n Within the data structure, the value at key 54, is a dictionary of the\n one segment, each specified by a ``(dowstream, upstream)`` tuple:\n\n >>> profiler.data_structure[54].keys()\n dict_keys([(40, 54)])\n\n The value of the segment between nodes 40 and 54 has the following\n components:\n\n >>> profiler.data_structure[54][(40, 54)][\"ids\"]\n array([40, 41, 42, 43, 44, 54])\n >>> profiler.data_structure[54][(40, 54)][\"distances\"]\n array([ 0., 1., 2., 3., 4., 5.])\n >>> np.round(profiler.data_structure[54][(40, 54)][\"color\"], decimals=2)\n array([ 0.27, 0. , 0.33, 1. ])\n\n The rest of the ``profile_structure`` encodes information about the second\n profile which starts at node 66.\n\n >>> profiler.data_structure[66].keys()\n dict_keys([(8, 66)])\n\n >>> profiler.data_structure[66][(8, 66)][\"ids\"]\n array([ 8, 18, 17, 16, 26, 36, 46, 56, 66])\n >>> profiler.data_structure[66][(8, 66)][\"distances\"]\n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])\n >>> np.round(profiler.data_structure[66][(8, 66)][\"color\"], decimals=2)\n array([ 0.13, 0.57, 0.55, 1. ])\n\n\n References\n ----------\n **Required Software Citation(s) Specific to this Component**\n\n None Listed\n\n **Additional References**\n\n None Listed\n\n \"\"\"\n\n _name = \"TrickleDownProfiler\"\n\n _unit_agnostic = True\n\n _info = {\n \"flow__link_to_receiver_node\": {\n \"dtype\": int,\n \"intent\": \"in\",\n \"optional\": False,\n \"units\": \"-\",\n \"mapping\": \"node\",\n \"doc\": \"ID of link downstream of each node, which carries the discharge\",\n },\n \"flow__receiver_node\": {\n \"dtype\": int,\n \"intent\": \"in\",\n \"optional\": False,\n \"units\": \"-\",\n \"mapping\": \"node\",\n \"doc\": \"Node array of receivers (node that receives flow from current node)\",\n },\n }\n\n def __init__(\n self,\n grid,\n starting_nodes=None,\n cmap=\"viridis\",\n ):\n \"\"\"\n Parameters\n ----------\n grid : Landlab Model Grid instance\n starting_nodes : iterable\n cmap : str, optional\n A valid matplotlib cmap string. Default is \"viridis\".\n\n \"\"\"\n super().__init__(grid)\n\n self._cmap = plt.get_cmap(cmap)\n\n self._flow_receiver = grid.at_node[\"flow__receiver_node\"]\n self._starting_nodes = starting_nodes\n\n @property\n def data_structure(self):\n \"\"\"OrderedDict defining the trickle down network.\n\n The IDs and upstream distance of the channel network nodes are stored\n in ``data_structure``. It is a dictionary with keys of the outlet node\n ID.\n\n For each starting node, the value in the ``data_structure`` is\n itself a dictionary with keys that are a segment ID tuple of the\n ``(dowstream, upstream)`` nodes IDs of each channel segment.\n\n The value associated with the segment ID tuple\n ``(dowstream, upstream)`` is itself a dictionary. It has three\n key-value pairs. First, ``\"ids\"`` contains a list of the segment node\n IDs ordered from downstream to upstream. It includes the endpoints.\n Second, ``\"distances\"`` contains a list of distances upstream that\n mirrors the list in ``\"ids\"``. Finally, ``\"color\"`` is an RGBA tuple\n indicating the color for the segment.\n \"\"\"\n return self._data_struct\n\n def _create_profile_structure(self):\n \"\"\"Create the profile_IDs data structure for channel network.\n\n The bound attribute self._profile structure is the channel segment\n datastructure. 
Profile structure is a list of length\n starting_nodes. Each element of profile_structure is itself a\n list of length number of stream segments that drain to each of the\n starting nodes. Each stream segment list contains the node ids of a\n stream segment from downstream to upstream.\n \"\"\"\n self._data_struct = OrderedDict()\n\n for i in self._starting_nodes:\n channel_segment = []\n current_node = i\n\n # march downstream\n while self._flow_receiver[current_node] != current_node:\n channel_segment.append(current_node)\n current_node = self._flow_receiver[current_node]\n channel_segment.append(current_node)\n\n channel_segment.reverse()\n segment_tuple = (current_node, i)\n self._data_struct[i] = {\n segment_tuple: {\"ids\": as_id_array(channel_segment)}\n }\n\n self._calculate_distances()\n self.assign_colors()\n self._create_flat_structures()\n\n def _create_flat_structures(self):\n \"\"\"Create expected flattened structures for ids, distances, and colors.\"\"\"\n self._nodes = []\n\n self._distance_along_profile = []\n self._colors = []\n\n for outlet_id in self._data_struct:\n seg_tuples = self._data_struct[outlet_id].keys()\n self._nodes.extend(\n [self._data_struct[outlet_id][seg][\"ids\"] for seg in seg_tuples]\n )\n self._distance_along_profile.extend(\n [self._data_struct[outlet_id][seg][\"distances\"] for seg in seg_tuples]\n )\n self._colors.extend(\n [self._data_struct[outlet_id][seg][\"color\"] for seg in seg_tuples]\n )\n\n def assign_colors(self, color_mapping=None):\n \"\"\"Assign a unique color for each starting node.\n\n Parameters\n ----------\n color_mapping : str\n Color map name.\n \"\"\"\n\n if color_mapping is None:\n num_watersheds = len(self._data_struct)\n norm = mpl.colors.Normalize(vmin=0, vmax=num_watersheds)\n mappable = cm.ScalarMappable(norm=norm, cmap=self._cmap)\n color_mapping = {\n outlet_id: mappable.to_rgba(idx)\n for idx, outlet_id in enumerate(self._data_struct)\n }\n\n for outlet_id in self._data_struct:\n for segment_tuple in self._data_struct[outlet_id]:\n self._data_struct[outlet_id][segment_tuple][\"color\"] = color_mapping[\n outlet_id\n ]\n\n def _calculate_distances(self):\n \"\"\"Get distances along the network data structure.\"\"\"\n distance_upstream = calculate_flow__distance(self._grid)\n for outlet_id in self._data_struct:\n\n for segment_tuple in self._data_struct[outlet_id]:\n ids = self._data_struct[outlet_id][segment_tuple][\"ids\"]\n d = distance_upstream[ids]\n self._data_struct[outlet_id][segment_tuple][\"distances\"] = d\n",
"import pytest\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\n\nfrom landlab.graph import Graph\n\nr\"\"\"\nFor these tests the nodes are given column-by-column::\n\n (1) -- (3) --- (4)\n | | /\n (0) | /\n \\ | /\n (2)\n\nOnce sorted, the node numbering becomes::\n\n (2) -- (3) --- (4)\n | | /\n (1) | /\n \\ | /\n (0)\n\"\"\"\nNODE_X = (0, 0, 1, 1, 2, 2)\nNODE_Y = (0, 1, 0, 1, 1, 0)\nNODES_AT_LINK = ((0, 1), (0, 2), (1, 3), (2, 3), (2, 5), (3, 4), (4, 5))\nLINKS_AT_PATCH = ((3, 2, 0, 1), (3, 5, 6, 4))\n\n# NODE_X = (0, 0, 1, 1, 2)\n# NODE_Y = (1, 2, 0, 2, 2)\n# NODES_AT_LINK = ((0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4))\n# LINKS_AT_PATCH = ((3, 2, 0, 1), (3, 5, 4))\n# LINKS_AT_PATCH = ([3, 2, 0, 1, 3, 5, 4], [4, 3])\n\n\ndef test_graph_is_frozen():\n graph = Graph((NODE_Y, NODE_X), links=NODES_AT_LINK, sort=True)\n\n assert_array_equal(\n graph.nodes_at_link, [[0, 1], [1, 2], [0, 3], [1, 4], [2, 5], [3, 4], [4, 5]]\n )\n\n with pytest.raises(ValueError):\n graph.nodes_at_link[0] = [1, 0]\n\n\ndef test_graph_can_thaw():\n graph = Graph((NODE_Y, NODE_X), links=NODES_AT_LINK, sort=True)\n\n assert_array_equal(\n graph.nodes_at_link, [[0, 1], [1, 2], [0, 3], [1, 4], [2, 5], [3, 4], [4, 5]]\n )\n\n with graph.thawed():\n graph.nodes_at_link[0] = [1, 0]\n assert_array_equal(\n graph.nodes_at_link, [[1, 0], [1, 2], [0, 3], [1, 4], [2, 5], [3, 4], [4, 5]]\n )\n\n\ndef test_create_graph_with_nodes():\n \"\"\"Create a graph of unconnected nodes.\"\"\"\n graph = Graph((NODE_Y, NODE_X), sort=True)\n\n assert_array_almost_equal(graph.x_of_node, [0.0, 1.0, 2.0, 0.0, 1.0, 2.0])\n assert_array_almost_equal(graph.y_of_node, [0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n\n\ndef test_create_graph_with_links():\n \"\"\"Create a graph of connected nodes.\"\"\"\n graph = Graph((NODE_Y, NODE_X), links=NODES_AT_LINK, sort=True)\n\n assert_array_almost_equal(graph.x_of_node, [0.0, 1.0, 2.0, 0.0, 1.0, 2.0])\n assert_array_almost_equal(graph.y_of_node, [0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n assert_array_almost_equal(\n graph.nodes_at_link, [[0, 1], [1, 2], [0, 3], [1, 4], [2, 5], [3, 4], [4, 5]]\n )\n\n\[email protected](\"numpy bug?\")\ndef test_graph_nodes_property():\n graph = Graph((NODE_Y, NODE_X), links=NODES_AT_LINK, sort=True)\n assert_array_equal(graph.nodes, [0, 1, 2, 3, 4, 5])\n with pytest.raises(ValueError):\n graph.nodes[0] = 99\n\n\ndef test_graph_link_heads():\n \"\"\"Test nodes at link heads.\"\"\"\n graph = Graph((NODE_Y, NODE_X), links=NODES_AT_LINK, sort=True)\n\n assert_array_equal(graph.node_at_link_head, [1, 2, 3, 4, 5, 4, 5])\n\n\ndef test_graph_link_tails():\n \"\"\"Test nodes at link tails.\"\"\"\n graph = Graph((NODE_Y, NODE_X), links=NODES_AT_LINK, sort=True)\n\n assert_array_equal(graph.node_at_link_tail, [0, 1, 0, 1, 2, 3, 4])\n\n\ndef test_graph_links_at_node():\n \"\"\"Test links at nodes without rotational sorting.\"\"\"\n graph = Graph((NODE_Y, NODE_X), links=NODES_AT_LINK, sort=True)\n\n assert_array_equal(\n graph.links_at_node,\n [[0, 2, -1], [1, 3, 0], [4, 1, -1], [5, 2, -1], [6, 5, 3], [6, 4, -1]],\n )\n\n\ndef test_graph_link_dirs_at_node():\n \"\"\"Test links directions at nodes without rotational sorting.\"\"\"\n graph = Graph((NODE_Y, NODE_X), links=NODES_AT_LINK, sort=True)\n\n assert_array_equal(\n graph.link_dirs_at_node,\n [[-1, -1, 0], [-1, -1, 1], [-1, 1, 0], [-1, 1, 0], [-1, 1, 1], [1, 1, 0]],\n )\n\n\ndef test_links_at_patch_ccw():\n \"\"\"Test links at patch with rotational sorting.\"\"\"\n graph = Graph(\n (NODE_Y, NODE_X), links=NODES_AT_LINK, 
patches=LINKS_AT_PATCH, sort=True\n )\n\n assert_array_equal(graph.links_at_patch, [[3, 5, 2, 0], [4, 6, 3, 1]])\n\n\ndef test_nodes_at_patch_ccw():\n \"\"\"Test nodes at patch with rotational sorting.\"\"\"\n graph = Graph(\n (NODE_Y, NODE_X), links=NODES_AT_LINK, patches=LINKS_AT_PATCH, sort=True\n )\n\n assert_array_equal(graph.nodes_at_patch, [[4, 3, 0, 1], [5, 4, 1, 2]])\n",
"import numpy as np\n\nfrom landlab import Component\n\n_VALID_METHODS = set([\"Constant\", \"PriestleyTaylor\", \"MeasuredRadiationPT\", \"Cosine\"])\n\n\ndef _assert_method_is_valid(method):\n if method not in _VALID_METHODS:\n raise ValueError(\"%s: Invalid method name\" % method)\n\n\nclass PotentialEvapotranspiration(Component):\n\n \"\"\"\n Potential Evapotranspiration Component calculates spatially distributed\n potential evapotranspiration based on input radiation factor (spatial\n distribution of incoming radiation) using chosen method such as constant\n or Priestley Taylor. Ref: Xiaochi et. al. 2013 for 'Cosine' method and\n ASCE-EWRI Task Committee Report Jan 2005 for 'PriestleyTaylor' method.\n Note: Calling 'PriestleyTaylor' method would generate/overwrite shortwave &\n longwave radiation fields.\n\n .. codeauthor:: Sai Nudurupati and Erkan Istanbulluoglu\n\n Examples\n --------\n >>> from landlab import RasterModelGrid\n >>> from landlab.components.pet import PotentialEvapotranspiration\n\n >>> grid = RasterModelGrid((5, 4), xy_spacing=(0.2, 0.2))\n >>> grid['cell']['radiation__ratio_to_flat_surface'] = np.array([\n ... 0.38488566, 0.38488566,\n ... 0.33309785, 0.33309785,\n ... 0.37381705, 0.37381705])\n >>> PET = PotentialEvapotranspiration(grid)\n >>> PET.name\n 'PotentialEvapotranspiration'\n >>> PET.input_var_names\n ('radiation__ratio_to_flat_surface',)\n >>> sorted(PET.output_var_names)\n ['radiation__incoming_shortwave_flux',\n 'radiation__net_flux',\n 'radiation__net_longwave_flux',\n 'radiation__net_shortwave_flux',\n 'surface__potential_evapotranspiration_rate']\n >>> sorted(PET.units) # doctest: +NORMALIZE_WHITESPACE\n [('radiation__incoming_shortwave_flux', 'W/m^2'),\n ('radiation__net_flux', 'W/m^2'),\n ('radiation__net_longwave_flux', 'W/m^2'),\n ('radiation__net_shortwave_flux', 'W/m^2'),\n ('radiation__ratio_to_flat_surface', 'None'),\n ('surface__potential_evapotranspiration_rate', 'mm')]\n >>> PET.grid.number_of_cell_rows\n 3\n >>> PET.grid.number_of_cell_columns\n 2\n >>> PET.grid is grid\n True\n >>> pet_rate = grid.at_cell['surface__potential_evapotranspiration_rate']\n >>> np.allclose(pet_rate, 0.)\n True\n >>> PET.current_time = 0.5\n >>> PET.update()\n >>> np.allclose(pet_rate, 0.)\n False\n\n References\n ----------\n **Required Software Citation(s) Specific to this Component**\n\n None Listed\n\n **Additional References**\n\n ASCE-EWRI: The ASCE standardized reference evapotranspiration equation, in:\n Standardization of Reference Evapotranspiration Task Committee Final Report,\n edited by: Allen, R. G., Walter, I. A., Elliot, R. L., Howell, T. A.,\n Itenfisu, D., Jensen, M. E., and Snyder, R. L., Technical Committee report\n to the Environmental and Water Resources Institute of the American Society\n of Civil Engineers from the Task Committee on Standardization of Reference\n Evapotranspiration, Reston, VA, USA, 2005.\n\n Zhou, X., Istanbulluoglu, E., and Vivoni, E. R.: Modeling the\n ecohydrological role of aspect-controlled radiation on tree-grass-shrub\n coexistence in a semiarid climate, Water Resour. 
Res., 49, 2872– 2895,\n doi:10.1002/wrcr.20259, 2013.\n\n \"\"\"\n\n _name = \"PotentialEvapotranspiration\"\n\n _unit_agnostic = False\n\n _info = {\n \"radiation__incoming_shortwave_flux\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"W/m^2\",\n \"mapping\": \"cell\",\n \"doc\": \"total incident shortwave radiation over the time step\",\n },\n \"radiation__net_flux\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"W/m^2\",\n \"mapping\": \"cell\",\n \"doc\": \"net total radiation over the time step\",\n },\n \"radiation__net_longwave_flux\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"W/m^2\",\n \"mapping\": \"cell\",\n \"doc\": \"net incident longwave radiation over the time step\",\n },\n \"radiation__net_shortwave_flux\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"W/m^2\",\n \"mapping\": \"cell\",\n \"doc\": \"net incident shortwave radiation over the time step\",\n },\n \"radiation__ratio_to_flat_surface\": {\n \"dtype\": float,\n \"intent\": \"in\",\n \"optional\": False,\n \"units\": \"None\",\n \"mapping\": \"cell\",\n \"doc\": \"ratio of total incident shortwave radiation on sloped surface to flat surface\",\n },\n \"surface__potential_evapotranspiration_rate\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"mm\",\n \"mapping\": \"cell\",\n \"doc\": \"potential sum of evaporation and potential transpiration\",\n },\n }\n\n def __init__(\n self,\n grid,\n method=\"Cosine\",\n priestley_taylor_const=1.26,\n albedo=0.6,\n latent_heat_of_vaporization=28.34,\n psychometric_const=0.066,\n stefan_boltzmann_const=0.0000000567,\n solar_const=1366.67,\n latitude=34.0,\n elevation_of_measurement=300,\n adjustment_coeff=0.18,\n lt=0.0,\n nd=365.0,\n MeanTmaxF=12.0,\n delta_d=5.0,\n current_time=None,\n const_potential_evapotranspiration=12.0,\n Tmin=0.0,\n Tmax=1.0,\n Tavg=0.5,\n obs_radiation=350.0,\n ):\n \"\"\"\n Parameters\n ----------\n grid: RasterModelGrid\n A grid.\n method: {'Constant', 'PriestleyTaylor', 'MeasuredRadiationPT', 'Cosine'}, optional\n Priestley Taylor method will spit out radiation outputs too.\n priestley_taylor_constant: float, optional\n Alpha used in Priestley Taylor method.\n albedo: float, optional\n Albedo.\n latent_heat_of_vaporization: float, optional\n Latent heat of vaporization for water Pwhv (Wd/(m*mm^2)).\n psychometric_const: float, optional\n Psychometric constant (kPa (deg C)^-1).\n stefan_boltzmann_const: float, optional\n Stefan Boltzmann's constant (W/(m^2K^-4)).\n solar_const: float, optional\n Solar constant (W/m^2).\n latitude: float, optional\n Latitude (radians).\n elevation_of_measurement: float, optional\n Elevation at which measurement was taken (m).\n adjustment_coeff: float, optional\n adjustment coeff to predict Rs from air temperature (deg C)^-0.5.\n lt: float, optional\n lag between peak TmaxF and solar forcing (days).\n nd: float, optional\n Number of days in year (days).\n MeanTmaxF: float, optional\n Mean annual rate of TmaxF (mm/d).\n delta_d: float, optional\n Calibrated difference between max & min daily TmaxF (mm/d).\n current_time: float, required only for 'Cosine' method\n Current time (Years)\n const_potential_evapotranspiration: float, optional for\n 'Constant' method\n Constant PET value to be spatially distributed.\n Tmin: float, required for 'Priestley Taylor' method\n Minimum temperature of the day (deg C)\n Tmax: float, required for 
'Priestley Taylor' method\n Maximum temperature of the day (deg C)\n Tavg: float, required for 'Priestley Taylor' and 'MeasuredRadiationPT'\n methods\n Average temperature of the day (deg C)\n obs_radiation float, required for 'MeasuredRadiationPT' method\n Observed radiation (W/m^2)\n \"\"\"\n super().__init__(grid)\n\n self.current_time = current_time\n self.const_potential_evapotranspiration = const_potential_evapotranspiration\n self.Tmin = Tmin\n self.Tmax = Tmax\n self.Tavg = Tavg\n self.obs_radiation = obs_radiation\n\n self._method = method\n # For Priestley Taylor\n self._alpha = priestley_taylor_const\n self._a = albedo\n self._pwhv = latent_heat_of_vaporization\n self._y = psychometric_const\n self._sigma = stefan_boltzmann_const\n self._Gsc = solar_const\n self._phi = (np.pi / 180.0) * latitude\n self._z = elevation_of_measurement\n self._Krs = adjustment_coeff\n self._LT = lt\n self._ND = nd\n self._TmaxF_mean = MeanTmaxF\n self._DeltaD = delta_d\n _assert_method_is_valid(self._method)\n\n self.initialize_output_fields()\n\n self._cell_values = self._grid[\"cell\"]\n\n @property\n def const_potential_evapotranspiration(self):\n \"\"\"Constant PET value to be spatially distributed.\n\n Used by 'Constant' method.\n \"\"\"\n return self._const_potential_evapotranspiration\n\n @const_potential_evapotranspiration.setter\n def const_potential_evapotranspiration(self, const_potential_evapotranspiration):\n self._const_potential_evapotranspiration = const_potential_evapotranspiration\n\n @property\n def obs_radiation(self):\n \"\"\"Observed radiation (W/m^2)\n\n obs_radiation float, required for 'MeasuredRadiationPT' method.\n \"\"\"\n return self._obs_radiation\n\n @obs_radiation.setter\n def obs_radiation(self, obs_radiation):\n self._obs_radiation = obs_radiation\n\n @property\n def Tmin(self):\n \"\"\"Minimum temperature of the day (deg C)\n\n Tmin: float, required for 'Priestley Taylor' method.\n \"\"\"\n return self._Tmin\n\n @Tmin.setter\n def Tmin(self, Tmin):\n self._Tmin = Tmin\n\n @property\n def Tmax(self):\n \"\"\"Maximum temperature of the day (deg C)\n\n Tmax: float, required for 'Priestley Taylor' method.\n \"\"\"\n return self._Tmax\n\n @Tmax.setter\n def Tmax(self, Tmax):\n self._Tmax = Tmax\n\n @property\n def Tavg(self):\n \"\"\"Average temperature of the day (deg C)\n\n Tavg: float, required for 'Priestley Taylor' and 'MeasuredRadiationPT'\n methods.\n \"\"\"\n return self._Tavg\n\n @Tavg.setter\n def Tavg(self, Tavg):\n self._Tavg = Tavg\n\n def update(self):\n \"\"\"Update fields with current conditions.\n\n If the 'Constant' method is used, this method looks to the value of\n the ``const_potential_evapotranspiration`` property.\n\n If the 'PriestleyTaylor' method is used, this method looks to the\n values of the ``Tmin``, ``Tmax``, and ``Tavg`` properties.\n\n If the 'MeasuredRadiationPT' method is use this method looks to the\n values of the ``Tavg`` and ``obs_radiation`` property.\n \"\"\"\n\n if self._method == \"Constant\":\n self._PET_value = self._const_potential_evapotranspiration\n elif self._method == \"PriestleyTaylor\":\n self._PET_value = self._PriestleyTaylor(\n self._current_time, self._Tmax, self._Tmin, self._Tavg\n )\n self._cell_values[\"radiation__incoming_shortwave_flux\"] = (\n self._Rs * self._cell_values[\"radiation__ratio_to_flat_surface\"]\n )\n self._cell_values[\"radiation__net_shortwave_flux\"] = (\n self._Rns * self._cell_values[\"radiation__ratio_to_flat_surface\"]\n )\n self._cell_values[\"radiation__net_longwave_flux\"] = (\n 
self._Rnl * self._cell_values[\"radiation__ratio_to_flat_surface\"]\n )\n self._cell_values[\"radiation__net_flux\"] = (\n self._Rn * self._cell_values[\"radiation__ratio_to_flat_surface\"]\n )\n elif self._method == \"MeasuredRadiationPT\":\n Robs = self._obs_radiation\n self._PET_value = self._MeasuredRadPT(self._Tavg, (1 - self._a) * Robs)\n elif self._method == \"Cosine\":\n self._J = np.floor(\n (self._current_time - np.floor(self._current_time)) * 365.0\n )\n self._PET_value = max(\n (\n self._TmaxF_mean\n + self._DeltaD\n / 2.0\n * np.cos(\n (2 * np.pi) * (self._J - self._LT - self._ND / 2) / self._ND\n )\n ),\n 0.0,\n )\n\n self._PET = (\n self._PET_value * self._cell_values[\"radiation__ratio_to_flat_surface\"]\n )\n self._cell_values[\"surface__potential_evapotranspiration_rate\"][:] = self._PET\n\n def _PriestleyTaylor(self, current_time, Tmax, Tmin, Tavg):\n\n # Julian Day - ASCE-EWRI Task Committee Report, Jan-2005 - Eqn 25, (52)\n self._J = np.floor((current_time - np.floor(current_time)) * 365)\n # Saturation Vapor Pressure - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 6, (37)\n self._es = 0.6108 * np.exp((17.27 * Tavg) / (237.7 + Tavg))\n\n # Actual Vapor Pressure - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 8, (38)\n self._ea = 0.6108 * np.exp((17.27 * Tmin) / (237.7 + Tmin))\n\n # Slope of Saturation Vapor Pressure - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 5, (36)\n self._delta = (4098.0 * self._es) / ((237.3 + Tavg) ** 2.0)\n\n # Solar Declination Angle - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 24,(51)\n self._sdecl = 0.409 * np.sin(((np.pi / 180.0) * self._J) - 1.39)\n\n # Inverse Relative Distance Factor - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 23,(50)\n self._dr = 1 + (0.033 * np.cos(np.pi / 180.0 * self._J))\n\n # To calculate ws - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 29,(61)\n self._x = 1.0 - (((np.tan(self._phi)) ** 2.0) * (np.tan(self._sdecl) ** 2.0))\n if self._x <= 0:\n self._x = 0.00001\n # Sunset Hour Angle - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 28,(60)\n self._ws = (np.pi / 2.0) - np.arctan(\n (-1 * np.tan(self._phi) * np.tan(self._sdecl)) / (self._x ** 2.0)\n )\n\n # Extraterrestrial radmodel.docx - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 21, (48)\n # 11.57 converts 1 MJ/m^2/day to W/m^2\n self._Ra = (\n 11.57\n * (24.0 / np.pi)\n * 4.92\n * self._dr\n * (\n (self._ws * np.sin(self._phi) * np.sin(self._sdecl))\n + (np.cos(self._phi) * np.cos(self._sdecl) * (np.sin(self._ws)))\n )\n )\n\n # Clear-sky Solar Radiation - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 19, (47)\n self._Rso = (0.75 + ((2.0 * (10 ** (-5.0))) * self._z)) * self._Ra\n self._Rs = min(self._Krs * self._Ra * np.sqrt(Tmax - Tmin), self._Rso)\n\n # Net Short Wave Radiation - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 16, (43)\n self._Rns = self._Rs * (1 - self._a)\n\n # Relative Cloudiness - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Page 20,35\n if self._Rso > 0:\n self._u = self._Rs / self._Rso\n else:\n self._u = 0\n\n if self._u < 0.3:\n self._u = 0.3\n elif self._u > 1:\n self._u = 1.0\n\n # Cloudiness Function - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 18, (45)\n self._fcd = (1.35 * self._u) - 0.35\n\n # Net Long Wave Radiation - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 17, (44)\n self._Rnl = (\n self._sigma\n * self._fcd\n * (\n 0.34\n - (0.14 * np.sqrt(self._ea))\n * (((Tmax + 273.16) ** 4.0 + (Tmin + 273.16) ** 4.0) / 2.0)\n )\n )\n\n # Net 
Radiation - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 15, (42)\n self._Rn = self._Rns - self._Rnl\n\n self._ETp = max(\n self._alpha\n * (self._delta / (self._delta + self._y))\n * (self._Rn / self._pwhv),\n 0,\n )\n\n return self._ETp\n\n def _MeasuredRadPT(self, Tavg, Rnobs):\n # Saturation Vapor Pressure - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 6, (37)\n self._es = 0.6108 * np.exp((17.27 * Tavg) / (237.7 + Tavg))\n\n # Slope of Saturation Vapor Pressure - ASCE-EWRI Task Committee Report,\n # Jan-2005 - Eqn 5, (36)\n self._delta = (4098.0 * self._es) / ((237.3 + Tavg) ** 2.0)\n self._ETp = max(\n self._alpha\n * (self._delta / (self._delta + self._y))\n * (Rnobs / self._pwhv),\n 0,\n )\n return self._ETp\n",
"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\n\nfrom landlab.components import FlowDirectorSteepest, NetworkSedimentTransporter\nfrom landlab.data_record import DataRecord\nfrom landlab.grid.network import NetworkModelGrid\n\n\ndef test_add_pulse():\n\n y_of_node = (0, 0, 0, 0)\n x_of_node = (0, 100, 200, 300)\n nodes_at_link = ((0, 1), (1, 2), (2, 3))\n\n nmg_constant_slope = NetworkModelGrid((y_of_node, x_of_node), nodes_at_link)\n\n # add variables to nmg\n nmg_constant_slope.at_node[\"topographic__elevation\"] = [3.0, 2.0, 1.0, 0.0]\n nmg_constant_slope.at_node[\"bedrock__elevation\"] = [3.0, 2.0, 1.0, 0.0]\n nmg_constant_slope.at_link[\"channel_slope\"] = [0.001, 0.001, 0.001]\n nmg_constant_slope.at_link[\"reach_length\"] = [100.0, 100.0, 100.0] # m\n nmg_constant_slope.at_link[\"channel_width\"] = 15 * np.ones(\n nmg_constant_slope.size(\"link\")\n )\n nmg_constant_slope.at_link[\"flow_depth\"] = 2 * np.ones(\n nmg_constant_slope.size(\"link\")\n )\n\n flow_director = FlowDirectorSteepest(nmg_constant_slope)\n flow_director.run_one_step()\n\n time = [0.0]\n\n items = {\"grid_element\": \"link\", \"element_id\": np.array([[0]])}\n\n initial_volume = np.array([[1]])\n abrasion_rate = np.array([0])\n\n variables = {\n \"starting_link\": ([\"item_id\"], np.array([0])),\n \"abrasion_rate\": ([\"item_id\"], abrasion_rate),\n \"density\": ([\"item_id\"], np.array([2650])),\n \"time_arrival_in_link\": ([\"item_id\", \"time\"], np.array([[0.71518937]])),\n \"active_layer\": ([\"item_id\", \"time\"], np.array([[1]])),\n \"location_in_link\": ([\"item_id\", \"time\"], np.array([[0]])),\n \"D\": ([\"item_id\", \"time\"], np.array([[0.05]])),\n \"volume\": ([\"item_id\", \"time\"], initial_volume),\n }\n\n parcels = DataRecord(\n nmg_constant_slope,\n items=items,\n time=time,\n data_vars=variables,\n dummy_elements={\"link\": [NetworkSedimentTransporter.OUT_OF_NETWORK]},\n )\n\n nst = NetworkSedimentTransporter(\n nmg_constant_slope,\n parcels,\n flow_director,\n bed_porosity=0.03,\n g=9.81,\n fluid_density=1000,\n transport_method=\"WilcockCrowe\",\n )\n\n dt = 60 # (seconds) 1 min timestep\n\n nst.run_one_step(dt)\n\n # ONE TIMESTEP BEFORE PULSE\n # TIMESTEP 1 should have NANS.\n\n num_pulse_parcels = 2\n\n newpar_element_id = np.zeros(num_pulse_parcels, dtype=int)\n newpar_element_id = np.expand_dims(newpar_element_id, axis=1)\n\n new_starting_link = np.squeeze(newpar_element_id)\n\n np.random.seed(0)\n\n new_time_arrival_in_link = nst._time * np.ones(np.shape(newpar_element_id))\n\n new_volume = 0.5 * np.ones(\n np.shape(newpar_element_id)\n ) # (m3) the volume of each parcel\n\n new_lithology = [\"pulse_material\"] * np.size(\n newpar_element_id\n ) # a lithology descriptor for each parcel\n\n new_active_layer = np.ones(\n np.shape(newpar_element_id)\n ) # 1 = active/surface layer; 0 = subsurface layer\n\n new_density = 2650 * np.ones(np.size(newpar_element_id)) # (kg/m3)\n\n new_location_in_link = np.random.rand(np.size(newpar_element_id), 1)\n\n new_abrasion_rate = 0 * np.ones(np.size(newpar_element_id))\n\n new_D = 0.03 * np.ones(np.shape(newpar_element_id))\n\n newpar_grid_elements = np.array(\n np.empty((np.shape(newpar_element_id)), dtype=object)\n ) # BUG: should be able to pass [\"link\"], but datarecord fills it into an incorrect array shape-- the length of parcels (NOT new parcels)\n newpar_grid_elements.fill(\"link\")\n\n new_parcels = {\n \"grid_element\": newpar_grid_elements,\n \"element_id\": newpar_element_id,\n }\n\n 
new_variables = {\n \"starting_link\": ([\"item_id\"], new_starting_link),\n \"abrasion_rate\": ([\"item_id\"], new_abrasion_rate),\n \"density\": ([\"item_id\"], new_density),\n \"lithology\": ([\"item_id\"], new_lithology),\n \"time_arrival_in_link\": ([\"item_id\", \"time\"], new_time_arrival_in_link),\n \"active_layer\": ([\"item_id\", \"time\"], new_active_layer),\n \"location_in_link\": ([\"item_id\", \"time\"], new_location_in_link),\n \"D\": ([\"item_id\", \"time\"], new_D),\n \"volume\": ([\"item_id\", \"time\"], new_volume),\n }\n\n parcels.add_item(\n time=[nst._time], new_item=new_parcels, new_item_spec=new_variables\n )\n\n nst.run_one_step(dt)\n\n print(parcels.dataset.element_id.values)\n Parcel_element_id = parcels.dataset.element_id.values\n\n Parcel_element_id_Should_Be = np.array(\n [[0.0, 0.0, 0.0], [np.nan, 0.0, 0.0], [np.nan, 0.0, 0.0]]\n )\n\n assert_array_almost_equal(\n Parcel_element_id_Should_Be, Parcel_element_id, decimal=-1\n )\n"
] | [
[
"numpy.zeros",
"numpy.ones"
],
[
"matplotlib.collections.LineCollection",
"matplotlib.pyplot.gca"
],
[
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.array"
],
[
"numpy.random.seed",
"numpy.tan",
"numpy.size",
"numpy.random.rand",
"numpy.random.randint"
],
[
"numpy.argsort",
"numpy.sort",
"numpy.full"
],
[
"numpy.arange",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.round",
"numpy.array",
"numpy.zeros"
],
[
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.get_cmap",
"matplotlib.colors.Normalize"
],
[
"numpy.testing.assert_array_equal",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.sqrt",
"numpy.cos",
"numpy.sin",
"numpy.tan",
"numpy.floor",
"numpy.exp"
],
[
"numpy.expand_dims",
"numpy.random.seed",
"numpy.squeeze",
"numpy.size",
"numpy.shape",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lasdasdas/tensorflow | [
"673b993983f37f332ff70cdb642305f69089337d"
] | [
"tensorflow/python/distribute/parameter_server_strategy_v2.py"
] | [
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Parameter server strategy V2 class.\n\nThis is currently under development and the API is subject to change.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import distribute_utils\nfrom tensorflow.python.distribute import parameter_server_strategy\nfrom tensorflow.python.distribute import sharded_variable\nfrom tensorflow.python.eager import remote\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import server_lib\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"distribute.experimental.ParameterServerStrategy\", v1=[])\nclass ParameterServerStrategyV2(distribute_lib.Strategy):\n \"\"\"An multi-worker tf.distribute strategy with parameter servers.\n\n Parameter server training refers to the distributed training architecture that\n requires two types of tasks in the cluster: workers (referred to as \"worker\"\n task) and parameter servers (referred to as \"ps\" task). The variables and\n updates to those variables are placed on ps, and most computation intensive\n operations are placed on workers.\n\n In TF2, parameter server training makes use of one coordinator, with some\n number of workers, and (usually fewer) ps. The coordinator uses a\n `tf.distribute.experimental.coordinator.ClusterCoordinator` to coordinate the\n cluster, and a `tf.distribute.experimental.ParameterServerStrategy` for\n variable distribution. The coordinator does not perform the actual training.\n Each of the workers and ps runs a `tf.distribute.Server`, which the\n coordinator connects to through the use of aforementioned two APIs.\n\n For the training to work, the coordinator sends requests to workers for the\n `tf.function`s to be executed on remote workers. Upon receiving requests from\n the coordinator, a worker executes the `tf.function` by reading the variables\n from parameter servers, executing the ops, and updating the variables on the\n parameter servers. Each of the worker only processes the requests from the\n coordinator, and communicates with parameter servers, without direct\n interactions with any of the other workers in the cluster.\n\n As a result, failures of some workers do not prevent the cluster from\n continuing the work, and this allows the cluster to train with instances that\n can be occasionally unavailable (e.g. 
preemptible or spot instances). The\n coordinator and parameter servers though, must be available at all times for\n the cluster to make progress.\n\n Note that the coordinator is not one of the training worker. Instead, its\n responsibility includes placing variables on ps, remotely executing\n `tf.function`s on workers, and saving checkpoints. Parameter server training\n thus consists of a server cluster with worker and ps, and a coordinator which\n connects to them to coordinate. Optionally, an evaluator can be run on the\n side that periodically reads the checkpoints saved by the coordinator, and\n saves summaries for example.\n\n `tf.distribute.experimental.ParameterServerStrategy` works closely with the\n associated `tf.distribute.experimental.coordinator.ClusterCoordinator` object,\n and should be used in conjunction with it. Standalone usage of\n `tf.distribute.experimental.ParameterServerStrategy` without a\n `tf.distribute.experimental.coordinator.ClusterCoordinator` indicates\n a parameter server training scheme without a centralized coordinator, which is\n not supported at this time.\n\n __Example code for coordinator__\n\n Here's an example usage of the API, with a custom training loop to train a\n model. This code snippet is intended to be run on (the only) one machine that\n is designated as the coordinator. Note that `cluster_resolver`,\n `variable_partitioner`, and `dataset_fn` arguments are explained in the\n following \"Cluster setup\", \"Variable partitioning\", and \"Dataset preparation\"\n sections.\n\n Currently, environment variable `GRPC_FAIL_FAST` needs to be set in all tasks\n to work around a known hanging issue as the following code illustrates:\n\n ```python\n # Set the environment variable to allow reporting worker and ps failure to the\n # coordinator.\n os.environ[\"GRPC_FAIL_FAST\"] = \"use_caller\"\n\n # Prepare a strategy to use with the cluster and variable partitioning info.\n strategy = tf.distribute.experimental.ParameterServerStrategy(\n cluster_resolver=...,\n variable_partitioner=...)\n coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(\n strategy=strategy)\n\n # Prepare a distribute dataset that will place datasets on the workers.\n distributed_dataset = coordinator.create_per_worker_dataset(dataset_fn=...)\n\n with strategy.scope():\n model = ... # Variables created can possibly be container of variables\n optimizer, metrics = ... # Keras optimizer/metrics are great choices\n checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint, checkpoint_dir, max_to_keep=2)\n # `load_checkpoint` infers initial epoch from `optimizer.iterations`.\n initial_epoch = load_checkpoint(checkpoint_manager) or 0\n\n @tf.function\n def worker_fn(iterator):\n\n def replica_fn(inputs):\n batch_data, labels = inputs\n # calculate gradient, applying gradient, metrics update etc.\n\n strategy.run(replica_fn, args=(next(iterator),))\n\n for epoch in range(initial_epoch, num_epoch):\n distributed_iterator = iter(distributed_dataset) # Reset iterator state.\n for step in range(steps_per_epoch):\n\n # Asynchronously schedule the `worker_fn` to be executed on an arbitrary\n # worker. This call returns immediately.\n coordinator.schedule(worker_fn, args=(distributed_iterator,))\n\n # `join` blocks until all scheduled `worker_fn`s finish execution. 
Once it\n # returns, we can read the metrics and save checkpoints as needed.\n coordinator.join()\n logging.info('Metric result: %r', metrics.result())\n train_accuracy.reset_states()\n checkpoint_manager.save()\n ```\n\n __Example code for worker and parameter servers__\n\n In addition to the coordinator, there should be multiple machines designated\n as \"worker\" or \"ps\". They should run the following code to start a TensorFlow\n server, waiting for coordinator's request to execute functions or place\n variables:\n\n ```python\n # Set the environment variable to allow reporting worker and ps failure to the\n # coordinator.\n os.environ[\"GRPC_FAIL_FAST\"] = \"use_caller\"\n\n # Provide a `tf.distribute.cluster_resolver.ClusterResolver` that serves\n # the cluster information. See below \"Cluster setup\" section.\n cluster_resolver = ...\n\n server = tf.distribute.Server(\n cluster_resolver.cluster_spec().as_cluster_def(),\n job_name=cluster_resolver.task_type,\n task_index=cluster_resolver.task_id,\n protocol=protocol)\n\n # Blocking the process that starts a server from exiting.\n server.join()\n ```\n\n __Cluster setup__\n\n In order for the tasks in the cluster to know other tasks' addresses,\n a `tf.distribute.cluster_resolver.ClusterResolver` is required to be used\n in coordinator, worker, and ps. The\n `tf.distribute.cluster_resolver.ClusterResolver` is responsible for providing\n the cluster information, as well as the task type and id of the current task.\n See `tf.distribute.cluster_resolver.ClusterResolver` for more information.\n\n If `TF_CONFIG` environment variable is used for the processes to know the\n cluster information, a\n `tf.distribute.cluster_resolver.TFConfigClusterResolver` should be used. Note\n that for legacy reason, \"chief\" should be used as the task type for the\n coordinator, as the following example demonstrates. Here we set `TF_CONFIG`\n in environment variable, intended to be run by the process of the machine\n designated as the parameter server (task type \"ps\") and index 1 (the second),\n in a cluster with 1 chief, 2 parameter servers, and 3 workers. 
Note that the\n it needs to be set before the use of\n `tf.distribute.cluster_resolver.TFConfigClusterResolver`.\n\n Example code for cluster setup:\n ```python\n os.environ['TF_CONFIG'] = '''\n {\n \"cluster\": {\n \"chief\": [\"chief.example.com:2222\"],\n \"ps\": [\"ps0.example.com:2222\", \"ps1.example.com:2222\"],\n \"worker\": [\"worker0.example.com:2222\", \"worker1.example.com:2222\",\n \"worker2.example.com:2222\"]\n },\n \"task\": {\n \"type\": \"ps\",\n \"index\": 1\n }\n }\n '''\n os.environ[\"GRPC_FAIL_FAST\"] = \"use_caller\"\n cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()\n\n # If coordinator (\"chief\" task type), create a strategy\n if cluster_resolver.task_type == 'chief':\n strategy = tf.distribute.experimental.ParameterServerStrategy(\n cluster_resolver)\n ...\n\n # If worker/ps, create a server\n elif cluster_resolver.task_type in (\"worker\", \"ps\"):\n server = tf.distribute.Server(...)\n ...\n ```\n\n __Variable creation with `strategy.scope()`__\n\n `tf.distribute.experimental.ParameterServerStrategy` follows the\n `tf.distribute` API contract where variable creation is expected to be inside\n the context manager returned by `strategy.scope()`, in order to be correctly\n placed on parameter servers in a round-robin manner:\n\n ```python\n # In this example, we're assuming having 3 ps.\n strategy = tf.distribute.experimental.ParameterServerStrategy(\n cluster_resolver=...)\n coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(\n strategy=strategy)\n\n # Variables should be created inside scope to be placed on parameter servers.\n # If created outside scope such as `v1` here, it would be placed on\n coordinator.\n v1 = tf.Variable(initial_value=0.0)\n\n with strategy.scope():\n v2 = tf.Variable(initial_value=1.0)\n v3 = tf.Variable(initial_value=2.0)\n v4 = tf.Variable(initial_value=3.0)\n v5 = tf.Variable(initial_value=4.0)\n\n # v2 through v5 are created in scope and are distributed on parameter servers.\n # Default placement is round-robin but the order should not be relied on.\n assert v2.device == \"/job:ps/replica:0/task:0/device:CPU:0\"\n assert v3.device == \"/job:ps/replica:0/task:1/device:CPU:0\"\n assert v4.device == \"/job:ps/replica:0/task:2/device:CPU:0\"\n assert v5.device == \"/job:ps/replica:0/task:0/device:CPU:0\"\n ```\n\n See `distribute.Strategy.scope` for more information.\n\n __Variable partitioning__\n\n Having dedicated servers to store variables means being able to divide up, or\n \"shard\" the variables across the ps. Large embeddings that would otherwise\n exceed memory limit of a single machine can be used in a cluster with enough\n number of ps.\n\n With `tf.distribute.experimental.ParameterServerStrategy`, if a\n `variable_partitioner` is provided to `__init__` and certain conditions are\n satisfied, the resulting variables created in scope are sharded across the\n parameter servers, in a round-robin fashion. The variable reference returned\n from `tf.Variable` becomes a type that serves as the container of the sharded\n variables. Access `variables` attribute of this container for the actual\n variable components. 
See arguments section of\n `tf.distribute.experimental.ParameterServerStrategy.__init__` for more\n information.\n\n To initialize the sharded variables in a more memory-efficient way, use an\n initializer whose `__call__` accepts a `shard_info` argument, and use\n `shard_info.offset` and `shard_info.shape` to create and return a\n partition-aware `tf.Tensor` to initialize the variable components.\n\n ```python\n class PartitionAwareIdentity(object):\n\n def __call__(self, shape, dtype, shard_info):\n value = tf.eye(*shape, dtype=dtype)\n if shard_info is not None:\n value = tf.slice(value, shard_info.offset, shard_info.shape)\n return value\n\n cluster_resolver = ...\n strategy = tf.distribute.experimental.ParameterServerStrategy(\n cluster_resolver, tf.fixed_size_partitioner(2))\n with strategy.scope():\n initializer = PartitionAwareIdentity()\n initial_value = functools.partial(initializer, shape=(4, 4), dtype=tf.int64)\n v = tf.Variable(\n initial_value=initial_value, shape=(4, 4), dtype=tf.int64)\n\n # `v.variables` gives the actual variable components.\n assert len(v.variables) == 2\n assert v.variables[0].device == \"/job:ps/replica:0/task:0/device:CPU:0\"\n assert v.variables[1].device == \"/job:ps/replica:0/task:1/device:CPU:0\"\n assert np.array_equal(v.variables[0].numpy(), [[1, 0, 0, 0], [0, 1, 0, 0]])\n assert np.array_equal(v.variables[1].numpy(), [[0, 0, 1, 0], [0, 0, 0, 1]])\n ```\n\n __Dataset preparation__\n\n With `tf.distribute.experimental.ParameterServerStrategy`, a dataset is\n created in each of the workers to be used for training. This is done by\n creating a `dataset_fn` that takes no argument and returns a\n `tf.data.Dataset`, and passing the `dataset_fn` into\n `tf.distribute.experimental.coordinator.\n ClusterCoordinator.create_per_worker_dataset`. We recommend the dataset to be\n shuffled and repeated to have the examples run through the training as evenly\n as possible.\n\n ```python\n def dataset_fn():\n filenames = ...\n dataset = tf.data.Dataset.from_tensor_slices(filenames)\n\n # Dataset is recommended to be shuffled, and repeated.\n return dataset.shuffle(buffer_size=...).repeat().batch(batch_size=...)\n\n coordinator =\n tf.distribute.experimental.coordinator.ClusterCoordinator(strategy=...)\n distributed_dataset = coordinator.create_per_worker_dataset(dataset_fn)\n\n ```\n\n __Limitations__\n\n * `tf.distribute.experimental.ParameterServerStrategy` in TF2 is experimental,\n and the API is subject to further changes.\n\n * `tf.distribute.experimental.ParameterServerStrategy` does not yet support\n training with GPU(s). This is a feature request being developed.\n\n * `tf.distribute.experimental.ParameterServerStrategy` only supports\n [custom training loop\n API](https://www.tensorflow.org/tutorials/distribute/custom_training)\n currently in TF2. Usage of it with Keras `compile`/`fit` API is being\n developed.\n\n * `tf.distribute.experimental.ParameterServerStrategy` must be used with\n `tf.distribute.experimental.coordinator.ClusterCoordinator`.\n\n * This strategy is not intended for TPU. 
Use\n `tf.distribute.experimental.TPUStrategy` instead.\n \"\"\"\n\n # pyformat: disable\n def __init__(self, cluster_resolver, variable_partitioner=None):\n \"\"\"Initializes the TF2 parameter server strategy.\n\n This initializes the `tf.distribute.experimental.ParameterServerStrategy`\n object to be ready for use with\n `tf.distribute.experimental.coordinator.ClusterCoordinator`.\n\n Args:\n cluster_resolver: a `tf.distribute.cluster_resolver.ClusterResolver`\n object.\n variable_partitioner:\n a callable with the signature `num_partitions = fn(shape, dtype)`, where\n `num_partitions` is a list/tuple representing the number of partitions\n on each axis, and `shape` and `dtype` are of types `tf.TensorShape` and\n `tf.dtypes.Dtype`. If `None`, variables will not be partitioned.\n\n * `variable_partitioner` will be called for all variables created under\n strategy `scope` to instruct how the variables should be partitioned.\n Variables will be created in multiple partitions if there are more than\n one partition along the partitioning axis, otherwise it falls back to\n normal `tf.Variable`.\n\n * Only the first / outermost axis partitioning is supported, namely,\n elements in `num_partitions` must be 1 other than the first element.\n\n * Partitioner like `tf.compat.v1.min_max_variable_partitioner`,\n `tf.compat.v1.variable_axis_size_partitioner` and\n `tf.compat.v1.fixed_size_partitioner` are also supported since they\n conform to the required signature.\n\n * Div partition\n strategy is used to partition variables. Assuming we assign consecutive\n integer ids along the first axis of a variable, then ids are assigned to\n shards in a contiguous manner, while attempting to keep each shard size\n identical. If the ids do not evenly divide the number of shards, each of\n the first several shards will be assigned one more id. 
For instance, a\n variable whose first dimension is 13 has 13 ids, and they are split\n across 5 shards as:\n `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`.\n\n * Variables created under `strategy.extended.colocate_vars_with` will\n not be partitioned, e.g, optimizer's slot variables.\n \"\"\"\n # pyformat: enable\n self._cluster_resolver = cluster_resolver\n self._extended = ParameterServerStrategyV2Extended(self, cluster_resolver,\n variable_partitioner)\n self._verify_args_and_config(cluster_resolver)\n logging.info(\n \"`tf.distribute.experimental.ParameterServerStrategy` is initialized \"\n \"with cluster_spec: %s\", cluster_resolver.cluster_spec())\n\n # TODO(b/167894802): Make coordinator, worker, and ps names customizable.\n self._connect_to_cluster(coordinator_name=\"chief\")\n super(ParameterServerStrategyV2, self).__init__(self._extended)\n distribute_lib.distribution_strategy_gauge.get_cell(\"V2\").set(\n \"ParameterServerStrategy\")\n\n def _connect_to_cluster(self, coordinator_name):\n if coordinator_name in [\"worker\", \"ps\"]:\n raise ValueError(\"coordinator name should not be 'worker' or 'ps'.\")\n cluster_spec = self._cluster_resolver.cluster_spec()\n self._num_workers = len(cluster_spec.as_dict().get(\"worker\", ()))\n self._num_ps = len(cluster_spec.as_dict().get(\"ps\", ()))\n\n device_filters = server_lib.ClusterDeviceFilters()\n # For any worker, only the devices on ps and coordinator nodes are visible\n for i in range(self._num_workers):\n device_filters.set_device_filters(\n \"worker\", i, [\"/job:ps\", \"/job:%s\" % coordinator_name])\n # Similarly for any ps, only the devices on workers and coordinator are\n # visible\n for i in range(self._num_ps):\n device_filters.set_device_filters(\n \"ps\", i, [\"/job:worker\", \"/job:%s\" % coordinator_name])\n\n # Allow at most one outstanding RPC for each worker at a certain time. 
This\n # is to simplify worker failure handling in the runtime\n os.environ[\"TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE\"] = \"False\"\n\n logging.info(\"%s is now connecting to cluster with cluster_spec: %r\",\n self.__class__.__name__, cluster_spec)\n remote.connect_to_cluster(\n cluster_spec,\n job_name=coordinator_name,\n protocol=self._cluster_resolver.rpc_layer,\n cluster_device_filters=device_filters)\n\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"ps_strategy_num_workers\").set(self._num_workers)\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"ps_strategy_num_ps\").set(self._num_ps)\n\n def _verify_args_and_config(self, cluster_resolver):\n if not cluster_resolver.cluster_spec():\n raise ValueError(\"Cluster spec must be non-empty in `cluster_resolver`.\")\n if self.extended._num_gpus_per_worker > 1: # pylint: disable=protected-access\n raise NotImplementedError(\"Multi-gpu is not supported yet.\")\n\n\nclass ParameterServerStrategyV2Extended(\n parameter_server_strategy.ParameterServerStrategyExtended):\n \"\"\"Extended class for ParameterServerStrategyV2.\n\n Please see `tf.distribute.StrategyExtended` doc for more information.\n \"\"\"\n\n def __init__(self, container_strategy, cluster_resolver,\n variable_partitioner):\n \"\"\"Initialization of ParameterServerStrategyV2Extended.\"\"\"\n super(ParameterServerStrategyV2Extended, self).__init__(container_strategy)\n self._num_ps = len(cluster_resolver.cluster_spec().as_dict().get(\"ps\", []))\n self._variable_count = 0\n self._variable_partitioner = variable_partitioner\n\n def _create_variable(self, next_creator, **kwargs):\n \"\"\"Implements StrategyExtendedV2._create_variable.\n\n Creates a `Variable` or a `ShardedVariable`. A `ShardedVariable` will be\n created if satisfying all the following criteria:\n 1. `self._variable_partitioner` results in more than one partition on the\n first axis.\n 2. variable's rank is greater than 0.\n 3. variable is not colocated with another variable.\n Otherwise a `Variable` will be created.\n\n Args:\n next_creator: See `variable_scope.variable_creator_scope`; the next\n creator in the chain.\n **kwargs: Passed through to the next creator.\n\n Returns:\n A `Variable` or `ShardedVariable`.\n \"\"\"\n\n if \"colocate_with\" in kwargs: # Never partition colocated_with variables.\n colocate_with = kwargs[\"colocate_with\"]\n # Clear the variable scope to avoid possible conflicts between device\n # scope and colocation scope.\n with ops.device(None):\n with ops.colocate_with(colocate_with):\n var = next_creator(**kwargs)\n logging.debug(\n \"Creating variable (name:%s, shape:%r) that colocates with %s\",\n var.name, var.shape, kwargs[\"colocate_with\"].name)\n return var\n\n if self._variable_partitioner is None:\n return self._create_variable_round_robin(next_creator, **kwargs)\n\n name = kwargs.get(\"name\", None)\n initial_value = kwargs.get(\"initial_value\", None)\n if initial_value is None:\n raise ValueError(\"initial_value must be specified.\")\n\n # Two cases where initial_value can be a callable:\n # 1. initial_value is passed as a callable, e.g, an `initializer` class.\n # 2. 
restoring from checkpoint, initial_value is a\n # \"CheckpointInitialValueCallable\".\n init_from_fn = callable(initial_value)\n\n dtype = kwargs.get(\"dtype\", None)\n shape = kwargs.get(\"shape\", None)\n if init_from_fn and (shape is None or dtype is None):\n init_from_fn = False\n initial_value = initial_value()\n if not init_from_fn:\n # The initial_value is created on coordinator, it will need to be sent to\n # ps for variable initialization, which can be inefficient and can\n # potentially hit the 2GB limit on protobuf serialization.\n initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)\n dtype = initial_value.dtype\n shape = initial_value.shape\n else:\n shape = tensor_shape.as_shape(shape)\n\n if shape.rank == 0: # Skip partitioning rank-0 variable.\n return self._create_variable_round_robin(next_creator, **kwargs)\n\n num_partitions = self._variable_partitioner(shape=shape, dtype=dtype)\n if not num_partitions or num_partitions[0] == 0 or any(\n v != 1 for v in num_partitions[1:]):\n raise ValueError(\n \"variable_partitioner must return a list/tuple whose elements are 1\"\n \" besides the first element (non-zero), got: %r\" % num_partitions)\n\n if num_partitions[0] == 1: # no partition\n return self._create_variable_round_robin(next_creator, **kwargs)\n\n # Use \"div\" partition strategy to partition the variable.\n num_partitions = min(num_partitions[0], shape[0])\n base = shape[0] // num_partitions\n extra = shape[0] % num_partitions\n # An example: num_partitions=4, shape[0]=10, partitions: [3, 3, 2, 2]\n # offsets: [0, 3, 6, 8, 10]\n offsets = []\n for i in range(num_partitions):\n if i == 0:\n offsets.append(0)\n else:\n prev_shard_size = base + (1 if i - 1 < extra else 0)\n offsets.append(offsets[i - 1] + prev_shard_size)\n offsets.append(shape[0])\n\n def init_shard_fn(shard_index):\n if not init_from_fn:\n logging.log_if(\n logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and\n shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)\n return initial_value[offsets[shard_index]:offsets[shard_index + 1]]\n arg_spec = tf_inspect.getfullargspec(initial_value)\n if (\"shard_info\" not in arg_spec.args and\n \"shard_info\" not in arg_spec.kwonlyargs):\n # `initial_value` is a callable that doesn't accept `shard_info`.\n logging.log_if(\n logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and\n shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)\n full_value = initial_value()\n return full_value[offsets[shard_index]:offsets[shard_index + 1]]\n else:\n # Memory-efficient way of initializing sharded variable. 
It requires\n # the `init_fn` to accept a namedtuple `shard_info`.\n component_shape = (offsets[shard_index + 1] -\n offsets[shard_index],) + shape[1:]\n offsets_all_axes = (offsets[shard_index],) + (0,) * len(shape[1:])\n return initial_value(\n shard_info=trackable.ShardInfo(\n shape=tensor_shape.as_shape(component_shape),\n offset=offsets_all_axes))\n\n var_list = []\n for i in range(num_partitions):\n kwargs[\"shape\"] = (offsets[i + 1] - offsets[i],) + shape[1:]\n kwargs[\"initial_value\"] = lambda: init_shard_fn(i)\n if name is not None:\n kwargs[\"name\"] = \"{}/part_{}\".format(name, i)\n var_list.append(self._create_variable_round_robin(next_creator, **kwargs))\n\n result = sharded_variable.ShardedVariable(var_list)\n return result\n\n def _create_variable_round_robin(self, next_creator, **kwargs):\n # Clear the colocation scope to avoid possible conflicts between device\n # scope and colocation scope.\n with ops.colocate_with(None, ignore_existing=True):\n with ops.device(\"/job:ps/task:%d\" %\n (self._variable_count % self._num_ps)):\n var = next_creator(**kwargs)\n logging.debug(\n \"Creating variable (name:%s, shape:%r) on /job:ps/task:%d\",\n var.name, var.shape, (self._variable_count % self._num_ps))\n self._variable_count += 1\n return var\n\n def _call_for_each_replica(self, fn, args, kwargs):\n with distribute_lib.ReplicaContext(\n self._container_strategy(),\n replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):\n # TODO(rchao): Support multi-replica per worker or sync-group.\n return distribute_utils.regroup((fn(*args, **kwargs),))\n\n\n# The warning that will be logged if the way we initialize sharded variables\n# is memory-inefficient.\n_INEFFICIENT_INIT_WARNING = (\n \"Large variable %s is partitioned but not initialized in a memory-efficient\"\n \" way. The full value is first being created and then sliced into smaller \"\n \"values. To reduce the memory footprint, explicitly specify `dtype` and \"\n \"`shape` when creating variables, and pass a callable to Variable's \"\n \"`initial_value`. The callable should take only one argument which is a \"\n \"namedtuple (shape: `tf.TensorShape`, offsets: list/tuple) where shape is \"\n \"the shape of the component variable, and offsets is the offsets of the \"\n \"smaller variable on each axis.\")\n\n_LARGE_VARIABLE_NUM_ELEMENTS = 1e9\n"
] | [
[
"tensorflow.python.framework.ops.device",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_replica_gauge.get_cell",
"tensorflow.python.platform.tf_logging.debug",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.distribute.sharded_variable.ShardedVariable",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_gauge.get_cell",
"tensorflow.python.training.server_lib.ClusterDeviceFilters",
"tensorflow.python.eager.remote.connect_to_cluster",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
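The `variable_partitioner` docstring in the row above describes the "div" sharding rule with a worked example (13 ids split over 5 shards). The following standalone sketch reproduces the offset computation that `_create_variable` performs; the helper name `div_partition_offsets` is illustrative only and not a TensorFlow API.

```python
# Minimal sketch of the "div" offset computation described above.
def div_partition_offsets(first_dim, num_partitions):
    num_partitions = min(num_partitions, first_dim)
    base = first_dim // num_partitions    # minimum shard size
    extra = first_dim % num_partitions    # first `extra` shards get one more id
    offsets = [0]
    for i in range(1, num_partitions):
        prev_shard_size = base + (1 if i - 1 < extra else 0)
        offsets.append(offsets[i - 1] + prev_shard_size)
    offsets.append(first_dim)
    return offsets

# Matches the docstring example: 13 ids over 5 shards -> sizes [3, 3, 3, 2, 2].
assert div_partition_offsets(13, 5) == [0, 3, 6, 9, 11, 13]
```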
SeanTater/rsvp | [
"2f136ee8aac2d9401427d4d4f0d78f74eb757f15"
] | [
"demo.py"
] | [
"#!/usr/bin/python3\nimport cv2\nimport numpy as np\nimport sqlite3\nimport time\n\nclass QualityCheck:\n def __init__(self):\n \"\"\" Record model runs \"\"\"\n self.db = sqlite3.connect(\"logs.db\", isolation_level=None)\n self.db.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS ModelRun(\n run_id INT,\n num_embed INT,\n original_size INT,\n compressed_size INT,\n mse REAL,\n psnr REAL,\n cube_shape TEXT\n );\n \"\"\")\n self.run_id = int(time.time())\n \n \n def quality_check(self, original, compressed, new, num_embed, cube_shape):\n \"\"\" Compare pixel to pixel quality of each frame\n\n Accepts\n -------\n original (np.array): array containing original video\n compressed (bytes): compressed video bytes\n new (np.array): array containing new video\n num_embed (int): dimensionality of embedding\n\n \"\"\" \n comp_ratio = len(compressed) / original.size\n mse = float(((new - original)**2).mean())\n psnr = 20 * np.log(255) / np.log(10) - 10 * np.log(mse) / np.log(10)\n\n self.db.execute(\n \"INSERT INTO ModelRun(run_id, num_embed, original_size, compressed_size, mse, psnr, cube_shape)\"\n \" VALUES (?,?,?,?,?,?,?);\",\n (self.run_id, num_embed, original.size, len(compressed), mse, psnr, cube_shape)\n )\n\nclass CubeStep:\n def __init__(self, cube_shape=(8,8,8), video_shape=None):\n self.video_shape = video_shape\n self.depth, self.width, self.height = self.cube_shape = cube_shape\n self.colors = 3 # Purely for clarity\n self.cube_size = 3\n for d in cube_shape:\n self.cube_size *= d\n \n def to_cubes(self, images):\n \"\"\" Shatter a series of frames into 8x8x8x3 tensors\n\n Accepts\n -------\n images : np.array of shape (frame_count, width, height, colors) and dtype uint8\n The sequence of images to analyze.\n The image shape must be a multiple of the cube size, no padding will be done.\n \n Returns\n -------\n np.array of (d, w, h, self.depth, self.width, self.height, self.colors)\n where d, w, h are the time, width, and height of the video measured in cubes\n \"\"\"\n if self.video_shape is not None and self.video_shape != images.shape:\n raise ValueError(\"Video shape does not match input shape.\")\n else:\n self.video_shape = images.shape\n frame_count, width, height, colors = self.video_shape\n t = images.reshape((\n frame_count // self.depth,\n self.depth,\n width // self.width,\n self.width,\n height // self.height,\n self.height,\n colors\n ))\n # Make it a (x,y,z,8,8,8,3) tensor so it's a prism of cubes, by color\n return np.moveaxis(t, [0,2,4,1,3,5,6], [0,1,2,3,4,5,6])\n\n def to_matrix(self, cubes):\n \"\"\" Flatten a cube of cubes into a single matrix\n\n Accepts\n -------\n cubes : np.array of (d, w, h, self.depth, self.width, self.height, self.colors)\n where d, w, h are the time, width, and height of the video measured in cubes\n \n Returns\n -------\n np.array of (cube_count, cube_volume)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \"\"\"\n # Copy improves locality later\n return cubes.reshape((-1, self.cube_size)).copy()\n\n def from_cubes(self, cubes):\n \"\"\" Convert cubes back into normal frames\n \n Accepts\n -------\n cubes : np.array of (d, w, h, self.depth, self.width, self.height, self.colors)\n where d, w, h are the time, width, and height of the video measured in cubes\n \n Returns\n -------\n np.array of shape (frame_count, width, height, colors) and dtype float32 (not uint8)\n The sequence of images to analyze.\n The image shape must be 
a multiple of the cube size, no padding will be done.\n \"\"\"\n d,w,h = cubes.shape[:3]\n uncube = np.moveaxis(cubes, [0,1,2,3,4,5,6], [0,2,4,1,3,5,6])\n return uncube.reshape((d*self.depth, w*self.width, h*self.height, self.colors))\n \n def from_matrix(self, samples):\n \"\"\" Convert cubes back into normal frames\n \n Accepts\n -------\n samples : np.array of (cube_count, cube_volume)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \n \n Returns\n -------\n np.array of (d, w, h, self.depth, self.width, self.height, self.colors)\n where d, w, h are the time, width, and height of the video measured in cubes\n \"\"\"\n if self.video_shape is None:\n raise ValueError(\"Video shape unspecified when converting from matrix.\")\n frame_count, width, height, colors = self.video_shape\n return samples.reshape((\n frame_count // self.depth,\n width // self.width,\n height // self.height,\n self.depth,\n self.width,\n self.height,\n colors\n ))\n\n\n\ndef webm_forward():\n vc = cv2.VideoCapture()\n vc.open(\"snippet.webm\")\n images = np.stack([vc.read()[1] for i in range(24)])\n # Divide into 8x8x8 cubes\n frame_count, width, height, colors = images.shape\n \n assert width % 8 == 0 and height % 8 == 0, \"The image dimensions must be divisible by 8\"\n vc.release()\n return images.astype(np.float32)\n\ndef webm_backward(images):\n \"\"\" Save images to a video file as VP8\n\n Accepts\n -------\n images : np.array of shape (frame_count, width, height, colors) and dtype uint8\n The sequence of images to analyze.\n The width, height, and frame count must all be multiples of 8.\n \"\"\"\n d,w,h = images.shape[:3]\n vw = cv2.VideoWriter(\"undemo.mkv\", cv2.VideoWriter_fourcc(*'VP80'), 5, (h,w))\n for frameid in range(d):\n vw.write(images[frameid])\n vw.release()\n\n\n\n\ndef svd_prep(samples, embed=50, subsample=13):\n \"\"\" Create a base to summarize video segments\n\n Accepts\n -------\n samples : np.array of (cube_count, cube_volume)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \n Returns\n -------\n np.array of shape (cube_volume, embed)\n \"\"\"\n\n # Try a basic SVD approximation\n #base = np.random.uniform(0, 1, (8*8*8*colors, embed))\n #breakpoint()\n #base = np.linalg.qr(np.dot(samples, base))[0]\n #base = np.linalg.qr(np.dot(samples, base))[0]\n subsample = samples[np.random.randint(0, samples.shape[0], 250)]\n return np.linalg.svd(subsample, full_matrices=False)[2][:embed].T\n\ndef svd_forward(base, cubes):\n \"\"\" Encode images with eigenvectors, using an existing base\n\n Accepts\n -------\n base : np.array of shape (cube_volume, embed)\n Linear basis for description of video segments, generated by svd_prep()\n cubes : np.array of (cube_count, cube_volume)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n\n Returns\n -------\n np.array of (cube_count, embed)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \"\"\"\n return np.dot(cubes, base)\n\ndef svd_backward(base, proj):\n \"\"\" Decode cubes back from embedding\n \n Accepts\n -------\n base : np.array of shape (cube_volume, embed)\n Linear basis 
for description of video segments, generated by svd_prep()\n proj : np.array of (cube_count, embed)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \n Returns\n -------\n np.array of (cube_count, cube_volume)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \n \"\"\"\n reproj = np.dot(proj, base.T)\n return np.clip(reproj, 0, 255)\n\ndef buf_forward(proj):\n \"\"\" Save projections to a bytestring for later reconstruction\n \n Accepts\n -------\n proj : np.array\n The projection to store as a byte array\n \n Returns\n -------\n A byte array representing that file. In machine order.\n \"\"\"\n sign = np.sign(proj)\n proj = sign * np.clip(np.log1p(np.abs(proj)) * 14, 0, 127)\n return (\n np.array(proj.shape).astype(np.uint32).tobytes().ljust(32, b\"\\x00\")\n + proj.astype(np.int8).tobytes()\n )\n\ndef buf_backward(buf):\n \"\"\" load projections from a bytestring\n \n Accepts\n -------\n proj : np.array\n The projection to store as a byte array\n \n Returns\n -------\n proj : np.array\n the projection previously saves\n \"\"\"\n shape = tuple(x for x in np.frombuffer(buf[:32], dtype=np.uint32) if x > 0)\n proj = (\n np.frombuffer(buf, dtype=np.int8, offset=32)\n .reshape(shape)\n .astype(np.float32)\n )\n return np.sign(proj) * np.expm1(np.abs(proj)/14)\n\ndef run_pipeline(num_embed):\n cube_step = CubeStep(cube_shape=(2,2,2))\n cubes = cube_step.to_matrix(cube_step.to_cubes(webm_forward()))\n base = svd_prep(cubes, embed=num_embed)\n proj = svd_forward(base, cubes)\n buf = buf_forward(proj)\n base_buf = buf_forward(base)\n open(\"proj.arr\", \"wb\").write(buf)\n open(\"base.arr\", \"wb\").write(base_buf)\n \n # At this point everything is stored in buf and base_buf\n proj = buf_backward(buf)\n #base = buf_backward(base_buf)\n reproj = svd_backward(base, proj)\n newimages = cube_step.from_cubes(cube_step.from_matrix(reproj))\n #webm_backward(newimages.astype(np.uint8))\n return (cubes, base_buf+buf, reproj, num_embed)\n\n \nif __name__ == \"__main__\":\n qlog = QualityCheck()\n for n in range(1,50,4):\n qlog.quality_check(*run_pipeline(n), cube_shape=\"2,2,2\")\n print(f\"Finished {n}\")\n\n"
] | [
[
"numpy.dot",
"numpy.log",
"numpy.linalg.svd",
"numpy.abs",
"numpy.clip",
"numpy.sign",
"numpy.frombuffer",
"numpy.moveaxis",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
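The `buf_forward`/`buf_backward` pair in `demo.py` above stores SVD projections as int8 by log-compressing magnitudes. A minimal round-trip sketch of that scheme follows; the function names and sample values are illustrative, not part of the repository.

```python
import numpy as np

# Sketch of the signed log quantization used by buf_forward/buf_backward above:
# encode as sign(x) * clip(14 * log1p(|x|), 0, 127) in int8, decode with expm1.
def quantize(x):
    return (np.sign(x) * np.clip(np.log1p(np.abs(x)) * 14, 0, 127)).astype(np.int8)

def dequantize(q):
    q = q.astype(np.float32)
    return np.sign(q) * np.expm1(np.abs(q) / 14)

x = np.array([-200.0, -1.5, 0.0, 3.0, 5000.0], dtype=np.float32)
approx = dequantize(quantize(x))  # lossy, but sign and ordering are preserved
```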
pschafhalter/pylot | [
"712fd504f9e2669cfc9876eaed4954fbf2b31f20"
] | [
"pylot/perception/detection/detection_operator.py"
] | [
"\"\"\"Implements an operator that detects obstacles.\"\"\"\nimport logging\nimport time\n\nimport erdos\n\nimport numpy as np\n\nimport pylot.utils\nfrom pylot.perception.detection.obstacle import Obstacle\nfrom pylot.perception.detection.utils import BoundingBox2D, \\\n OBSTACLE_LABELS, load_coco_bbox_colors, load_coco_labels\nfrom pylot.perception.messages import ObstaclesMessage\n\nimport tensorflow as tf\n\n\nclass DetectionOperator(erdos.Operator):\n \"\"\"Detects obstacles using a TensorFlow model.\n\n The operator receives frames on a camera stream, and runs a model for each\n frame.\n\n Args:\n camera_stream (:py:class:`erdos.ReadStream`): The stream on which\n camera frames are received.\n obstacles_stream (:py:class:`erdos.WriteStream`): Stream on which the\n operator sends\n :py:class:`~pylot.perception.messages.ObstaclesMessage` messages.\n model_path(:obj:`str`): Path to the model pb file.\n flags (absl.flags): Object to be used to access absl flags.\n \"\"\"\n def __init__(self, camera_stream, time_to_decision_stream,\n obstacles_stream, model_path, flags):\n camera_stream.add_callback(self.on_msg_camera_stream,\n [obstacles_stream])\n time_to_decision_stream.add_callback(self.on_time_to_decision_update)\n self._flags = flags\n self._logger = erdos.utils.setup_logging(self.config.name,\n self.config.log_file_name)\n self._detection_graph = tf.Graph()\n # Load the model from the model file.\n pylot.utils.set_tf_loglevel(logging.ERROR)\n with self._detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(model_path, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n self._gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=flags.\n obstacle_detection_gpu_memory_fraction)\n # Create a TensorFlow session.\n self._tf_session = tf.Session(\n graph=self._detection_graph,\n config=tf.ConfigProto(gpu_options=self._gpu_options))\n # Get the tensors we're interested in.\n self._image_tensor = self._detection_graph.get_tensor_by_name(\n 'image_tensor:0')\n self._detection_boxes = self._detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n self._detection_scores = self._detection_graph.get_tensor_by_name(\n 'detection_scores:0')\n self._detection_classes = self._detection_graph.get_tensor_by_name(\n 'detection_classes:0')\n self._num_detections = self._detection_graph.get_tensor_by_name(\n 'num_detections:0')\n self._coco_labels = load_coco_labels(self._flags.path_coco_labels)\n self._bbox_colors = load_coco_bbox_colors(self._coco_labels)\n # Unique bounding box id. 
Incremented for each bounding box.\n self._unique_id = 0\n # Serve some junk image to load up the model.\n self.__run_model(np.zeros((108, 192, 3)))\n\n @staticmethod\n def connect(camera_stream, time_to_decision_stream):\n \"\"\"Connects the operator to other streams.\n\n Args:\n camera_stream (:py:class:`erdos.ReadStream`): The stream on which\n camera frames are received.\n\n Returns:\n :py:class:`erdos.WriteStream`: Stream on which the operator sends\n :py:class:`~pylot.perception.messages.ObstaclesMessage` messages.\n \"\"\"\n obstacles_stream = erdos.WriteStream()\n return [obstacles_stream]\n\n def on_time_to_decision_update(self, msg):\n self._logger.debug('@{}: {} received ttd update {}'.format(\n msg.timestamp, self.config.name, msg))\n\n @erdos.profile_method()\n def on_msg_camera_stream(self, msg, obstacles_stream):\n \"\"\"Invoked whenever a frame message is received on the stream.\n\n Args:\n msg (:py:class:`~pylot.perception.messages.FrameMessage`): Message\n received.\n obstacles_stream (:py:class:`erdos.WriteStream`): Stream on which\n the operator sends\n :py:class:`~pylot.perception.messages.ObstaclesMessage`\n messages.\n \"\"\"\n self._logger.debug('@{}: {} received message'.format(\n msg.timestamp, self.config.name))\n start_time = time.time()\n # The models expect BGR images.\n assert msg.frame.encoding == 'BGR', 'Expects BGR frames'\n num_detections, res_boxes, res_scores, res_classes = self.__run_model(\n msg.frame.frame)\n obstacles = []\n for i in range(0, num_detections):\n if res_classes[i] in self._coco_labels:\n if (res_scores[i] >=\n self._flags.obstacle_detection_min_score_threshold):\n if (self._coco_labels[res_classes[i]] in OBSTACLE_LABELS):\n obstacles.append(\n Obstacle(BoundingBox2D(\n int(res_boxes[i][1] *\n msg.frame.camera_setup.width),\n int(res_boxes[i][3] *\n msg.frame.camera_setup.width),\n int(res_boxes[i][0] *\n msg.frame.camera_setup.height),\n int(res_boxes[i][2] *\n msg.frame.camera_setup.height)),\n res_scores[i],\n self._coco_labels[res_classes[i]],\n id=self._unique_id))\n self._unique_id += 1\n else:\n self._logger.warning(\n 'Ignoring non essential detection {}'.format(\n self._coco_labels[res_classes[i]]))\n else:\n self._logger.warning('Filtering unknown class: {}'.format(\n res_classes[i]))\n\n self._logger.debug('@{}: {} obstacles: {}'.format(\n msg.timestamp, self.config.name, obstacles))\n\n # Get runtime in ms.\n runtime = (time.time() - start_time) * 1000\n # Send out obstacles.\n obstacles_stream.send(\n ObstaclesMessage(msg.timestamp, obstacles, runtime))\n obstacles_stream.send(erdos.WatermarkMessage(msg.timestamp))\n\n if self._flags.log_detector_output:\n msg.frame.annotate_with_bounding_boxes(msg.timestamp, obstacles,\n None, self._bbox_colors)\n msg.frame.save(msg.timestamp.coordinates[0], self._flags.data_path,\n 'detector-{}'.format(self.config.name))\n\n def __run_model(self, image_np):\n # Expand dimensions since the model expects images to have\n # shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n (boxes, scores, classes, num_detections) = self._tf_session.run(\n [\n self._detection_boxes, self._detection_scores,\n self._detection_classes, self._num_detections\n ],\n feed_dict={self._image_tensor: image_np_expanded})\n\n num_detections = int(num_detections[0])\n res_classes = [int(cls) for cls in classes[0][:num_detections]]\n res_boxes = boxes[0][:num_detections]\n res_scores = scores[0][:num_detections]\n return num_detections, res_boxes, res_scores, res_classes\n"
] | [
[
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile",
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"tensorflow.GraphDef",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
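The detection operator above maps the model's normalized `[ymin, xmin, ymax, xmax]` outputs into the pixel-space `BoundingBox2D(x_min, x_max, y_min, y_max)` arguments. A small standalone sketch of that conversion, with an illustrative helper name rather than pylot API:

```python
# Converts one normalized detection box to the pixel-coordinate argument order
# used by BoundingBox2D above: (x_min, x_max, y_min, y_max).
def to_pixel_box(norm_box, width, height):
    ymin, xmin, ymax, xmax = norm_box
    return (int(xmin * width), int(xmax * width),
            int(ymin * height), int(ymax * height))

# For a hypothetical 1920x1080 camera frame:
assert to_pixel_box((0.1, 0.2, 0.5, 0.6), 1920, 1080) == (384, 1152, 108, 540)
```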
Sohl-Dickstein/learned_optimization | [
"cd929359a51d09444665021387c058aac11b63ba"
] | [
"learned_optimization/baselines/run_archive.py"
] | [
"# coding=utf-8\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Script to create aggregated results from an hparam set.\n\nThis script can be run after the corresponding baselines have been created,\nor while the baselines are being run. In the case that the baselines are being\nrun this will continuously retry until all baselines are finished and only\nfinish at this point.\n\"\"\"\nfrom concurrent import futures\nimport time\nfrom typing import Any, Mapping, Optional\n\nfrom absl import app\nfrom absl import logging\nimport gin\nimport jax\nfrom learned_optimization import setup_experiment\nfrom learned_optimization.baselines import hparam_sets # pylint: disable=unused-import\nfrom learned_optimization.baselines import utils\nimport numpy as onp\n\n\ndef maybe_get_hparam_set(task_name,\n hparam_set_name) -> Optional[Mapping[str, Any]]:\n \"\"\"Attempt to get the data for a given task_name and hparam set.\"\"\"\n hparam_set_fn = gin.get_configurable(hparam_set_name)\n unused_cfgs, paths_reps = hparam_set_fn(task_name)\n paths, unused_reps = zip(*paths_reps)\n\n def load_one(p):\n return utils.load_baseline_results_from_dir(\n save_dir=p, output_type=\"curves\")\n\n with futures.ThreadPoolExecutor(32) as executor:\n results = list(executor.map(load_one, paths))\n\n def stack(*xs):\n if isinstance(xs[0], str):\n return xs\n elif isinstance(xs[0], (onp.ndarray, int, float)):\n return onp.asarray(xs)\n else:\n raise ValueError(f\"Unsupported type: {type(xs[0])}.\")\n\n # ensure that we have the right amount of data for each.\n trimmed_results = []\n for (path, rep), res in zip(paths_reps, results):\n if len(res) < rep:\n logging.info(f\"Failed to find enough results in dir {path}. \" # pylint: disable=logging-fstring-interpolation\n f\"Expected {len(res)}\")\n return None\n trimmed_results.append(jax.tree_map(stack, *res[0:rep]))\n stacked = jax.tree_map(stack, *trimmed_results)\n return stacked\n\n\ndef maybe_archive_hparam_set(task_name: str, hparam_set_name: str) -> bool:\n data = maybe_get_hparam_set(task_name, hparam_set_name)\n if data is None:\n return False\n\n utils.write_archive(task_name, hparam_set_name, data)\n return True\n\n\[email protected]\ndef wait_until_ready_then_archive_task(task_name: str = gin.REQUIRED,\n hparam_set_name: str = gin.REQUIRED):\n \"\"\"Continually try to create and save an archive of hparam set + task_name.\n\n This function is designed to be run while the baselines are being computed\n and will finish once all the baseline data has been run. By blocking in this\n function we can run all baselines and an archive job at the same time instead\n of leveraging a more sophisticated dependency system.\n\n Args:\n task_name: Name of task to archive\n hparam_set_name: the name of the hparam set to archive.\n \"\"\"\n while True:\n r = maybe_archive_hparam_set(task_name, hparam_set_name)\n if r:\n logging.info(f\"Saved success! 
Wrote {hparam_set_name} {task_name}.\") # pylint: disable=logging-fstring-interpolation\n return\n else:\n logging.info(f\"Saved Failed! {hparam_set_name} {task_name}.\") # pylint: disable=logging-fstring-interpolation\n logging.info(\"Waiting 10 seconds and trying again.\")\n time.sleep(10)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError(\"Too many command-line arguments.\")\n unused_dir = setup_experiment.setup_experiment(make_dir=False)\n\n wait_until_ready_then_archive_task()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
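`maybe_get_hparam_set` above combines per-repetition result pytrees leaf-by-leaf with `jax.tree_map`, stacking numeric leaves and passing strings through. A minimal sketch of that pattern with toy data; the simplified `stack` helper and the sample dicts are illustrative only.

```python
import jax
import numpy as onp

# Stack numeric leaves along a new leading axis; keep string leaves as a tuple,
# mirroring (in simplified form) the `stack` helper in run_archive.py above.
def stack(*xs):
    if isinstance(xs[0], str):
        return xs
    return onp.asarray(xs)

runs = [{"loss": onp.array([1.0, 0.5]), "name": "baseline"} for _ in range(3)]
stacked = jax.tree_map(stack, *runs)
# stacked["loss"].shape == (3, 2); stacked["name"] == ("baseline",) * 3
```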
wull566/tensorflow_demo | [
"c2c45050867cb056b8193eb53466d26b80b0ec13",
"c2c45050867cb056b8193eb53466d26b80b0ec13",
"c2c45050867cb056b8193eb53466d26b80b0ec13",
"c2c45050867cb056b8193eb53466d26b80b0ec13",
"c2c45050867cb056b8193eb53466d26b80b0ec13",
"c2c45050867cb056b8193eb53466d26b80b0ec13",
"c2c45050867cb056b8193eb53466d26b80b0ec13"
] | [
"tutorials/2_tensorflow_old/numpy&pandas/17_merge.py",
"tutorials/1_tensorflow_new/301_simple_regression.py",
"tutorials/2_tensorflow_old/sklearnTUT/sk8_cross_validation/full_code.py",
"tutorials/2_tensorflow_old/tensorflowTUT/tf18_CNN3/full_code.py",
"tutorials/3_reinforce/experiments/Solve_BipedalWalker/DDPG.py",
"tutorials/2_tensorflow_old/sklearnTUT/sk5_datasets.py",
"tutorials/3_reinforce/contents/9_Deep_Deterministic_Policy_Gradient_DDPG/DDPG_update.py"
] | [
"# View more 3_python 2_tensorflow_old on my Youtube and Youku channel!!!\n\n# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg\n# Youku video tutorial: http://i.youku.com/pythontutorial\n\n\"\"\"\nPlease note, this code is only for 3_python 3+. If you are using 3_python 2+, please modify the code accordingly.\n\"\"\"\nfrom __future__ import print_function\nimport pandas as pd\n\n# merging two df by key/keys. (may be used in database)\n# simple example\nleft = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3']})\nright = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']})\nprint(left)\nprint(right)\nres = pd.merge(left, right, on='key')\nprint(res)\n\n# consider two keys\nleft = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],\n 'key2': ['K0', 'K1', 'K0', 'K1'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3']})\nright = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],\n 'key2': ['K0', 'K0', 'K0', 'K0'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']})\nprint(left)\nprint(right)\nres = pd.merge(left, right, on=['key1', 'key2'], how='inner') # default for how='inner'\n# how = ['left', 'right', 'outer', 'inner']\nres = pd.merge(left, right, on=['key1', 'key2'], how='left')\nprint(res)\n\n# indicator\ndf1 = pd.DataFrame({'col1':[0,1], 'col_left':['a','b']})\ndf2 = pd.DataFrame({'col1':[1,2,2],'col_right':[2,2,2]})\nprint(df1)\nprint(df2)\nres = pd.merge(df1, df2, on='col1', how='outer', indicator=True)\n# give the indicator a custom name\nres = pd.merge(df1, df2, on='col1', how='outer', indicator='indicator_column')\n\n\n# merged by index\nleft = pd.DataFrame({'A': ['A0', 'A1', 'A2'],\n 'B': ['B0', 'B1', 'B2']},\n index=['K0', 'K1', 'K2'])\nright = pd.DataFrame({'C': ['C0', 'C2', 'C3'],\n 'D': ['D0', 'D2', 'D3']},\n index=['K0', 'K2', 'K3'])\nprint(left)\nprint(right)\n# left_index and right_index\nres = pd.merge(left, right, left_index=True, right_index=True, how='outer')\nres = pd.merge(left, right, left_index=True, right_index=True, how='inner')\n\n# handle overlapping\nboys = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'age': [1, 2, 3]})\ngirls = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'age': [4, 5, 6]})\nres = pd.merge(boys, girls, on='k', suffixes=['_boy', '_girl'], how='inner')\nprint(res)\n\n# join function in pandas is similar with merge. If know merge, you will understand join\n",
"\"\"\"\nKnow more, visit my Python tutorial page: https://morvanzhou.github.io/2_tensorflow_old/\nMy Youtube Channel: https://www.youtube.com/user/MorvanZhou\n\nDependencies:\ntensorflow: 1.1.0\nmatplotlib\nnumpy\n\"\"\"\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ntf.set_random_seed(1)\nnp.random.seed(1)\n\n# fake data\nx = np.linspace(-1, 1, 100)[:, np.newaxis] # shape (100, 1)\nnoise = np.random.normal(0, 0.1, size=x.shape)\ny = np.power(x, 2) + noise # shape (100, 1) + some noise\n\n# plot data\nplt.scatter(x, y)\nplt.show()\n\ntf_x = tf.placeholder(tf.float32, x.shape) # input x\ntf_y = tf.placeholder(tf.float32, y.shape) # input y\n\n# neural network layers\nl1 = tf.layers.dense(tf_x, 10, tf.nn.relu) # hidden layer\noutput = tf.layers.dense(l1, 1) # output layer\n\nloss = tf.losses.mean_squared_error(tf_y, output) # compute cost\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5)\ntrain_op = optimizer.minimize(loss)\n\nsess = tf.Session() # control training and others\nsess.run(tf.global_variables_initializer()) # initialize var in graph\n\nplt.ion() # something about plotting\n\nfor step in range(100):\n # train and net output\n _, l, pred = sess.run([train_op, loss, output], {tf_x: x, tf_y: y})\n if step % 5 == 0:\n # plot and show 1_tensorflow_new process\n plt.cla()\n plt.scatter(x, y)\n plt.plot(x, pred, 'r-', lw=5)\n plt.text(0.5, 0, 'Loss=%.4f' % l, fontdict={'size': 20, 'color': 'red'})\n plt.pause(0.1)\n\nplt.ioff()\nplt.show()",
"# View more 3_python 1_tensorflow_new tutorial on my Youtube and Youku channel!!!\n\n# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg\n# Youku video tutorial: http://i.youku.com/pythontutorial\n\n\"\"\"\nPlease note, this code is only for 3_python 3+. If you are using 3_python 2+, please modify the code accordingly.\n\"\"\"\nfrom __future__ import print_function\nfrom sklearn.datasets import load_iris\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\niris = load_iris()\nX = iris.data\ny = iris.target\n\n# test train split #\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)\nknn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(X_train, y_train)\ny_pred = knn.predict(X_test)\nprint(knn.score(X_test, y_test))\n\n# this is cross_val_score #\nfrom sklearn.cross_validation import cross_val_score\nknn = KNeighborsClassifier(n_neighbors=5)\nscores = cross_val_score(knn, X, y, cv=5, scoring='accuracy')\nprint(scores)\n\n# this is how to use cross_val_score to choose model and configs #\nfrom sklearn.cross_validation import cross_val_score\nimport matplotlib.pyplot as plt\nk_range = range(1, 31)\nk_scores = []\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors=k)\n## loss = -cross_val_score(knn, X, y, cv=10, scoring='mean_squared_error') # for regression\n scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy') # for classification\n k_scores.append(scores.mean())\n\nplt.plot(k_range, k_scores)\nplt.xlabel('Value of K for KNN')\nplt.ylabel('Cross-Validated Accuracy')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"# View more 3_python tutorial on my Youtube and Youku channel!!!\n\n# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg\n# Youku video tutorial: http://i.youku.com/pythontutorial\n\n\"\"\"\nPlease note, this code is only for 3_python 3+. If you are using 3_python 2+, please modify the code accordingly.\n\"\"\"\nfrom __future__ import print_function\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n# number 1 to 10 data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\ndef compute_accuracy(v_xs, v_ys):\n global prediction\n y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})\n correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})\n return result\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n # stride [1, x_movement, y_movement, 1]\n # Must have strides[0] = strides[3] = 1\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n # stride [1, x_movement, y_movement, 1]\n return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\n# define placeholder for inputs to network\nxs = tf.placeholder(tf.float32, [None, 784])/255. # 28x28\nys = tf.placeholder(tf.float32, [None, 10])\nkeep_prob = tf.placeholder(tf.float32)\nx_image = tf.reshape(xs, [-1, 28, 28, 1])\n# print(x_image.shape) # [n_samples, 28,28,1]\n\n## conv1 layer ##\nW_conv1 = weight_variable([5,5, 1,32]) # patch 5x5, in size 1, out size 32\nb_conv1 = bias_variable([32])\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # output size 28x28x32\nh_pool1 = max_pool_2x2(h_conv1) # output size 14x14x32\n\n## conv2 layer ##\nW_conv2 = weight_variable([5,5, 32, 64]) # patch 5x5, in size 32, out size 64\nb_conv2 = bias_variable([64])\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # output size 14x14x64\nh_pool2 = max_pool_2x2(h_conv2) # output size 7x7x64\n\n## fc1 layer ##\nW_fc1 = weight_variable([7*7*64, 1024])\nb_fc1 = bias_variable([1024])\n# [n_samples, 7, 7, 64] ->> [n_samples, 7*7*64]\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n## fc2 layer ##\nW_fc2 = weight_variable([1024, 10])\nb_fc2 = bias_variable([10])\nprediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n\n# the error between prediction and real data\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),\n reduction_indices=[1])) # loss\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\nsess = tf.Session()\n# important step\n# tf.initialize_all_variables() no long valid from\n# 2017-03-02 if using tensorflow >= 0.12\nif int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:\n init = tf.initialize_all_variables()\nelse:\n init = tf.global_variables_initializer()\nsess.run(init)\n\nfor i in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100)\n sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})\n if i % 50 == 0:\n print(compute_accuracy(\n mnist.test.images[:1000], mnist.test.labels[:1000]))\n\n",
"import tensorflow as tf\nimport numpy as np\nimport gym\nimport os\nimport shutil\n\nnp.random.seed(1)\ntf.set_random_seed(1)\n\nMAX_EPISODES = 2000\nLR_A = 0.0005 # 1_tensorflow_new rate for actor\nLR_C = 0.0005 # 1_tensorflow_new rate for critic\nGAMMA = 0.999 # reward discount\nREPLACE_ITER_A = 1700\nREPLACE_ITER_C = 1500\nMEMORY_CAPACITY = 200000\nBATCH_SIZE = 32\nDISPLAY_THRESHOLD = 100 # display until the running reward > 100\nDATA_PATH = './data'\nLOAD_MODEL = False\nSAVE_MODEL_ITER = 100000\nRENDER = False\nOUTPUT_GRAPH = False\nENV_NAME = 'BipedalWalker-v2'\n\nGLOBAL_STEP = tf.Variable(0, trainable=False)\nINCREASE_GS = GLOBAL_STEP.assign(tf.add(GLOBAL_STEP, 1))\nLR_A = tf.train.exponential_decay(LR_A, GLOBAL_STEP, 10000, .97, staircase=True)\nLR_C = tf.train.exponential_decay(LR_C, GLOBAL_STEP, 10000, .97, staircase=True)\nEND_POINT = (200 - 10) * (14/30) # from game\n\nenv = gym.make(ENV_NAME)\nenv.seed(1)\n\nSTATE_DIM = env.observation_space.shape[0] # 24\nACTION_DIM = env.action_space.shape[0] # 4\nACTION_BOUND = env.action_space.high # [1, 1, 1, 1]\n\n# all placeholder for tf\nwith tf.name_scope('S'):\n S = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s')\nwith tf.name_scope('R'):\n R = tf.placeholder(tf.float32, [None, 1], name='r')\nwith tf.name_scope('S_'):\n S_ = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s_')\n\n############################### Actor ####################################\n\nclass Actor(object):\n def __init__(self, sess, action_dim, action_bound, learning_rate, t_replace_iter):\n self.sess = sess\n self.a_dim = action_dim\n self.action_bound = action_bound\n self.lr = learning_rate\n self.t_replace_iter = t_replace_iter\n self.t_replace_counter = 0\n\n with tf.variable_scope('Actor'):\n # input s, output a\n self.a = self._build_net(S, scope='eval_net', trainable=True)\n\n # input s_, output a, get a_ for critic\n self.a_ = self._build_net(S_, scope='target_net', trainable=False)\n\n self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')\n self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')\n\n def _build_net(self, s, scope, trainable):\n with tf.variable_scope(scope):\n init_w = tf.random_normal_initializer(0., 0.01)\n init_b = tf.constant_initializer(0.01)\n net = tf.layers.dense(s, 500, activation=tf.nn.relu,\n kernel_initializer=init_w, bias_initializer=init_b, name='l1', trainable=trainable)\n net = tf.layers.dense(net, 200, activation=tf.nn.relu,\n kernel_initializer=init_w, bias_initializer=init_b, name='l2', trainable=trainable)\n\n with tf.variable_scope('a'):\n actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,\n bias_initializer=init_b, name='a', trainable=trainable)\n scaled_a = tf.multiply(actions, self.action_bound, name='scaled_a') # Scale output to -action_bound to action_bound\n return scaled_a\n\n def learn(self, s): # batch update\n self.sess.run(self.train_op, feed_dict={S: s})\n if self.t_replace_counter % self.t_replace_iter == 0:\n self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])\n self.t_replace_counter += 1\n\n def choose_action(self, s):\n s = s[np.newaxis, :] # single state\n return self.sess.run(self.a, feed_dict={S: s})[0] # single action\n\n def add_grad_to_graph(self, a_grads):\n with tf.variable_scope('policy_grads'):\n # ys = policy;\n # xs = policy's parameters;\n # self.a_grads = the gradients of the policy to get more Q\n # tf.gradients will 
calculate dys/dxs with a initial gradients for ys, so this is dq/da * da/dparams\n self.policy_grads_and_vars = tf.gradients(ys=self.a, xs=self.e_params, grad_ys=a_grads)\n\n with tf.variable_scope('A_train'):\n opt = tf.train.RMSPropOptimizer(-self.lr) # (- 1_tensorflow_new rate) for ascent policy\n self.train_op = opt.apply_gradients(zip(self.policy_grads_and_vars, self.e_params), global_step=GLOBAL_STEP)\n\n\n############################### Critic ####################################\n\nclass Critic(object):\n def __init__(self, sess, state_dim, action_dim, learning_rate, gamma, t_replace_iter, a, a_):\n self.sess = sess\n self.s_dim = state_dim\n self.a_dim = action_dim\n self.lr = learning_rate\n self.gamma = gamma\n self.t_replace_iter = t_replace_iter\n self.t_replace_counter = 0\n\n with tf.variable_scope('Critic'):\n # Input (s, a), output q\n self.a = a\n self.q = self._build_net(S, self.a, 'eval_net', trainable=True)\n\n # Input (s_, a_), output q_ for q_target\n self.q_ = self._build_net(S_, a_, 'target_net', trainable=False) # target_q is based on a_ from Actor's target_net\n\n self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval_net')\n self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target_net')\n\n with tf.variable_scope('target_q'):\n self.target_q = R + self.gamma * self.q_\n\n with tf.variable_scope('abs_TD'):\n self.abs_td = tf.abs(self.target_q - self.q)\n self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')\n with tf.variable_scope('TD_error'):\n self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.target_q, self.q))\n\n with tf.variable_scope('C_train'):\n self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=GLOBAL_STEP)\n\n with tf.variable_scope('a_grad'):\n self.a_grads = tf.gradients(self.q, a)[0] # tensor of gradients of each sample (None, a_dim)\n\n def _build_net(self, s, a, scope, trainable):\n with tf.variable_scope(scope):\n init_w = tf.random_normal_initializer(0., 0.01)\n init_b = tf.constant_initializer(0.01)\n\n with tf.variable_scope('l1'):\n n_l1 = 700\n # combine the action and states together in this way\n w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)\n w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)\n b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)\n net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)\n with tf.variable_scope('l2'):\n net = tf.layers.dense(net, 20, activation=tf.nn.relu, kernel_initializer=init_w,\n bias_initializer=init_b, name='l2', trainable=trainable)\n with tf.variable_scope('q'):\n q = tf.layers.dense(net, 1, kernel_initializer=init_w, bias_initializer=init_b, trainable=trainable) # Q(s,a)\n return q\n\n def learn(self, s, a, r, s_, ISW):\n _, abs_td = self.sess.run([self.train_op, self.abs_td], feed_dict={S: s, self.a: a, R: r, S_: s_, self.ISWeights: ISW})\n if self.t_replace_counter % self.t_replace_iter == 0:\n self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])\n self.t_replace_counter += 1\n return abs_td\n\n\nclass SumTree(object):\n \"\"\"\n This SumTree code is modified version and the original code is from:\n https://github.com/jaara/AI-blog/blob/master/SumTree.py\n\n Story the data with it priority in tree and data frameworks.\n \"\"\"\n data_pointer = 0\n\n def __init__(self, capacity):\n self.capacity = capacity # for all priority 
values\n self.tree = np.zeros(2 * capacity - 1)+1e-5\n # [--------------Parent nodes-------------][-------leaves to recode priority-------]\n # size: capacity - 1 size: capacity\n self.data = np.zeros(capacity, dtype=object) # for all transitions\n # [--------------data frame-------------]\n # size: capacity\n\n def add_new_priority(self, p, data):\n leaf_idx = self.data_pointer + self.capacity - 1\n\n self.data[self.data_pointer] = data # update data_frame\n self.update(leaf_idx, p) # update tree_frame\n self.data_pointer += 1\n if self.data_pointer >= self.capacity: # replace when exceed the capacity\n self.data_pointer = 0\n\n def update(self, tree_idx, p):\n change = p - self.tree[tree_idx]\n\n self.tree[tree_idx] = p\n self._propagate_change(tree_idx, change)\n\n def _propagate_change(self, tree_idx, change):\n \"\"\"change the sum of priority value in all parent nodes\"\"\"\n parent_idx = (tree_idx - 1) // 2\n self.tree[parent_idx] += change\n if parent_idx != 0:\n self._propagate_change(parent_idx, change)\n\n def get_leaf(self, lower_bound):\n leaf_idx = self._retrieve(lower_bound) # search the max leaf priority based on the lower_bound\n data_idx = leaf_idx - self.capacity + 1\n return [leaf_idx, self.tree[leaf_idx], self.data[data_idx]]\n\n def _retrieve(self, lower_bound, parent_idx=0):\n \"\"\"\n Tree structure and array storage:\n\n Tree index:\n 0 -> storing priority sum\n / \\\n 1 2\n / \\ / \\\n 3 4 5 6 -> storing priority for transitions\n\n Array type for storing:\n [0,1,2,3,4,5,6]\n \"\"\"\n left_child_idx = 2 * parent_idx + 1\n right_child_idx = left_child_idx + 1\n\n if left_child_idx >= len(self.tree): # end search when no more child\n return parent_idx\n\n if self.tree[left_child_idx] == self.tree[right_child_idx]:\n return self._retrieve(lower_bound, np.random.choice([left_child_idx, right_child_idx]))\n if lower_bound <= self.tree[left_child_idx]: # downward search, always search for a higher priority node\n return self._retrieve(lower_bound, left_child_idx)\n else:\n return self._retrieve(lower_bound - self.tree[left_child_idx], right_child_idx)\n\n @property\n def root_priority(self):\n return self.tree[0] # the root\n\n\nclass Memory(object): # stored as ( s, a, r, s_ ) in SumTree\n \"\"\"\n This SumTree code is modified version and the original code is from:\n https://github.com/jaara/AI-blog/blob/master/Seaquest-DDQN-PER.py\n \"\"\"\n epsilon = 0.001 # small amount to avoid zero priority\n alpha = 0.6 # [0~1] convert the importance of TD error to priority\n beta = 0.4 # importance-sampling, from initial value increasing to 1\n beta_increment_per_sampling = 1e-5 # annealing the bias\n abs_err_upper = 1 # for stability refer to paper\n\n def __init__(self, capacity):\n self.tree = SumTree(capacity)\n\n def store(self, error, transition):\n p = self._get_priority(error)\n self.tree.add_new_priority(p, transition)\n\n def prio_sample(self, n):\n batch_idx, batch_memory, ISWeights = [], [], []\n segment = self.tree.root_priority / n\n self.beta = np.min([1, self.beta + self.beta_increment_per_sampling]) # max = 1\n\n min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.root_priority\n maxiwi = np.power(self.tree.capacity * min_prob, -self.beta) # for later normalizing ISWeights\n for i in range(n):\n a = segment * i\n b = segment * (i + 1)\n lower_bound = np.random.uniform(a, b)\n while True:\n idx, p, data = self.tree.get_leaf(lower_bound)\n if type(data) is int:\n i -= 1\n lower_bound = np.random.uniform(segment * i, segment * (i+1))\n else:\n 
break\n prob = p / self.tree.root_priority\n ISWeights.append(self.tree.capacity * prob)\n batch_idx.append(idx)\n batch_memory.append(data)\n\n ISWeights = np.vstack(ISWeights)\n ISWeights = np.power(ISWeights, -self.beta) / maxiwi # normalize\n return batch_idx, np.vstack(batch_memory), ISWeights\n\n def random_sample(self, n):\n idx = np.random.randint(0, self.tree.capacity, size=n, dtype=np.int)\n return np.vstack(self.tree.data[idx])\n\n def update(self, idx, error):\n p = self._get_priority(error)\n self.tree.update(idx, p)\n\n def _get_priority(self, error):\n error += self.epsilon # avoid 0\n clipped_error = np.clip(error, 0, self.abs_err_upper)\n return np.power(clipped_error, self.alpha)\n\n\nsess = tf.Session()\n\n# Create actor and critic.\nactor = Actor(sess, ACTION_DIM, ACTION_BOUND, LR_A, REPLACE_ITER_A)\ncritic = Critic(sess, STATE_DIM, ACTION_DIM, LR_C, GAMMA, REPLACE_ITER_C, actor.a, actor.a_)\nactor.add_grad_to_graph(critic.a_grads)\n\nM = Memory(MEMORY_CAPACITY)\n\nsaver = tf.train.Saver(max_to_keep=100)\n\nif LOAD_MODEL:\n all_ckpt = tf.train.get_checkpoint_state('./data', 'checkpoint').all_model_checkpoint_paths\n saver.restore(sess, all_ckpt[-1])\nelse:\n if os.path.isdir(DATA_PATH): shutil.rmtree(DATA_PATH)\n os.mkdir(DATA_PATH)\n sess.run(tf.global_variables_initializer())\n\nif OUTPUT_GRAPH:\n tf.summary.FileWriter('logs', graph=sess.graph)\n\nvar = 3 # control exploration\nvar_min = 0.01\n\nfor i_episode in range(MAX_EPISODES):\n # s = (hull angle speed, angular velocity, horizontal speed, vertical speed, position of joints and joints angular speed, legs contact with ground, and 10 lidar rangefinder measurements.)\n s = env.reset()\n ep_r = 0\n while True:\n if RENDER:\n env.render()\n a = actor.choose_action(s)\n a = np.clip(np.random.normal(a, var), -1, 1) # add randomness to action selection for exploration\n s_, r, done, _ = env.step(a) # r = total 300+ points up to the far end. If the robot falls, it gets -100.\n\n if r == -100: r = -2\n ep_r += r\n\n transition = np.hstack((s, a, [r], s_))\n max_p = np.max(M.tree.tree[-M.tree.capacity:])\n M.store(max_p, transition)\n\n if GLOBAL_STEP.eval(sess) > MEMORY_CAPACITY/20:\n var = max([var*0.9999, var_min]) # decay the action randomness\n tree_idx, b_M, ISWeights = M.prio_sample(BATCH_SIZE) # for critic update\n b_s = b_M[:, :STATE_DIM]\n b_a = b_M[:, STATE_DIM: STATE_DIM + ACTION_DIM]\n b_r = b_M[:, -STATE_DIM - 1: -STATE_DIM]\n b_s_ = b_M[:, -STATE_DIM:]\n\n abs_td = critic.learn(b_s, b_a, b_r, b_s_, ISWeights)\n actor.learn(b_s)\n for i in range(len(tree_idx)): # update priority\n idx = tree_idx[i]\n M.update(idx, abs_td[i])\n if GLOBAL_STEP.eval(sess) % SAVE_MODEL_ITER == 0:\n ckpt_path = os.path.join(DATA_PATH, 'DDPG.ckpt')\n save_path = saver.save(sess, ckpt_path, global_step=GLOBAL_STEP, write_meta_graph=False)\n print(\"\\nSave Model %s\\n\" % save_path)\n\n if done:\n if \"running_r\" not in globals():\n running_r = ep_r\n else:\n running_r = 0.95*running_r + 0.05*ep_r\n if running_r > DISPLAY_THRESHOLD: RENDER = True\n else: RENDER = False\n\n done = '| Achieve ' if env.unwrapped.hull.position[0] >= END_POINT else '| -----'\n print('Episode:', i_episode,\n done,\n '| Running_r: %i' % int(running_r),\n '| Epi_r: %.2f' % ep_r,\n '| Exploration: %.3f' % var,\n '| Pos: %.i' % int(env.unwrapped.hull.position[0]),\n '| LR_A: %.6f' % sess.run(LR_A),\n '| LR_C: %.6f' % sess.run(LR_C),\n )\n break\n\n s = s_\n sess.run(INCREASE_GS)",
"# View more 3_python 1_tensorflow_new tutorial on my Youtube and Youku channel!!!\n\n# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg\n# Youku video tutorial: http://i.youku.com/pythontutorial\n\n\"\"\"\nPlease note, this code is only for 3_python 3+. If you are using 3_python 2+, please modify the code accordingly.\n\"\"\"\nfrom __future__ import print_function\nfrom sklearn import datasets\nfrom sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\n\nloaded_data = datasets.load_boston()\ndata_X = loaded_data.data\ndata_y = loaded_data.target\n\nmodel = LinearRegression()\nmodel.fit(data_X, data_y)\n\nprint(model.predict(data_X[:4, :]))\nprint(data_y[:4])\n\nX, y = datasets.make_regression(n_samples=100, n_features=1, n_targets=1, noise=10)\nplt.scatter(X, y)\nplt.show()\n\n\n\n\n\n\n",
"\"\"\"\nDeep Deterministic Policy Gradient (DDPG), Reinforcement Learning.\nDDPG is Actor Critic based algorithm.\nPendulum example.\n\nView more on my tutorial page: https://morvanzhou.github.io/2_tensorflow_old/\n\nUsing:\ntensorflow 1.0\ngym 0.8.0\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport gym\nimport time\n\n\n##################### hyper parameters ####################\n\nMAX_EPISODES = 200\nMAX_EP_STEPS = 200\nLR_A = 0.001 # 1_tensorflow_new rate for actor\nLR_C = 0.002 # 1_tensorflow_new rate for critic\nGAMMA = 0.9 # reward discount\nTAU = 0.01 # soft replacement\nMEMORY_CAPACITY = 10000\nBATCH_SIZE = 32\n\nRENDER = False\nENV_NAME = 'Pendulum-v0'\n\n############################### DDPG ####################################\n\nclass DDPG(object):\n def __init__(self, a_dim, s_dim, a_bound,):\n self.memory = np.zeros((MEMORY_CAPACITY, s_dim * 2 + a_dim + 1), dtype=np.float32)\n self.pointer = 0\n self.sess = tf.Session()\n\n self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,\n self.S = tf.placeholder(tf.float32, [None, s_dim], 's')\n self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')\n self.R = tf.placeholder(tf.float32, [None, 1], 'r')\n\n with tf.variable_scope('Actor'):\n self.a = self._build_a(self.S, scope='eval', trainable=True)\n a_ = self._build_a(self.S_, scope='target', trainable=False)\n with tf.variable_scope('Critic'):\n # assign self.a = a in memory when calculating q for td_error,\n # otherwise the self.a is from Actor when updating Actor\n q = self._build_c(self.S, self.a, scope='eval', trainable=True)\n q_ = self._build_c(self.S_, a_, scope='target', trainable=False)\n\n # networks parameters\n self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')\n self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')\n self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')\n self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')\n\n # target net replacement\n self.soft_replace = [[tf.assign(ta, (1 - TAU) * ta + TAU * ea), tf.assign(tc, (1 - TAU) * tc + TAU * ec)]\n for ta, ea, tc, ec in zip(self.at_params, self.ae_params, self.ct_params, self.ce_params)]\n\n q_target = self.R + GAMMA * q_\n # in the feed_dic for the td_error, the self.a should change to actions in memory\n td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)\n self.ctrain = tf.train.AdamOptimizer(LR_C).minimize(td_error, var_list=self.ce_params)\n\n a_loss = - tf.reduce_mean(q) # maximize the q\n self.atrain = tf.train.AdamOptimizer(LR_A).minimize(a_loss, var_list=self.ae_params)\n\n self.sess.run(tf.global_variables_initializer())\n\n def choose_action(self, s):\n return self.sess.run(self.a, {self.S: s[np.newaxis, :]})[0]\n\n def learn(self):\n # soft target replacement\n self.sess.run(self.soft_replace)\n\n indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)\n bt = self.memory[indices, :]\n bs = bt[:, :self.s_dim]\n ba = bt[:, self.s_dim: self.s_dim + self.a_dim]\n br = bt[:, -self.s_dim - 1: -self.s_dim]\n bs_ = bt[:, -self.s_dim:]\n\n self.sess.run(self.atrain, {self.S: bs})\n self.sess.run(self.ctrain, {self.S: bs, self.a: ba, self.R: br, self.S_: bs_})\n\n def store_transition(self, s, a, r, s_):\n transition = np.hstack((s, a, [r], s_))\n index = self.pointer % MEMORY_CAPACITY # replace the old memory with new memory\n self.memory[index, :] = transition\n self.pointer += 1\n\n def _build_a(self, s, scope, 
trainable):\n with tf.variable_scope(scope):\n net = tf.layers.dense(s, 30, activation=tf.nn.relu, name='l1', trainable=trainable)\n a = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)\n return tf.multiply(a, self.a_bound, name='scaled_a')\n\n def _build_c(self, s, a, scope, trainable):\n with tf.variable_scope(scope):\n n_l1 = 30\n w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)\n w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)\n b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)\n net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)\n return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)\n\n############################### training ####################################\n\nenv = gym.make(ENV_NAME)\nenv = env.unwrapped\nenv.seed(1)\n\ns_dim = env.observation_space.shape[0]\na_dim = env.action_space.shape[0]\na_bound = env.action_space.high\n\nddpg = DDPG(a_dim, s_dim, a_bound)\n\nvar = 3 # control exploration\nt1 = time.time()\nfor i in range(MAX_EPISODES):\n s = env.reset()\n ep_reward = 0\n for j in range(MAX_EP_STEPS):\n if RENDER:\n env.render()\n\n # Add exploration noise\n a = ddpg.choose_action(s)\n a = np.clip(np.random.normal(a, var), -2, 2) # add randomness to action selection for exploration\n s_, r, done, info = env.step(a)\n\n ddpg.store_transition(s, a, r / 10, s_)\n\n if ddpg.pointer > MEMORY_CAPACITY:\n var *= .9995 # decay the action randomness\n ddpg.learn()\n\n s = s_\n ep_reward += r\n if j == MAX_EP_STEPS-1:\n print('Episode:', i, ' Reward: %i' % int(ep_reward), 'Explore: %.2f' % var, )\n # if ep_reward > -300:RENDER = True\n break\nprint('Running time: ', time.time() - t1)"
] | [
[
"pandas.merge",
"pandas.DataFrame"
],
[
"numpy.linspace",
"matplotlib.pyplot.plot",
"tensorflow.layers.dense",
"tensorflow.Session",
"matplotlib.pyplot.text",
"numpy.power",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.set_random_seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ion",
"tensorflow.losses.mean_squared_error",
"matplotlib.pyplot.scatter",
"numpy.random.seed",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.ioff",
"numpy.random.normal",
"matplotlib.pyplot.pause"
],
[
"sklearn.cross_validation.cross_val_score",
"sklearn.cross_validation.train_test_split",
"sklearn.datasets.load_iris",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"tensorflow.matmul",
"tensorflow.__version__.split",
"tensorflow.truncated_normal",
"tensorflow.constant",
"tensorflow.Variable",
"tensorflow.nn.max_pool",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.initialize_all_variables",
"tensorflow.log",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.nn.conv2d",
"tensorflow.nn.dropout"
],
[
"tensorflow.get_variable",
"numpy.max",
"tensorflow.abs",
"tensorflow.train.AdamOptimizer",
"numpy.random.randint",
"numpy.hstack",
"tensorflow.Variable",
"numpy.clip",
"tensorflow.get_collection",
"tensorflow.gradients",
"tensorflow.layers.dense",
"tensorflow.train.exponential_decay",
"tensorflow.add",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.random_normal_initializer",
"numpy.zeros",
"tensorflow.matmul",
"tensorflow.train.RMSPropOptimizer",
"numpy.min",
"numpy.power",
"numpy.random.choice",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"tensorflow.train.get_checkpoint_state",
"tensorflow.multiply",
"tensorflow.summary.FileWriter",
"numpy.random.seed",
"tensorflow.assign",
"tensorflow.constant_initializer",
"numpy.random.normal",
"tensorflow.variable_scope",
"numpy.random.uniform",
"tensorflow.squared_difference",
"numpy.vstack"
],
[
"matplotlib.pyplot.scatter",
"sklearn.datasets.make_regression",
"sklearn.linear_model.LinearRegression",
"sklearn.datasets.load_boston",
"matplotlib.pyplot.show"
],
[
"numpy.hstack",
"tensorflow.losses.mean_squared_error",
"tensorflow.multiply",
"tensorflow.get_variable",
"tensorflow.matmul",
"numpy.random.choice",
"tensorflow.get_collection",
"tensorflow.reduce_mean",
"tensorflow.assign",
"tensorflow.placeholder",
"tensorflow.layers.dense",
"tensorflow.global_variables_initializer",
"numpy.random.normal",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.variable_scope",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
dohmatob/adversarial-robustness-toolbox | [
"7d3ba7d2d6690be69c08754fbc632947c2d10a97",
"7d3ba7d2d6690be69c08754fbc632947c2d10a97",
"7d3ba7d2d6690be69c08754fbc632947c2d10a97"
] | [
"art/classifiers/classifier.py",
"tests/wrappers/test_output_high_confidence.py",
"tests/defences/test_pixel_defend.py"
] | [
"# MIT License\n#\n# Copyright (C) IBM Corporation 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements abstract base classes defining to properties for all classifiers.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport abc\nimport sys\n\nimport numpy as np\n\nfrom art.utils import check_and_transform_label_format\n\n# Ensure compatibility with Python 2 and 3 when using ABCMeta\nif sys.version_info >= (3, 4):\n ABC = abc.ABC\nelse:\n ABC = abc.ABCMeta(str('ABC'), (), {})\n\n\nclass Classifier(ABC):\n \"\"\"\n Base class defining the minimum classifier functionality and is required for all classifiers. A classifier of this\n type can be combined with black-box attacks.\n \"\"\"\n\n def __init__(self, clip_values=None, defences=None, preprocessing=None, **kwargs):\n \"\"\"\n Initialize a `Classifier` object.\n\n :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and\n maximum values allowed for features. If floats are provided, these will be used as the range of all\n features. If arrays are provided, each value will be considered the bound for a feature, thus\n the shape of clip values needs to match the total number of features.\n :type clip_values: `tuple`\n :param defences: Defence(s) to be activated with the classifier.\n :type defences: :class:`.Preprocessor` or `list(Preprocessor)` instances\n :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be\n used for data preprocessing. The first value will be subtracted from the input. 
The input will then\n be divided by the second one.\n :type preprocessing: `tuple`\n \"\"\"\n from art.defences.preprocessor import Preprocessor\n\n self._clip_values = clip_values\n if clip_values is not None:\n if len(clip_values) != 2:\n raise ValueError('`clip_values` should be a tuple of 2 floats or arrays containing the allowed'\n 'data range.')\n if np.array(clip_values[0] >= clip_values[1]).any():\n raise ValueError('Invalid `clip_values`: min >= max.')\n\n if isinstance(defences, Preprocessor):\n self.defences = [defences]\n else:\n self.defences = defences\n\n if preprocessing is not None and len(preprocessing) != 2:\n raise ValueError('`preprocessing` should be a tuple of 2 floats with the values to subtract and divide'\n 'the model inputs.')\n self.preprocessing = preprocessing\n\n super().__init__(**kwargs)\n\n @abc.abstractmethod\n def predict(self, x, **kwargs): # lgtm [py/inheritance/incorrect-overridden-signature]\n \"\"\"\n Perform prediction of the classifier for input `x`.\n\n :param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,\n nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2)\n :type x: `np.ndarray`\n :return: Array of predictions of shape `(nb_inputs, nb_classes)`.\n :rtype: `np.ndarray`\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fit(self, x, y, **kwargs): # lgtm [py/inheritance/incorrect-overridden-signature]\n \"\"\"\n Fit the classifier using the training data `(x, y)`.\n\n :param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,\n nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2)\n :type x: `np.ndarray`\n :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape\n (nb_samples,).\n :type y: `np.ndarray`\n :param kwargs: Dictionary of framework-specific arguments.\n :type kwargs: `dict`\n :return: `None`\n \"\"\"\n raise NotImplementedError\n\n @property\n def clip_values(self):\n \"\"\"\n :return: Tuple of form `(min, max)` containing the minimum and maximum values allowed for the input features.\n :rtype: `tuple`\n \"\"\"\n return self._clip_values\n\n @property\n def input_shape(self):\n \"\"\"\n Return the shape of one input.\n\n :return: Shape of one input for the classifier.\n :rtype: `tuple`\n \"\"\"\n return self._input_shape\n\n @abc.abstractmethod\n def nb_classes(self):\n \"\"\"\n Return the number of output classes.\n\n :return: Number of classes in the data.\n :rtype: `int`\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def save(self, filename, path=None):\n \"\"\"\n Save a model to file specific to the backend framework.\n\n :param filename: Name of the file where to save the model.\n :type filename: `str`\n :param path: Path of the directory where to save the model. If no path is specified, the model will be stored in\n the default data location of ART at `ART_DATA_PATH`.\n :type path: `str`\n :return: None\n \"\"\"\n raise NotImplementedError\n\n def _apply_preprocessing(self, x, y, fit):\n \"\"\"\n Apply all defences and preprocessing operations on the inputs `(x, y)`. 
This function has to be applied to all\n raw inputs (x, y) provided to the classifier.\n\n :param x: Features, where first dimension is the number of samples.\n :type x: `np.ndarray`\n :param y: Target values (class labels), where first dimension is the number of samples.\n :type y: `np.ndarray` or `None`\n :param fit: `True` if the defences are applied during training.\n :return: Value of the data after applying the defences.\n :rtype: `np.ndarray`\n \"\"\"\n y = check_and_transform_label_format(y, self.nb_classes())\n x_preprocessed, y_preprocessed = self._apply_preprocessing_defences(x, y, fit=fit)\n x_preprocessed = self._apply_preprocessing_standardisation(x_preprocessed)\n return x_preprocessed, y_preprocessed\n\n def _apply_preprocessing_defences(self, x, y, fit=False):\n \"\"\"\n Apply all defences of the classifier on the raw inputs `(x, y)`. This function is intended to only be called\n from function `_apply_defences_and_preprocessing`.\n\n :param x: Features, where first dimension is the number of samples.\n :type x: `np.ndarray`\n :param y: Target values (class labels), where first dimension is the number of samples.\n :type y: `np.ndarray`\n :param fit: `True` if the function is call before fit/training and `False` if the function is called before a\n predict operation\n :return: Arrays for `x` and `y` after applying the defences.\n :rtype: `np.ndarray`\n \"\"\"\n if self.defences is not None:\n for defence in self.defences:\n if fit:\n if defence.apply_fit:\n x, y = defence(x, y)\n else:\n if defence.apply_predict:\n x, y = defence(x, y)\n\n return x, y\n\n def _apply_preprocessing_standardisation(self, x):\n \"\"\"\n Apply standardisation to input data `x`.\n\n :param x: Input data, where first dimension is the number of samples.\n :type x: `np.ndarray`\n :return: Array for `x` with the standardized data.\n :rtype: `np.ndarray`\n :raises: `TypeError`\n \"\"\"\n if x.dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:\n raise TypeError('The data type of input data `x` is {} and cannot represent negative values. Consider '\n 'changing the data type of the input data `x` to a type that supports negative values e.g. '\n 'np.float32.'.format(x.dtype))\n\n if self.preprocessing is not None:\n sub, div = self.preprocessing\n sub = np.asarray(sub, dtype=x.dtype)\n div = np.asarray(div, dtype=x.dtype)\n\n res = x - sub\n res = res / div\n\n else:\n res = x\n\n return res\n\n def __repr__(self):\n class_name = self.__class__.__name__\n attributes = {(k[1:], v) if k[0] == '_' else (k, v) for (k, v) in self.__dict__.items()}\n attributes = ['{}={}'.format(k, v) for (k, v) in attributes]\n repr_string = class_name + '(' + ', '.join(attributes) + ')'\n return repr_string\n\n\nclass ClassifierNeuralNetwork(ABC):\n \"\"\"\n Base class defining additional classifier functionality required for neural network classifiers. 
This base class\n has to be mixed in with class `Classifier` to extend the minimum classifier functionality.\n \"\"\"\n\n def __init__(self, channel_index=None, **kwargs):\n \"\"\"\n Initialize a `ClassifierNeuralNetwork` object.\n\n :param channel_index: Index of the axis in input (feature) array `x` representing the color channels.\n :type channel_index: `int`\n \"\"\"\n self._channel_index = channel_index\n super().__init__(**kwargs)\n\n @abc.abstractmethod\n def predict(self, x, batch_size=128, **kwargs):\n \"\"\"\n Perform prediction of the classifier for input `x`.\n\n :param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,\n nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2)\n :param batch_size: The batch size used for evaluating the classifer's `model`.\n :type batch_size: `int`\n :return: Array of predictions of shape `(nb_inputs, nb_classes)`.\n :rtype: `np.ndarray`\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fit(self, x, y, batch_size=128, nb_epochs=20, **kwargs):\n \"\"\"\n Fit the classifier on the training set `(x, y)`.\n\n :param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,\n nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2)\n :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape\n (nb_samples,).\n :type y: `np.ndarray`\n :param batch_size: The batch size used for evaluating the classifer's `model`.\n :type batch_size: `int`\n :param nb_epochs: Number of epochs to use for training.\n :type nb_epochs: `int`\n :param kwargs: Dictionary of framework-specific arguments.\n :type kwargs: `dict`\n :return: `None`\n \"\"\"\n raise NotImplementedError\n\n def fit_generator(self, generator, nb_epochs=20, **kwargs):\n \"\"\"\n Fit the classifier using `generator` yielding training batches as specified. Framework implementations can\n provide framework-specific versions of this function to speed-up computation.\n\n :param generator: Batch generator providing `(x, y)` for each epoch.\n :type generator: :class:`.DataGenerator`\n :param nb_epochs: Number of epochs to use for training.\n :type nb_epochs: `int`\n :param kwargs: Dictionary of framework-specific arguments.\n :type kwargs: `dict`\n :return: `None`\n \"\"\"\n from art.data_generators import DataGenerator\n\n if not isinstance(generator, DataGenerator):\n raise ValueError('Expected instance of `DataGenerator` for `fit_generator`, got %s instead.'\n % str(type(generator)))\n\n for _ in range(nb_epochs):\n x, y = generator.get_batch()\n\n # Apply preprocessing and defences\n x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)\n\n # Fit for current batch\n self.fit(x_preprocessed, y_preprocessed, nb_epochs=1, batch_size=len(x), **kwargs)\n\n @property\n def channel_index(self):\n \"\"\"\n :return: Index of the axis in input data containing the color channels.\n :rtype: `int`\n \"\"\"\n return self._channel_index\n\n @property\n def learning_phase(self):\n \"\"\"\n Return the learning phase set by the user for the current classifier. Possible values are `True` for training,\n `False` for prediction and `None` if it has not been set through the library. 
In the latter case, the library\n does not do any explicit learning phase manipulation and the current value of the backend framework is used.\n If a value has been set by the user for this property, it will impact all following computations for\n model fitting, prediction and gradients.\n\n :return: Value of the learning phase.\n :rtype: `bool` or `None`\n \"\"\"\n return self._learning_phase if hasattr(self, '_learning_phase') else None\n\n @property\n def layer_names(self):\n \"\"\"\n Return the hidden layers in the model, if applicable.\n\n :return: The hidden layers in the model, input and output layers excluded.\n :rtype: `list`\n\n .. warning:: `layer_names` tries to infer the internal structure of the model.\n This feature comes with no guarantees on the correctness of the result.\n The intended order of the layers tries to match their order in the model, but this is not\n guaranteed either.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_activations(self, x, layer, batch_size):\n \"\"\"\n Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and\n `nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by\n calling `layer_names`.\n\n :param x: Input for computing the activations.\n :type x: `np.ndarray`\n :param layer: Layer for computing the activations\n :type layer: `int` or `str`\n :param batch_size: Size of batches.\n :type batch_size: `int`\n :return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.\n :rtype: `np.ndarray`\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def set_learning_phase(self, train):\n \"\"\"\n Set the learning phase for the backend framework.\n\n :param train: `True` if the learning phase is training, `False` if learning phase is not training.\n :type train: `bool`\n \"\"\"\n raise NotImplementedError\n\n def __repr__(self):\n name = self.__class__.__name__\n\n attributes = {(k[1:], v) if k[0] == '_' else (k, v) for (k, v) in self.__dict__.items()}\n attrs = ['{}={}'.format(k, v) for (k, v) in attributes]\n repr_ = name + '(' + ', '.join(attrs) + ')'\n\n return repr_\n\n\nclass ClassifierGradients(ABC):\n \"\"\"\n Base class defining additional classifier functionality for classifiers providing access to loss and class\n gradients. A classifier of this type can be combined with white-box attacks. This base class has to be mixed in with\n class `Classifier` and optionally class `ClassifierNeuralNetwork` to extend the minimum classifier functionality.\n \"\"\"\n\n @abc.abstractmethod\n def class_gradient(self, x, label=None, **kwargs):\n \"\"\"\n Compute per-class derivatives w.r.t. `x`.\n\n :param x: Input with shape as expected by the classifier's model.\n :type x: `np.ndarray`\n :param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class\n output is computed for all samples. If multiple values as provided, the first dimension should\n match the batch size of `x`, and each value will be used as target for its corresponding sample in\n `x`. If `None`, then gradients for all classes will be computed for each sample.\n :type label: `int` or `list`\n :return: Array of gradients of input features w.r.t. 
each class in the form\n `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes\n `(batch_size, 1, input_shape)` when `label` parameter is specified.\n :rtype: `np.ndarray`\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def loss_gradient(self, x, y, **kwargs):\n \"\"\"\n Compute the gradient of the loss function w.r.t. `x`.\n\n :param x: Input with shape as expected by the classifier's model.\n :type x: `np.ndarray`\n :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape\n (nb_samples,).\n :type y: `np.ndarray`\n :return: Array of gradients of the same shape as `x`.\n :rtype: `np.ndarray`\n \"\"\"\n raise NotImplementedError\n\n def _apply_preprocessing_gradient(self, x, gradients):\n \"\"\"\n Apply the backward pass through all preprocessing operations to the gradients.\n\n Apply the backward pass through all preprocessing operations and defences on the inputs `(x, y)`. This function\n has to be applied to all gradients returned by the classifier.\n\n :param x: Features, where first dimension is the number of samples.\n :type x: `np.ndarray`\n :param gradients: Input gradients.\n :type gradients: `np.ndarray`\n :return: Gradients after backward step through preprocessing operations and defences.\n :rtype: `np.ndarray`\n \"\"\"\n gradients = self._apply_preprocessing_normalization_gradient(gradients)\n gradients = self._apply_preprocessing_defences_gradient(x, gradients)\n return gradients\n\n def _apply_preprocessing_defences_gradient(self, x, gradients, fit=False):\n \"\"\"\n Apply the backward pass through the preprocessing defences.\n\n Apply the backward pass through all defences of the classifier on the gradients. This function is intended to\n only be called from function `_apply_preprocessing_gradient`.\n\n :param x: Features, where first dimension is the number of samples.\n :type x: `np.ndarray`\n :param gradients: Input gradient.\n :type gradients: `np.ndarray`\n :param fit: `True` if the gradient is computed during training.\n :return: Gradients after backward step through defences.\n :rtype: `np.ndarray`\n \"\"\"\n if self.defences is not None:\n for defence in self.defences[::-1]:\n if fit:\n if defence.apply_fit:\n gradients = defence.estimate_gradient(x, gradients)\n else:\n if defence.apply_predict:\n gradients = defence.estimate_gradient(x, gradients)\n\n return gradients\n\n def _apply_preprocessing_normalization_gradient(self, gradients):\n \"\"\"\n Apply the backward pass through standardisation of `x` to `gradients`.\n\n :param gradients: Input gradients.\n :type gradients: `np.ndarray`\n :return: Gradients after backward step through standardisation.\n :rtype: `np.ndarray\n \"\"\"\n _, div = self.preprocessing\n div = np.asarray(div, dtype=gradients.dtype)\n res = gradients / div\n return res\n\n\nclass ClassifierDecisionTree(ABC):\n \"\"\"\n Base class defining additional classifier functionality for decision-tree-based classifiers This base class has to\n be mixed in with class `Classifier` to extend the minimum classifier functionality.\n \"\"\"\n\n @abc.abstractmethod\n def get_trees(self):\n \"\"\"\n Get the decision trees.\n\n :return: A list of decision trees.\n :rtype: `[Tree]`\n \"\"\"\n raise NotImplementedError\n",
"# MIT License\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nimport numpy as np\n\nfrom art.utils import load_dataset, master_seed\nfrom tests.utils_test import get_classifier_kr_tf, get_classifier_kr_tf_binary\nfrom art.wrappers.output_high_confidence import OutputHighConfidence\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestOutputHighConfidence(unittest.TestCase):\n \"\"\"\n A unittest class for testing the High Confidence Output wrapper.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')\n cls.mnist = (x_train, y_train), (x_test, y_test)\n\n def setUp(self):\n master_seed(1234)\n\n def test_decimals_0_1(self):\n \"\"\"\n Test with cutoff of 0.1.\n \"\"\"\n (_, _), (x_test, _) = self.mnist\n classifier = get_classifier_kr_tf()\n wrapped_classifier = OutputHighConfidence(classifier=classifier, cutoff=0.1)\n\n classifier_prediction_expected = np.asarray([[0.12109935, 0.0498215, 0.0993958, 0.06410096, 0.11366928,\n 0.04645343, 0.06419807, 0.30685693, 0.07616714, 0.05823757]],\n dtype=np.float32)\n wrapped_classifier_prediction_expected = np.asarray([[0.12109935, 0.0, 0.0, 0.0, 0.11366928, 0.0, 0.0,\n 0.30685693, 0.0, 0.0]], dtype=np.float32)\n\n np.testing.assert_array_almost_equal(classifier.predict(x_test[0:1]), classifier_prediction_expected, decimal=4)\n np.testing.assert_array_almost_equal(wrapped_classifier.predict(x_test[0:1]),\n wrapped_classifier_prediction_expected, decimal=4)\n\n def test_decimals_0_2(self):\n \"\"\"\n Test with cutoff of 0.2.\n \"\"\"\n (_, _), (x_test, _) = self.mnist\n classifier = get_classifier_kr_tf()\n wrapped_classifier = OutputHighConfidence(classifier=classifier, cutoff=0.2)\n\n classifier_prediction_expected = np.asarray([[0.12109935, 0.0498215, 0.0993958, 0.06410096, 0.11366928,\n 0.04645343, 0.06419807, 0.30685693, 0.07616714, 0.05823757]],\n dtype=np.float32)\n wrapped_classifier_prediction_expected = np.asarray([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.30685693, 0.0, 0.0]],\n dtype=np.float32)\n\n np.testing.assert_array_almost_equal(classifier.predict(x_test[0:1]), classifier_prediction_expected, decimal=4)\n np.testing.assert_array_almost_equal(wrapped_classifier.predict(x_test[0:1]),\n wrapped_classifier_prediction_expected, decimal=4)\n\n def test_binary_decimals_0_5(self):\n \"\"\"\n Test with cutoff of 0.5 for 
binary classifier.\n \"\"\"\n (_, _), (x_test, _) = self.mnist\n classifier = get_classifier_kr_tf_binary()\n wrapped_classifier = OutputHighConfidence(classifier=classifier, cutoff=0.5)\n\n classifier_prediction_expected = np.asarray([[0.5301345]], dtype=np.float32)\n wrapped_classifier_prediction_expected = np.asarray([[0.5301345]], dtype=np.float32)\n\n np.testing.assert_array_almost_equal(classifier.predict(x_test[0:1]), classifier_prediction_expected, decimal=4)\n np.testing.assert_array_almost_equal(wrapped_classifier.predict(x_test[0:1]),\n wrapped_classifier_prediction_expected, decimal=4)\n\n def test_binary_decimals_0_6(self):\n \"\"\"\n Test with cutoff of 0.6 for binary classifier.\n \"\"\"\n (_, _), (x_test, _) = self.mnist\n classifier = get_classifier_kr_tf_binary()\n wrapped_classifier = OutputHighConfidence(classifier=classifier, cutoff=0.6)\n\n classifier_prediction_expected = np.asarray([[0.5301345]], dtype=np.float32)\n wrapped_classifier_prediction_expected = np.asarray([[0.0]], dtype=np.float32)\n\n np.testing.assert_array_almost_equal(classifier.predict(x_test[0:1]), classifier_prediction_expected, decimal=4)\n np.testing.assert_array_almost_equal(wrapped_classifier.predict(x_test[0:1]),\n wrapped_classifier_prediction_expected, decimal=4)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# MIT License\n#\n# Copyright (C) IBM Corporation 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom art.classifiers import PyTorchClassifier\nfrom art.defences import PixelDefend\nfrom art.utils import load_mnist, master_seed\n\nlogger = logging.getLogger(__name__)\n\n\nclass ModelImage(nn.Module):\n def __init__(self):\n super(ModelImage, self).__init__()\n self.fc = nn.Linear(25, 6400)\n\n def forward(self, x):\n x = x.view(-1, 25)\n logit_output = self.fc(x)\n logit_output = logit_output.view(-1, 5, 5, 1, 256)\n\n return logit_output\n\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.fc = nn.Linear(4, 1024)\n\n def forward(self, x):\n x = x.view(-1, 4)\n logit_output = self.fc(x)\n logit_output = logit_output.view(-1, 4, 256)\n\n return logit_output\n\n\nclass TestPixelDefend(unittest.TestCase):\n def setUp(self):\n # Set master seed\n master_seed(1234)\n\n def test_one_channel(self):\n (x_train, _), (_, _), _, _ = load_mnist()\n x_train = x_train[:2, 10:15, 15:20, :]\n x_train = x_train.astype(np.float32)\n x_train_original = x_train.copy()\n\n # Define the network\n model = ModelImage()\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=0.01)\n self.pixelcnn = PyTorchClassifier(model=model, loss=loss_fn, optimizer=optimizer, input_shape=(1, 28, 28),\n nb_classes=10, clip_values=(0, 1))\n preprocess = PixelDefend(eps=5, pixel_cnn=self.pixelcnn)\n x_defended, _ = preprocess(x_train)\n\n self.assertEqual(x_defended.shape, x_train.shape)\n self.assertTrue((x_defended <= 1.0).all())\n self.assertTrue((x_defended >= 0.0).all())\n\n # Check that x_train has not been modified by attack and classifier\n self.assertAlmostEqual(float(np.max(np.abs(x_train_original - x_train))), 0.0, delta=0.00001)\n\n def test_feature_vectors(self):\n # Define the network\n model = Model()\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=0.01)\n pixel_cnn = PyTorchClassifier(model=model, loss=loss_fn, optimizer=optimizer, input_shape=(4,),\n nb_classes=2, clip_values=(0, 1))\n\n x = np.random.rand(5, 4).astype(np.float32)\n preprocess = PixelDefend(eps=5, pixel_cnn=pixel_cnn)\n x_defended, _ = preprocess(x)\n\n self.assertEqual(x_defended.shape, x.shape)\n self.assertTrue((x_defended <= 1.0).all())\n 
self.assertTrue((x_defended >= 0.0).all())\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.asarray",
"numpy.array"
],
[
"numpy.asarray"
],
[
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"numpy.random.rand",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kashif/spinningup-pytorch | [
"8f3389c239c94b3ff46453f359061ae30d851ce8"
] | [
"fireup/algos/ddpg/ddpg.py"
] | [
"import time\n\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom fireup.algos.ddpg import core\nfrom fireup.utils.logx import EpochLogger\n\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for DDPG agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)\n self.rews_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n idxs = np.random.randint(0, self.size, size=batch_size)\n return dict(\n obs1=self.obs1_buf[idxs],\n obs2=self.obs2_buf[idxs],\n acts=self.acts_buf[idxs],\n rews=self.rews_buf[idxs],\n done=self.done_buf[idxs],\n )\n\n\n\"\"\"\n\nDeep Deterministic Policy Gradient (DDPG)\n\n\"\"\"\n\n\ndef ddpg(\n env_fn,\n actor_critic=core.ActorCritic,\n ac_kwargs=dict(),\n seed=0,\n steps_per_epoch=5000,\n epochs=100,\n replay_size=int(1e6),\n gamma=0.99,\n polyak=0.995,\n pi_lr=1e-3,\n q_lr=1e-3,\n batch_size=100,\n start_steps=10000,\n act_noise=0.1,\n max_ep_len=1000,\n logger_kwargs=dict(),\n save_freq=1,\n):\n \"\"\"\n\n Args:\n env_fn : A function which creates a copy of the environment.\n The environment must satisfy the OpenAI Gym API.\n\n actor_critic: The agent's main model which takes some states ``x`` and\n and actions ``a`` and returns a tuple of:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``pi`` (batch, act_dim) | Deterministically computes actions\n | from policy given states.\n ``q`` (batch,) | Gives the current estimate of Q* for\n | states ``x`` and actions in\n | ``a``.\n ``q_pi`` (batch,) | Gives the composition of ``q`` and\n | ``pi`` for states in ``x``:\n | q(x, pi(x)).\n =========== ================ ======================================\n\n ac_kwargs (dict): Any kwargs appropriate for the actor_critic\n class you provided to DDPG.\n\n seed (int): Seed for random number generators.\n\n steps_per_epoch (int): Number of steps of interaction (state-action pairs)\n for the agent and the environment in each epoch.\n\n epochs (int): Number of epochs to run and train agent.\n\n replay_size (int): Maximum length of replay buffer.\n\n gamma (float): Discount factor. (Always between 0 and 1.)\n\n polyak (float): Interpolation factor in polyak averaging for target\n networks. Target networks are updated towards main networks\n according to:\n\n .. math:: \\\\theta_{\\\\text{targ}} \\\\leftarrow\n \\\\rho \\\\theta_{\\\\text{targ}} + (1-\\\\rho) \\\\theta\n\n where :math:`\\\\rho` is polyak. (Always between 0 and 1, usually\n close to 1.)\n\n pi_lr (float): Learning rate for policy.\n\n q_lr (float): Learning rate for Q-networks.\n\n batch_size (int): Minibatch size for SGD.\n\n start_steps (int): Number of steps for uniform-random action selection,\n before running real policy. 
Helps exploration.\n\n act_noise (float): Stddev for Gaussian exploration noise added to\n policy at training time. (At test time, no noise is added.)\n\n max_ep_len (int): Maximum length of trajectory / episode / rollout.\n\n logger_kwargs (dict): Keyword args for EpochLogger.\n\n save_freq (int): How often (in terms of gap between epochs) to save\n the current policy and value function.\n\n \"\"\"\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n env, test_env = env_fn(), env_fn()\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n # Action limit for clamping: critically, assumes all dimensions share the same bound!\n act_limit = env.action_space.high[0]\n\n # Share information about action space with policy architecture\n ac_kwargs[\"action_space\"] = env.action_space\n\n # Main outputs from computation graph\n main = actor_critic(in_features=obs_dim, **ac_kwargs)\n\n # Target networks\n target = actor_critic(in_features=obs_dim, **ac_kwargs)\n target.eval()\n\n # Experience buffer\n replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)\n\n # Count variables\n var_counts = tuple(\n core.count_vars(module) for module in [main.policy, main.q, main]\n )\n print(\"\\nNumber of parameters: \\t pi: %d, \\t q: %d, \\t total: %d\\n\" % var_counts)\n\n # Separate train ops for pi, q\n pi_optimizer = torch.optim.Adam(main.policy.parameters(), lr=pi_lr)\n q_optimizer = torch.optim.Adam(main.q.parameters(), lr=q_lr)\n\n # Initializing targets to match main variables\n target.load_state_dict(main.state_dict())\n\n def get_action(o, noise_scale):\n pi = main.policy(torch.Tensor(o.reshape(1, -1)))\n a = pi.detach().numpy()[0] + noise_scale * np.random.randn(act_dim)\n return np.clip(a, -act_limit, act_limit)\n\n def test_agent(n=10):\n for _ in range(n):\n o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0\n while not (d or (ep_len == max_ep_len)):\n # Take deterministic actions at test time (noise_scale=0)\n o, r, d, _ = test_env.step(get_action(o, 0))\n ep_ret += r\n ep_len += 1\n logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)\n\n start_time = time.time()\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n total_steps = steps_per_epoch * epochs\n\n # Main loop: collect experience in env and update/log each epoch\n for t in range(total_steps):\n main.eval()\n \"\"\"\n Until start_steps have elapsed, randomly sample actions\n from a uniform distribution for better exploration. 
Afterwards,\n use the learned policy (with some noise, via act_noise).\n \"\"\"\n if t > start_steps:\n a = get_action(o, act_noise)\n else:\n a = env.action_space.sample()\n\n # Step the env\n o2, r, d, _ = env.step(a)\n ep_ret += r\n ep_len += 1\n\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n d = False if ep_len == max_ep_len else d\n\n # Store experience to replay buffer\n replay_buffer.store(o, a, r, o2, d)\n\n # Super critical, easy to overlook step: make sure to update\n # most recent observation!\n o = o2\n\n if d or (ep_len == max_ep_len):\n main.train()\n \"\"\"\n Perform all DDPG updates at the end of the trajectory,\n in accordance with tuning done by TD3 paper authors.\n \"\"\"\n for _ in range(ep_len):\n batch = replay_buffer.sample_batch(batch_size)\n (obs1, obs2, acts, rews, done) = (\n torch.Tensor(batch[\"obs1\"]),\n torch.Tensor(batch[\"obs2\"]),\n torch.Tensor(batch[\"acts\"]),\n torch.Tensor(batch[\"rews\"]),\n torch.Tensor(batch[\"done\"]),\n )\n _, _, q_pi_targ = target(obs2, acts)\n\n # Bellman backup for Q function\n backup = (rews + gamma * (1 - done) * q_pi_targ).detach()\n\n # DDPG Q loss\n _, q, _ = main(obs1, acts)\n q_loss = F.mse_loss(q, backup)\n\n # Q-learning update\n q_optimizer.zero_grad()\n q_loss.backward()\n q_optimizer.step()\n logger.store(LossQ=q_loss.item(), QVals=q.data.numpy())\n\n # DDPG Policy loss\n _, _, q_pi = main(obs1, acts)\n pi_loss = -q_pi.mean()\n\n # Policy update\n pi_optimizer.zero_grad()\n pi_loss.backward()\n pi_optimizer.step()\n logger.store(LossPi=pi_loss.item())\n\n # Polyak averaging for target parameters\n for p_main, p_target in zip(main.parameters(), target.parameters()):\n p_target.data.copy_(\n polyak * p_target.data + (1 - polyak) * p_main.data\n )\n\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n\n # End of epoch wrap-up\n if t > 0 and t % steps_per_epoch == 0:\n epoch = t // steps_per_epoch\n\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs - 1):\n logger.save_state({\"env\": env}, main, None)\n\n # Test the performance of the deterministic version of the agent.\n test_agent()\n\n # Log info about epoch\n logger.log_tabular(\"Epoch\", epoch)\n logger.log_tabular(\"EpRet\", with_min_and_max=True)\n logger.log_tabular(\"TestEpRet\", with_min_and_max=True)\n logger.log_tabular(\"EpLen\", average_only=True)\n logger.log_tabular(\"TestEpLen\", average_only=True)\n logger.log_tabular(\"TotalEnvInteracts\", t)\n logger.log_tabular(\"QVals\", with_min_and_max=True)\n logger.log_tabular(\"LossPi\", average_only=True)\n logger.log_tabular(\"LossQ\", average_only=True)\n logger.log_tabular(\"Time\", time.time() - start_time)\n logger.dump_tabular()\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--env\", type=str, default=\"HalfCheetah-v2\")\n parser.add_argument(\"--hid\", type=int, default=300)\n parser.add_argument(\"--l\", type=int, default=1)\n parser.add_argument(\"--gamma\", type=float, default=0.99)\n parser.add_argument(\"--seed\", \"-s\", type=int, default=0)\n parser.add_argument(\"--epochs\", type=int, default=50)\n parser.add_argument(\"--exp_name\", type=str, default=\"ddpg\")\n args = parser.parse_args()\n\n from fireup.utils.run_utils import setup_logger_kwargs\n\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n ddpg(\n lambda: 
gym.make(args.env),\n actor_critic=core.ActorCritic,\n ac_kwargs=dict(hidden_sizes=[args.hid] * args.l),\n gamma=args.gamma,\n seed=args.seed,\n epochs=args.epochs,\n logger_kwargs=logger_kwargs,\n )\n"
] | [
[
"numpy.random.seed",
"numpy.clip",
"torch.Tensor",
"torch.manual_seed",
"torch.nn.functional.mse_loss",
"numpy.random.randn",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
marinarierav-uab/foveabox | [
"1f313fd14aaf018aadb0c6b3de163eb0a3b1fbd5"
] | [
"tools/challenge_validation.py"
] | [
"import os\n\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom scipy.ndimage.measurements import label\n\n\ndef calculate_average_classif_results(results_dict: dict, thresholds, output_file):\n avg = pd.DataFrame(columns=[\"Thr\", \"TP\", \"FP\", \"TN\", \"FN\", \"Accuracy\", \"Conf\"])\n for threshold in thresholds:\n # TP, FP, FN, TN, RT\n results = [0, 0, 0, 0]\n mean_conf = 0\n for vid, res_dict in results_dict.items(): # for each video\n results = [res + new for res, new in zip(results, res_dict[threshold][:4])]\n\n mean_conf += res_dict[threshold][-1]\n\n # switched values fn <-> tn, as requested by J.B.\n tp, fp, tn, fn = results[0], results[1], results[2], results[3]\n\n try:\n acc = (tp + tn) / (tp + fp + fn + tn)\n except:\n acc = -1\n try:\n mean_conf /= len(results_dict.items())\n except:\n mean_conf = 0\n\n # switched values fn <-> tn, as requested by J.B.\n row = [threshold, tp, fp, tn, fn, acc, mean_conf]\n avg.loc[-1] = row\n\n avg.index += 1\n avg.sort_index()\n avg.reset_index(inplace=True, drop=True)\n\n print(avg)\n\n avg.to_csv(output_file)\n\n\ndef calculate_average_results(results_dict: dict, thresholds, output_file):\n avg = pd.DataFrame(columns=[\"Thr\", \"TP\", \"FP\", \"TN\", \"FN\", 'Accuracy', \"Precision\", \"Recall\", \"Specificity\", \"F1\", \"F2\", \"Mean RT\"])\n for threshold in thresholds:\n # TP, FP, FN, TN, RT\n results = [0, 0, 0, 0]\n sums = [0, 0, 0, 0]\n srt = 0\n drt = 1e-7\n for vid, res_dict in results_dict.items(): # for each video\n results = [res + new for res, new in zip(results, res_dict[threshold][:-1])]\n #sums = [val + new for val, new in zip(sums, results)]\n #print(res_dict[threshold][:-1])\n #print(sum)\n #print(results)\n srt = srt + res_dict[threshold][-1] if res_dict[threshold][-1] != -1 else srt\n drt = drt + 1 if res_dict[threshold][-1] != -1 else drt\n\n # switched values fn <-> tn, as requested by J.B.\n tp, fp, tn, fn = results[0], results[1], results[2], results[3]\n\n try:\n acc = (tp + tn) / (tp + fp + fn + tn)\n except:\n acc = -1\n try:\n pre = tp / (tp + fp)\n except:\n pre = -1\n try:\n rec = tp / (tp + fn)\n except:\n rec = -1\n try:\n spec = tn / (fp + tn)\n except:\n spec = -1\n try:\n mean_rt = srt / drt\n except:\n mean_rt = -1\n\n f1 = (2*pre*rec) / (pre+rec)\n f2 = (5*pre*rec) / ( (4*pre) + rec)\n\n # switched values fn <-> tn, as requested by J.B.\n row = [threshold, tp, fp, tn, fn, acc, pre, rec, spec, f1, f2, mean_rt]\n avg.loc[-1] = row\n\n avg.index += 1\n avg.sort_index()\n avg.reset_index(inplace=True, drop=True)\n\n print(avg)\n\n avg.to_csv(output_file)\n\n\ndef save_detection_plot(output_folder, threshold, vid_folder, video_gt, video_pred):\n title = \"Video: {} - threshold: {}\".format(vid_folder.split(\"/\")[-1], threshold)\n plt.title(title)\n plt.plot(video_gt, color='blue')\n plt.plot(video_pred, color='gold')\n plt.savefig(os.path.join(output_folder, \"detect_plot-{}-{}.png\".format(vid_folder.split(\"/\")[-1], threshold)))\n plt.clf()\n\n\ndef process_video_for_detection(file, has_confidence, thresh, vid_folder):\n video_len = len(os.listdir(vid_folder)) + 1\n video_gt = np.zeros((video_len, 1))\n video_pred = np.zeros((video_len, 1))\n\n first_polyp = -1\n first_detected_polyp = -1\n\n tp, fp, fn, tn = 0, 0, 0, 0\n for frame in sorted(os.listdir(vid_folder)):\n\n polyp_n = int(frame.split(\"_\")[0].split(\"-\")[1])\n im_frame = Image.open(os.path.join(vid_folder, frame))\n is_polyp = np.asarray(im_frame).sum() > 0\n video_gt[polyp_n] = 
1.1 if is_polyp else 0\n\n if is_polyp and first_polyp == -1:\n first_polyp = polyp_n\n\n frame_output = file.loc[file[0] == polyp_n]\n if has_confidence:\n frame_output = frame_output.loc[frame_output[2] >= thresh]\n\n if frame_output.empty:\n if is_polyp:\n fn += 1\n else:\n tn += 1\n else:\n pred_out = frame_output[1].tolist()[0]\n if pred_out:\n if is_polyp:\n tp += 1\n if first_detected_polyp == -1:\n first_detected_polyp = polyp_n\n else:\n fp += 1\n else:\n if is_polyp:\n fn += 1\n else:\n tn += 1\n\n video_pred[polyp_n] = 0.9\n\n rt = first_detected_polyp - first_polyp if first_detected_polyp != -1 else -1\n\n # switched values fn <-> tn, as requested by J.B.\n return [tp, fp, tn, fn, rt], video_gt, video_pred\n\n\ndef process_video_for_localization(file, has_confidence, threshold, vid_folder):\n tp, fp, tn, fn = 0, 0, 0, 0\n histo_tp, histo_fp, histo_tn, histo_fn = 0, 0, 0, 0\n\n # HISTOLOGIAS DE VIDEOS DE TEST (eventually should be loaded from file)\n no_adenomas = [2, 16]\n\n first_polyp = -1\n first_detected_polyp = -1\n i = 0\n\n vid_n = int(vid_folder.split('/')[-1])\n histologia_real = 0 if (vid_n in no_adenomas) else 1\n\n for frame in sorted(os.listdir(vid_folder)):\n i+=1\n\n #print(\"frame\", i)\n polyp_n = int(frame.split(\"_\")[0].split(\"-\")[1])\n im_frame = Image.open(os.path.join(vid_folder, frame))\n im_frame_np = np.asarray(im_frame, dtype=int)\n is_polyp = im_frame_np.sum() > 0\n\n # 8-connected\n kernel = np.array([[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]])\n labeled_frame, max_polyp = label(im_frame, structure=kernel)\n\n if is_polyp and first_polyp == -1:\n first_polyp = polyp_n\n frame_output = file.loc[file[0] == polyp_n]\n if has_confidence:\n frame_output = frame_output.loc[frame_output[3] >= threshold]\n\n #if i>35:\n # break\n #print(frame)\n\n if frame_output.empty:\n if is_polyp:\n fn += max_polyp\n else:\n tn += 1\n else:\n already_detected = []\n\n for detection_row in frame_output.iterrows():\n detection = detection_row[1]\n frame_pred = True\n centroid_x = int(detection[1])\n centroid_y = int(detection[2])\n\n #print(\"Detection:\",centroid_x, centroid_y)\n #print(im_frame_np[centroid_y-5:centroid_y+5, centroid_x-5:centroid_x+5])\n\n if frame_pred:\n if is_polyp:\n if im_frame_np[centroid_y, centroid_x] != 0:\n if labeled_frame[centroid_y, centroid_x] not in already_detected:\n tp += 1\n already_detected += [labeled_frame[centroid_y, centroid_x]]\n\n if first_detected_polyp == -1:\n first_detected_polyp = polyp_n\n\n # HISTOLOGIAS:\n histologia_red = int(detection[4])\n\n if (histologia_red == 0) and (histologia_real == 0):\n histo_tn += 1\n elif (histologia_red == 0) and (histologia_real == 1):\n histo_fn += 1\n elif (histologia_red == 1) and (histologia_real == 0):\n histo_fp += 1\n elif (histologia_red == 1) and (histologia_real == 1):\n histo_tp += 1\n else:\n fp += 1\n else:\n fp += 1\n else:\n if not is_polyp:\n tn += 1\n\n detected_in_frame = len(set(already_detected))\n fn += (max_polyp - detected_in_frame)\n\n rt = first_detected_polyp - first_polyp if first_detected_polyp != -1 else -1\n\n positives = histo_fp + histo_tp\n negatives = histo_fn + histo_tn\n pred_histo = 1 if positives >= negatives else 0\n if(positives+negatives) == 0:\n conf = 0\n acc = 0\n else:\n conf = positives/(positives+negatives) if positives >= negatives else negatives/(positives+negatives)\n acc = (histo_tp + histo_tn) / (positives + negatives)\n\n # switched values fn <-> tn, as requested by J.B.\n return [tp, fp, tn, fn, rt], [histo_tp, histo_fp, histo_tn, 
histo_fn, acc, histologia_real, pred_histo, conf]\n\n\ndef generate_results_per_video(videos, confidences, thresholds, gt):\n detect_dict = {}\n local_dict = {}\n classif_dict = {}\n for threshold in thresholds:\n # TODO change plots\n res_detection, _, _ = process_video_for_detection(videos[0], confidences[0], threshold, gt)\n res_localization, res_classif = process_video_for_localization(videos[1], confidences[1], threshold, gt)\n print(\" -thr\",threshold, \"done...\")\n\n detect_dict[threshold] = res_detection\n local_dict[threshold] = res_localization\n classif_dict[threshold] = res_classif\n return detect_dict, local_dict, classif_dict\n\n\ndef do_giana_eval(folder_detection, folder_localization, folder_gt, root_folder_output, team, thr=0, series=False):\n\n # DEBUGGING !!!!!\n nvids = 18 # should be 18\n\n folder_output_detection = os.path.join(root_folder_output, \"Detection/\"+team)\n folder_output_localization = os.path.join(root_folder_output, \"Localization/\"+team)\n folder_output_classif = os.path.join(root_folder_output, \"Classif/\"+team)\n average_detection_output_file = os.path.join(folder_output_detection, \"average.csv\")\n average_localization_output_file = os.path.join(folder_output_localization, \"average.csv\")\n average_classif_output_file = os.path.join(folder_output_classif, \"average.csv\")\n\n if series:\n thresholds = [x / 10 for x in range(1, 10)]\n elif thr!=0:\n thresholds = [thr]\n else:\n thresholds = [0]\n\n if not os.path.exists(folder_output_detection):\n os.makedirs(folder_output_detection)\n if not os.path.exists(folder_output_localization):\n os.makedirs(folder_output_localization)\n if not os.path.exists(folder_output_classif):\n os.makedirs(folder_output_classif)\n\n files_detection = sorted(os.listdir(folder_detection))[0:nvids]\n files_localization = sorted(os.listdir(folder_localization))[0:nvids]\n\n results_detection = {}\n results_localization = {}\n results_classif = {}\n\n # for each video:\n for detection, localization in zip(files_detection, files_localization):\n\n detection_csv = os.path.join(folder_detection, detection)\n detection_df = pd.read_csv(detection_csv, header=None)\n detection_confidence = detection_df.shape[1] > 2\n\n localization_csv = os.path.join(folder_localization, localization)\n localization_df = pd.read_csv(localization_csv, header=None)\n localization_confidence = localization_df.shape[1] > 3\n\n # both named the same\n vid_name = localization_csv.split(\"/\")[-1].split(\".\")[0]\n gt_vid_folder = os.path.join(folder_gt, str(int(vid_name)))\n print('Processing video', vid_name, \"...\")\n res_detection, res_localization, res_classif = generate_results_per_video((detection_df, localization_df),\n (detection_confidence, localization_confidence),\n thresholds, gt_vid_folder)\n\n pd.DataFrame.from_dict(res_detection, columns=[\"TP\", \"FP\", \"TN\", \"FN\", \"RT\"], orient='index').to_csv(\n os.path.join(folder_output_detection, \"d{}.csv\".format(vid_name)))\n results_detection[vid_name] = res_detection\n\n pd.DataFrame.from_dict(res_localization, columns=[\"TP\", \"FP\", \"TN\", \"FN\", \"RT\"], orient='index').to_csv(\n os.path.join(folder_output_localization, \"l{}.csv\".format(vid_name)))\n results_localization[vid_name] = res_localization\n\n pd.DataFrame.from_dict(res_classif, columns=[\"TP\", \"FP\", \"TN\", \"FN\", \"Acc\", \"Histo-real\", \"Histo-pred\", \"Conf\"], orient='index').to_csv(\n os.path.join(folder_output_classif, \"l{}.csv\".format(vid_name)))\n results_classif[vid_name] = res_classif\n\n 
calculate_average_results(results_detection, thresholds, average_detection_output_file)\n calculate_average_results(results_localization, thresholds, average_localization_output_file)\n calculate_average_classif_results(results_classif, thresholds, average_classif_output_file)\n\n #nvids = len(results_detection)\n\n global_detection_list = np.zeros([nvids*len(thresholds), 7])\n global_localization_list = np.zeros([nvids*len(thresholds), 7])\n global_classif_list = np.zeros([nvids*len(thresholds), 10])\n\n i=0;\n j=0;\n k=0;\n for vidname in sorted(results_detection.keys()):\n\n vid = int(vidname)\n\n for key, vals in results_detection[vidname].items():\n\n global_detection_list[i, :] = ([vid] + [key] + vals)\n i += 1\n\n #print(np.around(global_detection_list, decimals=4))\n\n for key, vals in results_localization[vidname].items():\n global_localization_list[j, :] = ([vid] + [key] + vals)\n j += 1\n\n for key, vals in results_classif[vidname].items():\n global_classif_list[k, :] = ([vid] + [key] + vals)\n k += 1\n\n\n #print(\"\")\n\n columns = [\"Video\", \"Thr\", \"TP\", \"FP\", \"TN\", \"FN\", \"RT\"]\n detframe = pd.DataFrame(global_detection_list, columns=columns)\n locframe = pd.DataFrame(global_localization_list, columns=columns)\n classifframe = pd.DataFrame(global_classif_list, columns=[\"Video\", \"Thr\", \"TP\", \"FP\", \"TN\", \"FN\", \"Acc\", \"Histo-real\", \"Histo-pred\", \"Conf\"])\n\n print(\"\")\n\n detframe.to_csv(os.path.join(folder_output_detection, \"detection.csv\"))\n locframe.to_csv(os.path.join(folder_output_localization, \"localization.csv\"))\n classifframe.to_csv(os.path.join(folder_output_classif, \"classification.csv\"))\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n\n ap = ArgumentParser()\n ap.add_argument(\"--res\", \"--results_root\", type=str, default='results')\n ap.add_argument(\"--thr\", \"--threshold\", type=float, default=0)\n ap.add_argument(\"--team\", \"--team\", type=str, required=True)\n ap.add_argument(\"--out\", \"--output_folder\", type=str, default=None)\n ap.add_argument(\"--list\", action='store_true', help=\"threshold series\")\n\n params = ap.parse_args()\n team = params.team.split('.bbox.json')[0].split('json/')[-1]\n\n folder_detection = os.path.join(params.res, \"Detection\")\n folder_detection = os.path.join(folder_detection, team)\n folder_localization = os.path.join(params.res, \"Localization\")\n folder_localization = os.path.join(folder_localization, team)\n output_folder = params.out\n\n if output_folder is None:\n output_folder = os.path.join(params.res, \"results_giana\")\n folder_gt = \"/home/marina/Downloads/DATASETS/cvcvideoclinicdbtest/masks/\"\n\n do_giana_eval(folder_detection, folder_localization, folder_gt, output_folder, team, params.thr, params.list)\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"numpy.asarray",
"pandas.DataFrame",
"scipy.ndimage.measurements.label",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"pandas.DataFrame.from_dict",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
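Illustration (not part of the dataset row above): the code cell in this row evaluates polyp detection and localization by labelling connected regions of each ground-truth mask with an 8-connected structure and checking whether predicted centroids fall inside a labelled region. The following is a minimal, self-contained sketch of that pattern; the mask, the centroid, and the region sizes are invented for the example and are not taken from the dataset.

# Minimal sketch of the connected-component check used in the localization code.
# The mask and centroid values below are made up for illustration.
import numpy as np
from scipy.ndimage import label  # newer path for scipy.ndimage.measurements.label

mask = np.zeros((10, 10), dtype=int)
mask[1:3, 1:3] = 1   # first ground-truth region
mask[6:9, 6:9] = 1   # second ground-truth region

# 8-connectivity: diagonal neighbours belong to the same region
structure = np.ones((3, 3), dtype=int)
labeled, num_regions = label(mask, structure=structure)
print(num_regions)            # 2

centroid_y, centroid_x = 7, 7
hit_region = labeled[centroid_y, centroid_x]
print(hit_region != 0)        # True: this detection lands inside region 2

Counting `num_regions` per frame and tracking which labels were already matched is what lets the script above credit at most one true positive per distinct polyp region.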
dorlivne/PoPS | [
"088425d1a40a4c2e6856b07744281cd8ab9bce3b",
"088425d1a40a4c2e6856b07744281cd8ab9bce3b"
] | [
"Pacman/processor.py",
"utils/plot_utils.py"
] | [
"import numpy as np\nfrom PIL import Image\nfrom copy import deepcopy\nINPUT_SHAPE = (84, 84)\n\n\ndef init_state():\n # return np.zeros((84, 84, 4))\n return np.zeros((4, 84, 84))\n\ndef append_frame(state, frame):\n # new_state = deepcopy(state)\n # new_state[:, :, :-1] = state[:, :, 1:]\n # new_state[:, :, -1] = frame\n new_state = deepcopy(state)\n new_state[:-1, :, :, ] = state[1:, :, :]\n new_state[-1, :, :] = frame\n del state\n return new_state\n\n\ndef process_observation(observation):\n assert observation.ndim == 3\n img = Image.fromarray(observation)\n img = img.resize(INPUT_SHAPE).convert('L') # resize and convert to grayscale\n processed_observation = np.array(img)\n assert processed_observation.shape == INPUT_SHAPE\n return processed_observation.astype('float32') / 255. # saves storage in experience memory\n\n\ndef process_state_batch(batch):\n return np.asarray(batch).astype('float32') / 255.\n\n\ndef clip_rewards(reward):\n return np.clip(reward, -1, 1)\n",
"from matplotlib import pyplot as plt\nimport math\n\ndef plot_nnz_vs_accuracy(data_policy, data_pruned, legend=('policy_dist', 'PDX2'),\n title='NNZ_vs_Accuracy', xlabel='NNZ0', ylabel='accuracy'):\n fig = plt.figure()\n x_policy = data_policy[0][:]\n x_pruned = data_pruned[0][:]\n acc_policy = data_policy[1][:]\n acc_pruned = data_pruned[1][:]\n plt.plot(x_policy, acc_policy, marker='o', color='b')\n plt.plot(x_pruned, acc_pruned, marker='^', color='g')\n plt.legend(legend)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.grid()\n plt.show()\n fig.savefig('results.png')\n\ndef plot_nnz_vs_accuracy_latest(data_policy, data_pruned, data_PDX2):\n fig = plt.figure()\n x = data_policy[0][:]\n acc_policy = data_policy[1][:]\n acc_pruned = data_pruned[1][:]\n acc_PDX2 = data_PDX2[1][:]\n plt.plot(x, acc_policy, marker='o', color='b')\n plt.plot(x, acc_pruned, marker='^', color='g')\n plt.plot(x, acc_PDX2, marker='*', color='r')\n plt.legend(('policy_dist', 'pruning', 'PDX2'))\n plt.xlabel('NNZ')\n plt.ylabel('accuracy')\n plt.title('NNZ_vs_Accuracy')\n plt.grid()\n plt.show()\n fig.savefig('results.png')\n\ndef plot_weights(agent, title: str, figure_num: int , range=5):\n weights_matrices = agent.sess.run(agent.weights_matrices)\n plot_histogram(weights_matrices, title, include_zeros=False, figure_num=figure_num, range=(-range, range))\n\n\ndef plot_histogram(weights_list: list,\n image_name: str,\n range: tuple,\n include_zeros=True,\n figure_num=1):\n\n \"\"\"A function to plot weights distribution\"\"\"\n\n weights = []\n for w in weights_list:\n weights.extend(list(w.ravel()))\n\n if not include_zeros:\n weights = [w for w in weights if w != 0]\n\n fig = plt.figure(num=figure_num, figsize=(10, 7))\n ax = fig.add_subplot(111)\n\n ax.hist(weights,\n bins=100,\n facecolor='green',\n edgecolor='black',\n alpha=0.7,\n range=range)\n\n ax.set_title('Weights distribution \\n ' + image_name)\n ax.set_xlabel('Weights values')\n ax.set_ylabel('Number of weights')\n\n fig.savefig(image_name + '.png')\n\n\ndef plot_graph(data, name: str, figure_num=1, file_name=None, xaxis='sparsity', yaxis='accuracy'):\n fig = plt.figure(figure_num)\n x = data[0]\n y = data[1]\n plt.plot(x[:], y[:], 'ro')\n plt.xlabel(xaxis)\n plt.ylabel(yaxis)\n plt.title(name)\n plt.grid()\n filename = name if file_name is None else file_name\n fig.savefig(filename + '.png')\n\ndef plot_conv_weights(model, title='weights', figure_num=1):\n weights = model.get_flat_weights()\n plot_histogram(weights_list=weights, image_name=title, include_zeros=False, range=(-1.0, 1.0), figure_num=figure_num)\n"
] | [
[
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"numpy.clip"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
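Illustration (not part of the dataset row above): the Pacman/processor.py cell in this row resizes raw frames to 84x84 grayscale, scales pixel values to [0, 1], and keeps a channel-first stack of the last four frames. A minimal sketch of that flow, using a random array as a stand-in for a real emulator observation:

# Minimal sketch of the frame preprocessing and 4-frame stacking convention.
import numpy as np
from PIL import Image

INPUT_SHAPE = (84, 84)

def process_observation(observation: np.ndarray) -> np.ndarray:
    # resize, convert to grayscale, and scale to [0, 1]
    img = Image.fromarray(observation).resize(INPUT_SHAPE).convert('L')
    return np.asarray(img, dtype=np.float32) / 255.0

def append_frame(state: np.ndarray, frame: np.ndarray) -> np.ndarray:
    # drop the oldest frame and append the newest (channel-first: 4 x 84 x 84)
    return np.concatenate([state[1:], frame[None, ...]], axis=0)

state = np.zeros((4, *INPUT_SHAPE), dtype=np.float32)
observation = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)
state = append_frame(state, process_observation(observation))
print(state.shape)  # (4, 84, 84)

The channel-first layout matches the commented-out channel-last variant in the original file; only one of the two orderings should be used consistently by the agent's network.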
kim-com/tensorflow | [
"4301e3f34b8da528c58bdafe05cd66c8a55fce9e",
"4301e3f34b8da528c58bdafe05cd66c8a55fce9e"
] | [
"tensorflow/python/eager/function_cache.py",
"tensorflow/python/framework/type_spec.py"
] | [
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Cache to manage concrete functions and their signatures.\"\"\"\n\nimport collections\nfrom typing import Optional, Sequence, Tuple\n\nfrom tensorflow.core.function import trace_type\nfrom tensorflow.core.function.polymorphism import type_dispatch\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.framework import func_graph as func_graph_module\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.saved_model import save_context\nfrom tensorflow.python.types import trace\nfrom tensorflow.python.util import memory\n\n# TODO(b/182990542): Enable and remove flag when stable.\nDELETE_WITH_WEAKREF = False\n\nExecutionContext = collections.namedtuple(\"ExecutionContext\", [\n \"parent_graph\",\n \"device_functions\",\n \"colocation_stack\",\n \"in_cross_replica_context\",\n \"variable_policy\",\n \"xla_context_id\",\n])\n\n\nclass FunctionCacheKey(trace.TraceType):\n \"\"\"The unique key associated with a concrete function.\n\n Attributes:\n function_signature: A TraceType corresponding to the function arguments.\n call_context: The ExecutionContext for when the function_signature was\n generated.\n \"\"\"\n\n def __init__(self, function_signature: trace.TraceType,\n call_context: ExecutionContext):\n self.function_signature = function_signature\n self.call_context = call_context\n\n def is_subtype_of(self, other: trace.TraceType) -> bool:\n if not isinstance(other, FunctionCacheKey):\n return False\n\n if self.call_context != other.call_context:\n return False\n\n return self.function_signature.is_subtype_of(other.function_signature)\n\n def most_specific_common_supertype(\n self, others: Sequence[trace.TraceType]) -> Optional[\"FunctionCacheKey\"]:\n if not all(\n isinstance(other, FunctionCacheKey) and\n self.call_context == other.call_context for other in others):\n return None\n\n common = self.function_signature.most_specific_common_supertype(\n [other.function_signature for other in others])\n\n if common is None:\n return None\n\n return FunctionCacheKey(common, self.call_context)\n\n def __hash__(self) -> int:\n return hash((self.call_context, self.function_signature))\n\n def __eq__(self, other) -> bool:\n if not isinstance(other, trace.TraceType):\n return NotImplemented\n\n if not isinstance(other, FunctionCacheKey):\n return False\n\n return (self.call_context == other.call_context and\n self.function_signature == other.function_signature)\n\n def __repr__(self) -> str:\n return (\n f\"{type(self).__name__}(function_signature={repr(self.function_signature)},\"\n f\" call_context={repr(self.call_context)})\")\n\n\nclass FunctionCache:\n \"\"\"A container for managing concrete functions.\"\"\"\n\n __slots__ = [\n \"_missed\", \"_primary\", 
\"_dispatch_table\", \"arg_relaxed_specs\",\n \"arg_relaxed\", \"_garbage_collectors\"\n ]\n\n def __init__(self):\n # The set of functions that have been missed; entries are ExecutionContext.\n self._missed = set()\n # The primary cache, mapping FunctionCacheKey to a concrete function.\n self._primary = collections.OrderedDict()\n\n # Maps a FunctionCacheKey K to a FunctionCacheKey V such that it is safe\n # to dispatch K to the concrete function of V that exists in _primary.\n # Used to lookup posible concrete functions when K is not in _primary.\n self._dispatch_table = type_dispatch.TypeDispatchTable()\n\n # TODO(b/202430155): Incorporate relaxation logic inside FunctionCache.\n # A cache key lookup, mapping a cache key generated without shape info to a\n # flat list of `TypeSpec`s with relaxed shapes (one for each flattened\n # argument). Arguments that are not Tensors or `CompositeTensor`s contain a\n # `None` for the corresponding relaxed spec.\n self.arg_relaxed_specs = collections.OrderedDict()\n # The secondary cache, mapping a cache key generated without shape info to a\n # function.\n self.arg_relaxed = collections.OrderedDict()\n # All OrderedDicts require manual garbage collection.\n\n self._garbage_collectors = [\n _FunctionGarbageCollector(self._primary),\n _FunctionGarbageCollector(self.arg_relaxed),\n _FunctionGarbageCollector(self.arg_relaxed_specs)\n ]\n\n # Note: Instead of returning any viable function, we can return the most\n # specfic one by maintaining trees of traces where children are more specific\n # traces of their parents.\n def lookup(self, key: FunctionCacheKey, use_function_subtyping: bool):\n \"\"\"Looks up a concrete function based on the key.\"\"\"\n if not use_function_subtyping:\n return self._primary.get(key, None)\n\n dispatch_key = self._dispatch_table.dispatch(key)\n if dispatch_key is not None:\n return self._primary[dispatch_key]\n\n return None\n\n def delete(self, key: FunctionCacheKey):\n \"\"\"Deletes a concrete function given the key it was added with.\"\"\"\n if key not in self._primary:\n return False\n\n del self._primary[key]\n self._dispatch_table.delete(key)\n\n return True\n\n def add(self, key: FunctionCacheKey,\n deletion_observer: trace_type.WeakrefDeletionObserver,\n concrete):\n \"\"\"Adds a new concrete function alongside its key.\n\n Args:\n key: A FunctionCacheKey object corresponding to the provided `concrete`.\n deletion_observer: A WeakrefDeletionObserver object for the `key`.\n concrete: The concrete function to be added to the cache.\n \"\"\"\n self._primary[key] = concrete\n self._dispatch_table.add_target(key)\n deletion_observer.add_listener(\n lambda: self.delete(key) if DELETE_WITH_WEAKREF else None)\n\n # TODO(b/205971333): Remove this function.\n def clear(self):\n \"\"\"Removes all concrete functions from the cache.\"\"\"\n self._primary.clear()\n self._dispatch_table.clear()\n self.arg_relaxed_specs.clear()\n self.arg_relaxed.clear()\n\n def values(self):\n \"\"\"Returns a list of all `ConcreteFunction` instances held by this cache.\"\"\"\n # We need to simultaneously make sure our returned concrete functions are\n # unique *and* make sure they are returned in a deterministic order for\n # serialization.\n #\n # TODO(b/174215821): It's likely that we ultimately would just prefer to\n # choose the most specific concrete function shape given a set of\n # arguments. 
If and when that is implemented, this logic can be revisited.\n primary_functions = set(self._primary.values())\n return list(self._primary.values()) + [\n v for v in self.arg_relaxed.values() if v not in primary_functions\n ]\n\n def has_call_context(self, call_context: ExecutionContext) -> bool:\n \"\"\"Checks if an ExcutionContext was observed.\"\"\"\n return call_context in self._missed\n\n def add_call_context(self, call_context: ExecutionContext) -> None:\n \"\"\"Adds a new ExcutionContext observation.\"\"\"\n self._missed.add(call_context)\n\n\nclass _FunctionGarbageCollector(object):\n \"\"\"Cleans up cycles when a defun goes out of scope.\"\"\"\n\n __slots__ = [\"_cache\"]\n\n def __init__(self, cache):\n self._cache = cache\n\n def __del__(self):\n if func_graph_module is None or memory is None:\n return\n try:\n while self._cache:\n self._cache.popitem()\n memory.dismantle_ordered_dict(self._cache)\n except: # pylint: disable=bare-except\n pass\n\n\ndef make_cache_key(\n args,\n include_tensor_ranks_only: bool = False\n) -> Tuple[FunctionCacheKey, trace_type.WeakrefDeletionObserver]:\n \"\"\"Computes the cache key given the function arguments.\"\"\"\n signature_context = trace_type.SignatureContext(\n include_tensor_ranks_only)\n function_signature = trace_type.make_function_signature(\n args, signature_context)\n return FunctionCacheKey(\n function_signature,\n _make_execution_context()), signature_context.deletion_observer\n\n\ndef _make_execution_context() -> ExecutionContext:\n \"\"\"Generates an ExecutionContext based on current contextual info.\"\"\"\n ctx = context.context()\n\n # Don't need to open an init_scope if the _cache_key call is in eager mode\n # already.\n executing_eagerly = ctx.executing_eagerly()\n parent_graph = None\n xla_context_id = 0\n if not executing_eagerly:\n # We want to force function retracing for each different\n # XLAControlFlowContext, so add `xla_context_id` to the cache key.\n xla_context = _enclosing_xla_context()\n if xla_context is not None and xla_context.RequiresUniqueFunctionRetracing(\n ):\n xla_context_id = id(xla_context)\n\n with ops.init_scope():\n # The graph, or whether we're executing eagerly, should be a part of the\n # cache key so we don't improperly capture tensors such as variables.\n executing_eagerly = ctx.executing_eagerly()\n parent_graph = None if executing_eagerly else ops.get_default_graph()\n\n # pylint: disable=protected-access\n default_graph = ops.get_default_graph()\n # TODO(b/117617952): The current distribution strategy will affect graph\n # building (e.g. 
accessing different variables from different devices) and\n # so requires retracing for each device.\n strategy_stack = default_graph._distribution_strategy_stack\n uses_distribution_strategy = (\n strategy_stack and\n strategy_stack[-1].strategy.extended._retrace_functions_for_each_device)\n if executing_eagerly:\n colocation_stack = ()\n if uses_distribution_strategy:\n device_functions = (pydev.merge_device(ctx.device_name),)\n else:\n device_functions = ()\n else:\n colocation_stack = tuple(default_graph._colocation_stack.peek_objs())\n if (uses_distribution_strategy or\n func_graph_module.device_stack_has_callable(\n default_graph._device_function_stack)):\n # Putting the device in the cache key ensures that call-site device\n # annotations are respected.\n device_functions = tuple(default_graph._device_functions_outer_to_inner)\n else:\n device_functions = ()\n\n in_cross_replica_context = False\n try:\n in_cross_replica_context = (strategy_stack[-1].replica_context is None) # pylint: disable=protected-access\n except (AttributeError, IndexError):\n pass\n\n if save_context.in_save_context():\n variable_policy = (\n save_context.get_save_options().experimental_variable_policy)\n else:\n variable_policy = None\n\n return ExecutionContext(parent_graph, device_functions, colocation_stack,\n in_cross_replica_context, variable_policy,\n xla_context_id)\n\n\ndef _enclosing_xla_context():\n \"\"\"Returns the XLAControlFlowContext, which exists inside a tpu.rewrite().\"\"\"\n graph = ops.get_default_graph()\n while graph is not None:\n # pylint: disable=protected-access\n context_ = graph._get_control_flow_context()\n # pylint: enable=protected-access\n while context_ is not None:\n if isinstance(context_, control_flow_ops.XLAControlFlowContext):\n return context_\n context_ = context_.outer_context\n # This may be a FuncGraph due to defuns or v2 control flow. We need to\n # find the original graph with the XLAControlFlowContext.\n graph = getattr(graph, \"outer_graph\", None)\n return None\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Type specifications for TensorFlow APIs.\"\"\"\n\nimport abc\nimport collections\nimport functools\nimport re\nfrom typing import List, Optional, Sequence, Any\nimport warnings\n\nimport numpy as np\n\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.types import trace\nfrom tensorflow.python.util import _pywrap_utils\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util.lazy_loader import LazyLoader\nfrom tensorflow.python.util.tf_export import tf_export\n\n# Use LazyLoader to avoid circular dependencies.\ntensor_spec = LazyLoader(\n \"tensor_spec\", globals(),\n \"tensorflow.python.framework.tensor_spec\")\nops = LazyLoader(\"ops\", globals(),\n \"tensorflow.python.framework.ops\")\n\n\n@tf_export(\"TypeSpec\", v1=[\"TypeSpec\", \"data.experimental.Structure\"])\nclass TypeSpec(trace.TraceType, metaclass=abc.ABCMeta):\n \"\"\"Specifies a TensorFlow value type.\n\n A `tf.TypeSpec` provides metadata describing an object accepted or returned\n by TensorFlow APIs. Concrete subclasses, such as `tf.TensorSpec` and\n `tf.RaggedTensorSpec`, are used to describe different value types.\n\n For example, `tf.function`'s `input_signature` argument accepts a list\n (or nested structure) of `TypeSpec`s.\n\n Creating new subclasses of `TypeSpec` (outside of TensorFlow core) is not\n currently supported. In particular, we may make breaking changes to the\n private methods and properties defined by this base class.\n\n Example:\n\n >>> spec = tf.RaggedTensorSpec(shape=[None, None], dtype=tf.int32)\n >>> @tf.function(input_signature=[spec])\n ... def double(x):\n ... return x * 2\n >>> print(double(tf.ragged.constant([[1, 2], [3]])))\n <tf.RaggedTensor [[2, 4], [6]]>\n \"\"\"\n # === Subclassing ===\n #\n # Each `TypeSpec` subclass must define:\n #\n # * A \"component encoding\" for values.\n # * A \"serialization\" for types.\n #\n # The component encoding for a value is a nested structure of `tf.Tensor`\n # or `CompositeTensor` that can be used by the `TypeSpec` to reconstruct\n # the value. Each individual `TypeSpec` must use the same nested structure\n # for all values -- this structure is defined by the `component_specs`\n # attribute. Decomposing values into components, and reconstructing them\n # from those components, should be inexpensive. In particular, it should\n # *not* require any TensorFlow ops.\n #\n # The serialization for a `TypeSpec` is a nested tuple of values that can\n # be used to reconstruct the `TypeSpec`. 
See the documentation for\n # `_serialize()` for more information.\n\n __slots__ = []\n\n @abc.abstractproperty\n def value_type(self):\n \"\"\"The Python type for values that are compatible with this TypeSpec.\n\n In particular, all values that are compatible with this TypeSpec must be an\n instance of this type.\n \"\"\"\n raise NotImplementedError(\"%s.value_type\" % type(self).__name__)\n\n def is_subtype_of(self, other: trace.TraceType) -> bool:\n \"\"\"Returns True if `self` is a subtype of `other`.\n\n Implements the tf.types.experimental.func.TraceType interface.\n\n If not overridden by a subclass, the default behavior is to assume the\n TypeSpec is covariant upon attributes that implement TraceType and\n invariant upon rest of the attributes as well as the structure and type\n of the TypeSpec.\n\n Args:\n other: A TraceType object.\n \"\"\"\n if type(self) is not type(other):\n return False\n\n is_subtype = True\n def check_attribute(attribute_self, attribute_other):\n nonlocal is_subtype\n if not is_subtype:\n return\n\n if isinstance(attribute_self, trace.TraceType):\n if not attribute_self.is_subtype_of(attribute_other):\n is_subtype = False\n return\n else:\n if attribute_self != attribute_other:\n is_subtype = False\n\n try:\n # TODO(b/217959193): Replace _serialize with parameter decomposition.\n nest.map_structure(check_attribute, self._serialize(),\n other._serialize()) # pylint: disable=protected-access\n except (ValueError, TypeError):\n return False\n\n return is_subtype\n\n def most_specific_common_supertype(\n self,\n others: Sequence[trace.TraceType]) -> Optional[\"TypeSpec\"]:\n \"\"\"Returns the most specific supertype TypeSpec of `self` and `others`.\n\n Implements the tf.types.experimental.func.TraceType interface.\n\n If not overridden by a subclass, the default behavior is to assume the\n TypeSpec is covariant upon attributes that implement TraceType and\n invariant upon rest of the attributes as well as the structure and type\n of the TypeSpec.\n\n Args:\n others: A sequence of TraceTypes.\n \"\"\"\n if any(type(self) is not type(other) for other in others):\n return None\n\n has_supertype = True\n def make_supertype_attribute(attribute_self, *attribute_others):\n nonlocal has_supertype\n if not has_supertype:\n return\n\n if isinstance(attribute_self, trace.TraceType):\n attribute_supertype = attribute_self.most_specific_common_supertype(\n attribute_others)\n if attribute_supertype is None:\n has_supertype = False\n return\n return attribute_supertype\n else:\n if not all(attribute_self == attribute_other\n for attribute_other in attribute_others):\n has_supertype = False\n return\n return attribute_self\n\n try:\n # TODO(b/217959193): Replace _serialize with parameter decomposition.\n serialized_supertype = nest.map_structure(\n make_supertype_attribute, self._serialize(),\n *(o._serialize() for o in others)) # pylint: disable=protected-access\n except (ValueError, TypeError):\n return None\n\n return self._deserialize(serialized_supertype) if has_supertype else None\n\n # TODO(b/202447704): Reduce internal usages.\n def is_compatible_with(self, spec_or_value):\n \"\"\"Returns true if `spec_or_value` is compatible with this TypeSpec.\"\"\"\n # === Subclassing ===\n # If not overridden by subclasses, the default behavior is to convert\n # `spec_or_value` to a `TypeSpec` (if it isn't already); and then to\n # consider two `TypeSpec`s compatible if they have the same type, and\n # the values returned by `_serialize` are compatible (where\n # `tf.TensorShape`, 
`tf.TensorSpec`, and `tf.DType` are checked for\n # compatibility using their `is_compatible_with` method; and all other\n # types are considered compatible if they are equal).\n if not isinstance(spec_or_value, TypeSpec):\n spec_or_value = type_spec_from_value(spec_or_value)\n if type(self) is not type(spec_or_value):\n return False\n return self.__is_compatible(self._serialize(), spec_or_value._serialize()) # pylint: disable=protected-access\n\n @deprecation.deprecated(None, \"Use most_specific_common_supertype instead.\")\n def most_specific_compatible_type(self, other: \"TypeSpec\") -> \"TypeSpec\":\n \"\"\"Returns the most specific TypeSpec compatible with `self` and `other`.\n\n Deprecated. Please use `most_specific_common_supertype` instead.\n Do not override this function.\n\n Args:\n other: A `TypeSpec`.\n\n Raises:\n ValueError: If there is no TypeSpec that is compatible with both `self`\n and `other`.\n \"\"\"\n result = self.most_specific_common_supertype([other])\n if result is None:\n raise ValueError(\"No TypeSpec is compatible with both %s and %s\" %\n (self, other))\n return result\n\n def _with_tensor_ranks_only(self) -> \"TypeSpec\":\n \"\"\"Returns a TypeSpec compatible with `self`, with tensor shapes relaxed.\n\n Returns:\n A `TypeSpec` that is compatible with `self`, where any `TensorShape`\n information has been relaxed to include only tensor rank (and not\n the dimension sizes for individual axes).\n \"\"\"\n\n # === Subclassing ===\n # If not overridden by a subclass, the default behavior is to serialize\n # this TypeSpec, relax any TensorSpec or TensorShape values, and\n # deserialize the result.\n\n def relax(value):\n if isinstance(value, TypeSpec):\n return value._with_tensor_ranks_only() # pylint: disable=protected-access\n elif (isinstance(value, tensor_shape.TensorShape) and\n value.rank is not None):\n return tensor_shape.TensorShape([None] * value.rank)\n else:\n return value\n\n return self._deserialize(nest.map_structure(relax, self._serialize()))\n\n # TODO(b/206014848): Helper function to support logic that does not consider\n # Tensor name. Will be removed once load-bearing usages of Tensor name are\n # fixed.\n def _without_tensor_names(self) -> \"TypeSpec\":\n \"\"\"Returns a TypeSpec compatible with `self`, with tensor names removed.\n\n Returns:\n A `TypeSpec` that is compatible with `self`, where the name of any\n `TensorSpec` is set to `None`.\n \"\"\"\n\n # === Subclassing ===\n # If not overridden by a subclass, the default behavior is to serialize\n # this TypeSpec, set the TensorSpecs' names to None, and deserialize the\n # result.\n\n def rename(value):\n if isinstance(value, TypeSpec):\n return value._without_tensor_names() # pylint: disable=protected-access\n return value\n\n return self._deserialize(nest.map_structure(rename, self._serialize()))\n\n # === Component encoding for values ===\n\n @abc.abstractmethod\n def _to_components(self, value):\n \"\"\"Encodes `value` as a nested structure of `Tensor` or `CompositeTensor`.\n\n Args:\n value: A value compatible with this `TypeSpec`. 
(Caller is responsible\n for ensuring compatibility.)\n\n Returns:\n A nested structure of `tf.Tensor` or `tf.CompositeTensor` compatible with\n `self._component_specs`, which can be used to reconstruct `value`.\n \"\"\"\n # === Subclassing ===\n # This method must be inexpensive (do not call TF ops).\n raise NotImplementedError(\"%s._to_components()\" % type(self).__name__)\n\n @abc.abstractmethod\n def _from_components(self, components):\n \"\"\"Reconstructs a value from a nested structure of Tensor/CompositeTensor.\n\n Args:\n components: A nested structure of `tf.Tensor` or `tf.CompositeTensor`,\n compatible with `self._component_specs`. (Caller is responsible for\n ensuring compatibility.)\n\n Returns:\n A value that is compatible with this `TypeSpec`.\n \"\"\"\n # === Subclassing ===\n # This method must be inexpensive (do not call TF ops).\n raise NotImplementedError(\"%s._from_components()\" % type(self).__name__)\n\n @abc.abstractproperty\n def _component_specs(self):\n \"\"\"A nested structure of TypeSpecs for this type's components.\n\n Returns:\n A nested structure describing the component encodings that are returned\n by this TypeSpec's `_to_components` method. In particular, for a\n TypeSpec `spec` and a compatible value `value`:\n\n ```\n nest.map_structure(lambda t, c: assert t.is_compatible_with(c),\n spec._component_specs, spec._to_components(value))\n ```\n \"\"\"\n raise NotImplementedError(\"%s._component_specs()\" % type(self).__name__)\n\n # === Tensor list encoding for values ===\n\n def _to_tensor_list(self, value) -> List[\"ops.Tensor\"]:\n \"\"\"Encodes `value` as a flat list of `tf.Tensor`.\n\n By default, this just flattens `self._to_components(value)` using\n `nest.flatten`. However, subclasses may override this to return a\n different tensor encoding for values. In particular, some subclasses\n of `BatchableTypeSpec` override this method to return a \"boxed\" encoding\n for values, which then can be batched or unbatched. See\n `BatchableTypeSpec` for more details.\n\n Args:\n value: A value with compatible this `TypeSpec`. (Caller is responsible\n for ensuring compatibility.)\n\n Returns:\n A list of `tf.Tensor`, compatible with `self._flat_tensor_specs`, which\n can be used to reconstruct `value`.\n \"\"\"\n return nest.flatten(self._to_components(value), expand_composites=True)\n\n def _from_tensor_list(self, tensor_list: List[\"ops.Tensor\"]) -> Any:\n \"\"\"Reconstructs a value from a flat list of `tf.Tensor`.\n\n Args:\n tensor_list: A flat list of `tf.Tensor`, compatible with\n `self._flat_tensor_specs`.\n\n Returns:\n A value that is compatible with this `TypeSpec`.\n\n Raises:\n ValueError: If `tensor_list` is not compatible with\n `self._flat_tensor_specs`.\n \"\"\"\n self.__check_tensor_list(tensor_list)\n return self._from_compatible_tensor_list(tensor_list)\n\n def _from_compatible_tensor_list(\n self, tensor_list: List[\"ops.Tensor\"]) -> Any:\n \"\"\"Reconstructs a value from a compatible flat list of `tf.Tensor`.\n\n Args:\n tensor_list: A flat list of `tf.Tensor`, compatible with\n `self._flat_tensor_specs`. 
(Caller is responsible for ensuring\n compatibility.)\n\n Returns:\n A value that is compatible with this `TypeSpec`.\n \"\"\"\n return self._from_components(\n nest.pack_sequence_as(\n self._component_specs, tensor_list, expand_composites=True))\n\n @property\n def _flat_tensor_specs(self):\n \"\"\"A list of TensorSpecs compatible with self._to_tensor_list(v).\"\"\"\n return nest.flatten(self._component_specs, expand_composites=True)\n\n # === Serialization for types ===\n\n @abc.abstractmethod\n def _serialize(self):\n \"\"\"Returns a nested tuple containing the state of this TypeSpec.\n\n The serialization may contain the following value types: boolean,\n integer, string, float, None, `TensorSpec`, `tf.TensorShape`, `tf.DType`,\n `np.ndarray`, `TypeSpec`, and nested tuples, namedtuples, dicts, and\n OrderedDicts of any of the above.\n\n This method is used to provide default definitions for: equality\n testing (__eq__, __ne__), hashing (__hash__), pickling (__reduce__),\n string representation (__repr__), `self.is_compatible_with()`,\n `self.most_specific_compatible_type()`, and protobuf serialization\n (e.g. TensorInfo and StructuredValue).\n \"\"\"\n raise NotImplementedError(\"%s._serialize()\" % type(self).__name__)\n\n @classmethod\n def _deserialize(cls, serialization):\n \"\"\"Reconstructs a TypeSpec from a value returned by `serialize`.\n\n Args:\n serialization: A value returned by _serialize. In some contexts,\n `namedtuple`s in `serialization` may not have the identical type that\n was returned by `_serialize` (but its type will still be a `namedtuple`\n type with the same type name and field names). For example, the code\n that loads a SavedModel does not have access to the original\n `namedtuple` type, so it dynamically creates a new `namedtuple` type\n with the same type name and field names as the original one. If\n necessary, you can check `serialization` for these duck-typed\n `nametuple` types, and restore them to the original type. 
(E.g., this\n would be necessary if you rely on type checks such as `isinstance` for\n this `TypeSpec`'s member variables).\n\n Returns:\n A `TypeSpec` of type `cls`.\n \"\"\"\n return cls(*serialization)\n\n # === Operators ===\n\n def __eq__(self, other) -> bool:\n # pylint: disable=protected-access\n return (type(other) is type(self) and\n self.__get_cmp_key() == other.__get_cmp_key())\n\n def __ne__(self, other) -> bool:\n return not self == other\n\n def __hash__(self) -> int:\n return hash(self.__get_cmp_key())\n\n def __reduce__(self):\n return type(self), self._serialize()\n\n def __repr__(self) -> str:\n return \"%s%r\" % (type(self).__name__, self._serialize())\n\n # === Legacy Output ===\n # TODO(b/133606651) Document and/or deprecate the legacy_output methods.\n # (These are used by tf.data.)\n\n def _to_legacy_output_types(self):\n raise NotImplementedError(\"%s._to_legacy_output_types()\" %\n type(self).__name__)\n\n def _to_legacy_output_shapes(self):\n raise NotImplementedError(\"%s._to_legacy_output_shapes()\" %\n type(self).__name__)\n\n def _to_legacy_output_classes(self):\n return self.value_type\n\n # === Private Helper Methods ===\n\n # TODO(b/216206374): Currently this usage is used to represent a Tensor\n # argument not a TensorSpec argument as it should be.\n def __tf_tracing_type__(self,\n context: trace.TracingContext) -> trace.TraceType:\n if context.include_tensor_ranks_only:\n return self._with_tensor_ranks_only()\n else:\n return self\n\n def __check_tensor_list(self, tensor_list):\n \"\"\"Raises an exception if tensor_list incompatible w/ flat_tensor_specs.\"\"\"\n expected = self._flat_tensor_specs\n specs = [type_spec_from_value(t) for t in tensor_list]\n if len(specs) != len(expected):\n raise ValueError(f\"Cannot create a {self.value_type.__name__} from the \"\n f\"tensor list because the TypeSpec expects \"\n f\"{len(expected)} items, but the provided tensor list \"\n f\"has {len(specs)} items.\")\n for i, (s1, s2) in enumerate(zip(specs, expected)):\n if not s1.is_compatible_with(s2):\n raise ValueError(f\"Cannot create a {self.value_type.__name__} from the \"\n f\"tensor list because item {i} ({tensor_list[i]!r}) \"\n f\"is incompatible with the expected TypeSpec {s2}.\")\n\n def __get_cmp_key(self):\n \"\"\"Returns a hashable eq-comparable key for `self`.\"\"\"\n # TODO(b/133606651): Decide whether to cache this value.\n return (type(self), self.__make_cmp_key(self._serialize()))\n\n def __make_cmp_key(self, value):\n \"\"\"Converts `value` to a hashable key.\"\"\"\n if isinstance(value, (int, float, bool, np.generic, dtypes.DType, TypeSpec,\n tensor_shape.TensorShape)):\n return value\n if isinstance(value, compat.bytes_or_text_types):\n return value\n if value is None:\n return value\n if isinstance(value, dict):\n return tuple([\n tuple([self.__make_cmp_key(key),\n self.__make_cmp_key(value[key])])\n for key in sorted(value.keys())\n ])\n if isinstance(value, tuple):\n return tuple([self.__make_cmp_key(v) for v in value])\n if isinstance(value, list):\n return (list, tuple([self.__make_cmp_key(v) for v in value]))\n if isinstance(value, np.ndarray):\n return (np.ndarray, value.shape,\n TypeSpec.__nested_list_to_tuple(value.tolist()))\n raise ValueError(f\"Cannot generate a hashable key for {self} because \"\n f\"the _serialize() method \"\n f\"returned an unsupproted value of type {type(value)}\")\n\n @staticmethod\n def __nested_list_to_tuple(value):\n \"\"\"Converts a nested list to a corresponding nested tuple.\"\"\"\n if isinstance(value, 
list):\n return tuple(TypeSpec.__nested_list_to_tuple(v) for v in value)\n return value\n\n @staticmethod\n def __same_types(a, b):\n \"\"\"Returns whether a and b have the same type, up to namedtuple equivalence.\n\n Consistent with tf.nest.assert_same_structure(), two namedtuple types\n are considered the same iff they agree in their class name (without\n qualification by module name) and in their sequence of field names.\n This makes namedtuples recreated by nested_structure_coder compatible with\n their original Python definition.\n\n Args:\n a: a Python object.\n b: a Python object.\n\n Returns:\n A boolean that is true iff type(a) and type(b) are the same object\n or equivalent namedtuple types.\n \"\"\"\n if nest.is_namedtuple(a) and nest.is_namedtuple(b):\n return nest.same_namedtuples(a, b)\n else:\n return type(a) is type(b)\n\n @staticmethod\n def __is_compatible(a, b):\n \"\"\"Returns true if the given type serializations compatible.\"\"\"\n if isinstance(a, TypeSpec):\n return a.is_compatible_with(b)\n if not TypeSpec.__same_types(a, b):\n return False\n if isinstance(a, (list, tuple)):\n return (len(a) == len(b) and\n all(TypeSpec.__is_compatible(x, y) for (x, y) in zip(a, b)))\n if isinstance(a, dict):\n return (len(a) == len(b) and sorted(a.keys()) == sorted(b.keys()) and\n all(TypeSpec.__is_compatible(a[k], b[k]) for k in a.keys()))\n if isinstance(a, (tensor_shape.TensorShape, dtypes.DType)):\n return a.is_compatible_with(b)\n return a == b\n\n # TODO(b/221459366): Remove after usages are removed.\n @staticmethod\n def __most_specific_compatible_type_serialization(a, b):\n \"\"\"Helper for most_specific_compatible_type.\n\n Combines two type serializations as follows:\n\n * If they are both tuples of the same length, then recursively combine\n the respective tuple elements.\n * If they are both dicts with the same keys, then recursively combine\n the respective dict elements.\n * If they are both TypeSpecs, then combine using\n TypeSpec.most_specific_compatible_type.\n * If they are both TensorShapes, then combine using\n TensorShape.most_specific_compatible_shape.\n * If they are both TensorSpecs with the same dtype, then combine using\n TensorShape.most_specific_compatible_shape to combine shapes.\n * If they are equal, then return a.\n * If none of the above, then raise a ValueError.\n\n Args:\n a: A serialized TypeSpec or nested component from a serialized TypeSpec.\n b: A serialized TypeSpec or nested component from a serialized TypeSpec.\n\n Returns:\n A value with the same type and structure as `a` and `b`.\n\n Raises:\n ValueError: If `a` and `b` are incompatible.\n \"\"\"\n if not TypeSpec.__same_types(a, b):\n raise ValueError(\n f\"Encountered incompatible types while determining the most specific \"\n f\"compatible type. \"\n f\"The Python type structures of `a` and `b` are different. \"\n f\"`a` : {a!r} `b` : {b!r}\")\n if nest.is_namedtuple(a):\n assert a._fields == b._fields # Implied by __same_types(a, b).\n return type(a)(*[\n TypeSpec.__most_specific_compatible_type_serialization(x, y)\n for (x, y) in zip(a, b)\n ])\n if isinstance(a, (list, tuple)):\n if len(a) != len(b):\n raise ValueError(\n f\"Encountered incompatible types while determining the most specific \"\n f\"compatible type. 
\"\n f\"Type spec structure `a` has a length of {len(a)} and \"\n f\"type spec structure `b` has a different length of {len(b)}.\"\n f\"`a` : {a!r} `b` : {b!r}\")\n return tuple(\n TypeSpec.__most_specific_compatible_type_serialization(x, y)\n for (x, y) in zip(a, b))\n if isinstance(a, collections.OrderedDict):\n a_keys, b_keys = a.keys(), b.keys()\n if len(a) != len(b) or a_keys != b_keys:\n raise ValueError(\n f\"Encountered incompatible types while determining the most specific \"\n f\"compatible type. \"\n f\"Type spec structure `a` has keys {a_keys} and \"\n f\"type spec structure `b` has different keys {b_keys}.\"\n f\"`a` : {a!r} `b` : {b!r}\")\n return collections.OrderedDict([\n (k,\n TypeSpec.__most_specific_compatible_type_serialization(a[k], b[k]))\n for k in a_keys\n ])\n if isinstance(a, dict):\n a_keys, b_keys = sorted(a.keys()), sorted(b.keys())\n if len(a) != len(b) or a_keys != b_keys:\n raise ValueError(\n f\"Encountered incompatible types while determining the most specific \"\n f\"compatible type. \"\n f\"Type spec structure `a` has keys {a_keys} and \"\n f\"type spec structure `b` has different keys {b_keys}.\"\n f\"`a` : {a!r} `b` : {b!r}\")\n return {\n k: TypeSpec.__most_specific_compatible_type_serialization(a[k], b[k])\n for k in a_keys\n }\n if isinstance(a, tensor_shape.TensorShape):\n return a.most_specific_compatible_shape(b)\n if isinstance(a, list):\n raise AssertionError(\n f\"{type(a).__name__}._serialize() should not return list values.\")\n if isinstance(a, TypeSpec):\n return a.most_specific_compatible_type(b)\n if a != b:\n raise ValueError(\n f\"Encountered incompatible types while determining the most specific \"\n f\"compatible type. \"\n f\"Type spec structure `a` and `b` are different. \"\n f\"`a` : {a!r} `b` : {b!r}\")\n return a\n\n\nclass TypeSpecBatchEncoder(object, metaclass=abc.ABCMeta):\n \"\"\"Class used to encode and decode composite tensor values for batching.\n\n In order to be batched and unbatched by APIs such as `tf.data.Dataset` and\n `tf.map_fn`, composite tensors must be encoded using flat tensors that can\n themselves be batched or unbatched. `TypeSpecBatchEncoder`s are\n responsible for implementing this encoding.\n\n If a composite tensor's shape is a prefix of the shape of all of its\n component tensors, then this encoding can usually be performed by just\n returning those component tensors as a list. 
But if the composite tensor\n has components whose shape has a more complex relationship to the shape\n of the composite tensor, then a custom `TypeSpecBatchEncoder` may\n need to be implemented.\n \"\"\"\n\n @abc.abstractmethod\n def batch(self, spec, batch_size):\n \"\"\"Returns the TypeSpec representing a batch of values described by `spec`.\n\n Args:\n spec: The `TypeSpec` for an individual value.\n batch_size: An `int` indicating the number of values that are batched\n together, or `None` if the batch size is not known.\n\n Returns:\n A `TypeSpec` for a batch of values.\n \"\"\"\n raise NotImplementedError(f\"{type(self).__name__}.batch\")\n\n @abc.abstractmethod\n def unbatch(self, spec):\n \"\"\"Returns the TypeSpec for a single unbatched element in `spec`.\n\n Args:\n spec: The `TypeSpec` for a batch of values.\n\n Returns:\n A `TypeSpec` for an individual value.\n \"\"\"\n raise NotImplementedError(f\"{type(self).__name__}.unbatch\")\n\n @abc.abstractmethod\n def encode(self, spec, value, minimum_rank=0):\n \"\"\"Encodes `value` as a nest of batchable `Tensor` or `CompositeTensor`.\n\n Args:\n spec: The TypeSpec of the value to encode.\n value: A value compatible with `spec`.\n minimum_rank: The minimum rank for the returned Tensors, CompositeTensors,\n and ExtensionType values. This can be used to ensure that the encoded\n values can be unbatched this number of times. If `minimum_rank>0`,\n then `t.shape[:minimum_rank]` must be compatible for all values `t`\n returned by `encode`.\n\n Returns:\n A nest (as defined by `tf.nest`) of `tf.Tensor`s, batchable\n `tf.CompositeTensor`s, or `tf.ExtensionType`s. Stacking, unstacking, or\n concatenating these encoded values and then decoding the result must be\n equivalent to stacking, unstacking, or concatenating the original values.\n \"\"\"\n raise NotImplementedError(f\"{type(self).__name__}.encode\")\n\n @abc.abstractmethod\n def decode(self, spec, encoded_value):\n \"\"\"Decodes `value` from a batchable tensor encoding.\n\n Args:\n spec: The TypeSpec for the result value. If encoded values with spec `s`\n were batched, then `spec` should be `s.batch(batch_size)`; or if encoded\n values with spec `s` were unbatched, then `spec` should be\n `s.unbatch()`.\n encoded_value: A nest of values returned by `encode`; or a nest of\n values that was formed by stacking, unstacking, or concatenating the\n corresponding elements of values returned by `encode`.\n\n Returns:\n A value compatible with `type_spec`.\n \"\"\"\n raise NotImplementedError(f\"{type(self).__name__}.decode\")\n\n @abc.abstractmethod\n def encoding_specs(self, spec):\n \"\"\"Returns a nest of `TypeSpec`(s) describing the encoding for `spec`.\n\n Args:\n spec: The TypeSpec whose encoding should be described.\n\n Returns:\n A nest (as defined by `tf.nest) of `tf.TypeSpec`, describing the values\n that are returned by `self.encode(spec, ...)`. 
All TypeSpecs in this\n nest must be batchable.\n \"\"\"\n raise NotImplementedError(f\"{type(self).__name__}.encoding_specs\")\n\n\nclass LegacyTypeSpecBatchEncoder(TypeSpecBatchEncoder):\n \"\"\"TypeSpecBatchEncoder for legacy composite tensor classes.\n\n TODO(edloper): Update existing composite tensors to use non-legacy\n CompositTensorBatchEncoders.\n \"\"\"\n\n def batch(self, type_spec, batch_size):\n return type_spec._batch(batch_size) # pylint: disable=protected-access\n\n def unbatch(self, type_spec):\n return type_spec._unbatch() # pylint: disable=protected-access\n\n def encode(self, type_spec, value, minimum_rank=0):\n if minimum_rank == 0:\n return type_spec._to_tensor_list(value) # pylint: disable=protected-access\n elif minimum_rank == 1:\n if not isinstance(type_spec, BatchableTypeSpec):\n raise ValueError(f\"{type_spec.__name__}.encode does not support \"\n \"minimum_rank>0.\")\n return type_spec._to_batched_tensor_list(value) # pylint: disable=protected-access\n else:\n raise ValueError(f\"{type_spec.__name__}.encode does not support \"\n \"minimum_rank>1.\")\n\n def decode(self, type_spec, encoded_value):\n return type_spec._from_tensor_list(encoded_value) # pylint: disable=protected-access\n\n def encoding_specs(self, spec):\n return spec._flat_tensor_specs # pylint: disable=protected-access\n\n\nclass BatchableTypeSpec(TypeSpec, metaclass=abc.ABCMeta):\n \"\"\"TypeSpec with a batchable tensor encoding.\n\n The batchable tensor encoding is a list of `tf.Tensor`s that supports\n batching and unbatching. In particular, stacking (or unstacking)\n values with the same `TypeSpec` must be equivalent to stacking (or\n unstacking) each of their tensor lists. Unlike the component encoding\n (returned by `self._to_components)`, the batchable tensor encoding\n may require using encoding/decoding ops.\n\n If a subclass's batchable tensor encoding is not simply a flattened version\n of the component encoding, then the subclass must override `_to_tensor_list`,\n `_from_tensor_list`, and _flat_tensor_specs`.\n \"\"\"\n\n __slots__ = []\n\n __batch_encoder__ = LegacyTypeSpecBatchEncoder()\n\n @abc.abstractmethod\n def _batch(self, batch_size) -> TypeSpec:\n \"\"\"Returns a TypeSpec representing a batch of objects with this TypeSpec.\n\n Args:\n batch_size: An `int` representing the number of elements in a batch, or\n `None` if the batch size may vary.\n\n Returns:\n A `TypeSpec` representing a batch of objects with this TypeSpec.\n \"\"\"\n raise NotImplementedError(f\"{type(self).__name__}._batch\")\n\n @abc.abstractmethod\n def _unbatch(self) -> TypeSpec:\n \"\"\"Returns a TypeSpec representing a single element this TypeSpec.\n\n Returns:\n A `TypeSpec` representing a single element of objects with this TypeSpec.\n \"\"\"\n raise NotImplementedError(f\"{type(self).__name__}._unbatch\")\n\n @property\n def _flat_tensor_specs(self) -> List[TypeSpec]:\n \"\"\"A list of TensorSpecs compatible with self._to_tensor_list(v).\"\"\"\n component_flat_tensor_specs = nest.map_structure(\n functools.partial(get_batchable_flat_tensor_specs, context_spec=self),\n self._component_specs)\n return nest.flatten(component_flat_tensor_specs)\n\n def _to_tensor_list(\n self,\n value: composite_tensor.CompositeTensor) -> List[\"ops.Tensor\"]:\n \"\"\"Encodes `value` as a flat list of `ops.Tensor`.\"\"\"\n component_tensor_lists = nest.map_structure(\n batchable_to_tensor_list,\n self._component_specs,\n self._to_components(value))\n return nest.flatten(component_tensor_lists)\n\n def 
_to_batched_tensor_list(\n self,\n value: composite_tensor.CompositeTensor) -> List[\"ops.Tensor\"]:\n \"\"\"Encodes `value` as a flat list of `ops.Tensor` each with rank>0.\"\"\"\n get_spec_tensor_list = lambda spec, v: ( # pylint: disable=g-long-lambda\n batchable_to_tensor_list(spec, v, minimum_rank=1)\n if isinstance(spec, BatchableTypeSpec) else spec._to_tensor_list(v)) # pylint: disable=protected-access\n component_batched_tensor_lists = nest.map_structure(\n get_spec_tensor_list, self._component_specs, self._to_components(value))\n tensor_list = nest.flatten(component_batched_tensor_lists)\n if any(t.shape.ndims == 0 for t in tensor_list):\n raise ValueError(\n f\"While converting {value} to a list of tensors for batching, \"\n f\"found a scalar item which cannot be batched.\")\n return tensor_list\n\n def _from_compatible_tensor_list(\n self, tensor_list: List[\"ops.Tensor\"]\n ) -> composite_tensor.CompositeTensor:\n \"\"\"Reconstructs a value from a compatible flat list of `ops.Tensor`.\"\"\"\n flat_specs = nest.map_structure(\n functools.partial(get_batchable_flat_tensor_specs, context_spec=self),\n self._component_specs)\n nested_tensor_list = nest.pack_sequence_as(flat_specs, tensor_list)\n components = nest.map_structure_up_to(\n self._component_specs,\n batchable_from_tensor_list,\n self._component_specs,\n nested_tensor_list)\n return self._from_components(components)\n\n\ndef get_batchable_flat_tensor_specs(spec, context_spec=None):\n \"\"\"Returns the flat tensor specs for `spec`.\"\"\"\n if isinstance(spec, tensor_spec.TensorSpec):\n return [spec]\n elif hasattr(spec, \"__batch_encoder__\"):\n encoding_specs = nest.map_structure(\n functools.partial(get_batchable_flat_tensor_specs,\n context_spec=context_spec),\n spec.__batch_encoder__.encoding_specs(spec))\n return nest.flatten(encoding_specs)\n else:\n # TODO(edloper) Fix existing CompositeTensors that permit this, and\n # then turn this warning into an error.\n warnings.warn(f\"Batchable type {context_spec} contains non-batchable \"\n f\"field or component with type {spec}.\")\n return spec._flat_tensor_specs # pylint: disable=protected-access\n\n\ndef batchable_to_tensor_list(spec, value, minimum_rank=0):\n \"\"\"Returns a list of tensors encoding `value`, whose type is `spec`.\"\"\"\n if isinstance(spec, tensor_spec.TensorSpec):\n return [value]\n elif hasattr(spec, \"__batch_encoder__\"):\n encoded_value = spec.__batch_encoder__.encode(spec, value, minimum_rank)\n encoded_specs = spec.__batch_encoder__.encoding_specs(spec)\n encoded_flats = nest.map_structure(\n functools.partial(batchable_to_tensor_list, minimum_rank=minimum_rank),\n encoded_specs,\n encoded_value)\n return nest.flatten(encoded_flats)\n else:\n return spec._to_tensor_list(value) # pylint: disable=protected-access\n\n\ndef batchable_from_tensor_list(spec, tensor_list):\n \"\"\"Returns a value with type `spec` decoded from `tensor_list`.\"\"\"\n if isinstance(spec, tensor_spec.TensorSpec):\n assert len(tensor_list) == 1\n return tensor_list[0]\n elif hasattr(spec, \"__batch_encoder__\"):\n encoded_specs = spec.__batch_encoder__.encoding_specs(spec)\n flat_specs = nest.map_structure(get_batchable_flat_tensor_specs,\n encoded_specs)\n encoded_flats = nest.pack_sequence_as(flat_specs, tensor_list)\n encoded_value = nest.map_structure_up_to(\n encoded_specs,\n batchable_from_tensor_list,\n encoded_specs,\n encoded_flats)\n return spec.__batch_encoder__.decode(spec, encoded_value)\n else:\n return spec._from_compatible_tensor_list(tensor_list) # pylint: 
disable=protected-access\n\n\n@tf_export(\"type_spec_from_value\")\ndef type_spec_from_value(value) -> TypeSpec:\n \"\"\"Returns a `tf.TypeSpec` that represents the given `value`.\n\n Examples:\n\n >>> tf.type_spec_from_value(tf.constant([1, 2, 3]))\n TensorSpec(shape=(3,), dtype=tf.int32, name=None)\n >>> tf.type_spec_from_value(np.array([4.0, 5.0], np.float64))\n TensorSpec(shape=(2,), dtype=tf.float64, name=None)\n >>> tf.type_spec_from_value(tf.ragged.constant([[1, 2], [3, 4, 5]]))\n RaggedTensorSpec(TensorShape([2, None]), tf.int32, 1, tf.int64)\n\n >>> example_input = tf.ragged.constant([[1, 2], [3]])\n >>> @tf.function(input_signature=[tf.type_spec_from_value(example_input)])\n ... def f(x):\n ... return tf.reduce_sum(x, axis=1)\n\n Args:\n value: A value that can be accepted or returned by TensorFlow APIs. Accepted\n types for `value` include `tf.Tensor`, any value that can be converted to\n `tf.Tensor` using `tf.convert_to_tensor`, and any subclass of\n `CompositeTensor` (such as `tf.RaggedTensor`).\n\n Returns:\n A `TypeSpec` that is compatible with `value`.\n\n Raises:\n TypeError: If a TypeSpec cannot be built for `value`, because its type\n is not supported.\n \"\"\"\n spec = _type_spec_from_value(value)\n if spec is not None:\n return spec\n\n # Fallback: try converting value to a tensor.\n try:\n tensor = ops.convert_to_tensor(value)\n spec = _type_spec_from_value(tensor)\n if spec is not None:\n return spec\n except (ValueError, TypeError) as e:\n logging.vlog(\n 3, \"Failed to convert %r to tensor: %s\" % (type(value).__name__, e))\n\n raise TypeError(f\"Could not build a TypeSpec for {value} of \"\n f\"unsupported type {type(value)}.\")\n\n\ndef _type_spec_from_value(value) -> TypeSpec:\n \"\"\"Returns a `TypeSpec` that represents the given `value`.\"\"\"\n if isinstance(value, ops.Tensor):\n # Note: we do not include Tensor names when constructing TypeSpecs.\n return tensor_spec.TensorSpec(value.shape, value.dtype)\n\n if isinstance(value, composite_tensor.CompositeTensor):\n return value._type_spec # pylint: disable=protected-access\n\n # If `value` is a list and all of its elements can be represented by the same\n # batchable type spec, then we can represent the entire list using a single\n # type spec that captures the type accurately (unlike the `convert_to_tensor`\n # fallback).\n if isinstance(value, list) and value:\n subspecs = [_type_spec_from_value(v) for v in value]\n if isinstance(subspecs[0], BatchableTypeSpec):\n merged_subspec = subspecs[0]\n try:\n for subspec in subspecs[1:]:\n merged_subspec = merged_subspec.most_specific_compatible_type(subspec)\n return merged_subspec._batch(len(subspecs)) # pylint: disable=protected-access\n except (ValueError, TypeError):\n pass # incompatible subspecs\n\n for entry in reversed(_TYPE_CONVERSION_FUNCTION_REGISTRY):\n type_object, converter_fn, allow_subclass = entry\n if ((type(value) is type_object) or # pylint: disable=unidiomatic-typecheck\n (allow_subclass and isinstance(value, type_object))):\n return converter_fn(value)\n\n return None\n\n\n_TYPE_CONVERSION_FUNCTION_REGISTRY = []\n\n\ndef register_type_spec_from_value_converter(type_object,\n converter_fn,\n allow_subclass=False):\n \"\"\"Registers a function for converting values with a given type to TypeSpecs.\n\n If multiple registered `type_object`s match a value, then the most recent\n registration takes precedence. 
Custom converters should not be defined for\n `CompositeTensor`s; use `CompositeTensor._type_spec` instead.\n\n Args:\n type_object: A Python `type` object representing the type of values accepted\n by `converter_fn`.\n converter_fn: A function that takes one argument (an instance of the type\n represented by `type_object`) and returns a `TypeSpec`.\n allow_subclass: If true, then use `isinstance(value, type_object)` to check\n for matches. If false, then use `type(value) is type_object`.\n \"\"\"\n _, type_object = tf_decorator.unwrap(type_object)\n _TYPE_CONVERSION_FUNCTION_REGISTRY.append(\n (type_object, converter_fn, allow_subclass))\n\n\n_pywrap_utils.RegisterType(\"TypeSpec\", TypeSpec)\n\n_TYPE_SPEC_TO_NAME = {}\n_NAME_TO_TYPE_SPEC = {}\n\n# Regular expression for valid TypeSpec names.\n_REGISTERED_NAME_RE = re.compile(r\"^(\\w+\\.)+\\w+$\")\n\n\n# TODO(b/173744905) tf_export this as \"tf.register_type_spec\". (And add a\n# usage example to the docstring, once the API is public.)\n#\n# TODO(b/173744905) Update this decorator to apply to ExtensionType rather than\n# TypeSpec (once we do refactoring to move to_components/from_components from\n# TypeSpec to ExtensionType).\ndef register(name):\n \"\"\"Decorator used to register a globally unique name for a TypeSpec subclass.\n\n Args:\n name: The name of the type spec. Must be globally unique. Must have the\n form `\"{project_name}.{type_name}\"`. E.g. `\"my_project.MyTypeSpec\"`.\n\n Returns:\n A class decorator that registers the decorated class with the given name.\n \"\"\"\n if not isinstance(name, str):\n raise TypeError(\"Expected `name` to be a string; got %r\" % (name,))\n if not _REGISTERED_NAME_RE.match(name):\n raise ValueError(\n \"Registered name must have the form '{project_name}.{type_name}' \"\n \"(e.g. 'my_project.MyTypeSpec'); got %r.\" % name)\n\n def decorator_fn(cls):\n if not (isinstance(cls, type) and issubclass(cls, TypeSpec)):\n raise TypeError(\"Expected `cls` to be a TypeSpec; got %r\" % (cls,))\n if cls in _TYPE_SPEC_TO_NAME:\n raise ValueError(\"Class %s.%s has already been registered with name %s.\" %\n (cls.__module__, cls.__name__, _TYPE_SPEC_TO_NAME[cls]))\n if name in _NAME_TO_TYPE_SPEC:\n raise ValueError(\"Name %s has already been registered for class %s.%s.\" %\n (name, _NAME_TO_TYPE_SPEC[name].__module__,\n _NAME_TO_TYPE_SPEC[name].__name__))\n _TYPE_SPEC_TO_NAME[cls] = name\n _NAME_TO_TYPE_SPEC[name] = cls\n return cls\n\n return decorator_fn\n\n\n# TODO(edloper) tf_export this as \"tf.get_type_spec_name\" (or some similar name)\ndef get_name(cls):\n \"\"\"Returns the registered name for TypeSpec `cls`.\"\"\"\n if not (isinstance(cls, type) and issubclass(cls, TypeSpec)):\n raise TypeError(\"Expected `cls` to be a TypeSpec; got %r\" % (cls,))\n if cls not in _TYPE_SPEC_TO_NAME:\n raise ValueError(\"TypeSpec %s.%s has not been registered.\" %\n (cls.__module__, cls.__name__))\n return _TYPE_SPEC_TO_NAME[cls]\n\n\n# TODO(edloper) tf_export this as \"tf.lookup_type_spec\" (or some similar name)\ndef lookup(name):\n \"\"\"Returns the TypeSpec that has been registered with name `name`.\"\"\"\n if not isinstance(name, str):\n raise TypeError(\"Expected `name` to be a string; got %r\" % (name,))\n if name not in _NAME_TO_TYPE_SPEC:\n raise ValueError(\"No TypeSpec has been registered with name %r\" % (name,))\n return _NAME_TO_TYPE_SPEC[name]\n"
] | [
[
"tensorflow.python.framework.device.merge_device",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.func_graph.device_stack_has_callable",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.core.function.polymorphism.type_dispatch.TypeDispatchTable",
"tensorflow.python.util.memory.dismantle_ordered_dict",
"tensorflow.python.saved_model.save_context.get_save_options",
"tensorflow.python.saved_model.save_context.in_save_context",
"tensorflow.core.function.trace_type.SignatureContext",
"tensorflow.core.function.trace_type.make_function_signature"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.util._pywrap_utils.RegisterType",
"tensorflow.python.util.nest.map_structure_up_to",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.util.nest.is_namedtuple",
"tensorflow.python.util.tf_decorator.unwrap",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.util.nest.same_namedtuples"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.9"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
skye/flax | [
"23a91dbc27dd182e26f196546468d33238ca5735",
"23a91dbc27dd182e26f196546468d33238ca5735"
] | [
"examples/lm1b/train.py",
"flax/jax_utils.py"
] | [
"# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Language Modeling example.\n\nThis script trains a Transformer on the lm1b dataset.\nThe data is loaded using tensorflow_datasets.\n\"\"\"\n\nimport functools\nimport itertools\nimport os\nimport time\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom flax import jax_utils\nfrom flax import nn\nfrom flax import optim\nimport decode\nimport input_pipeline\nimport models\nfrom flax.metrics import tensorboard\nfrom flax.training import checkpoints\nfrom flax.training import common_utils\nimport jax\nfrom jax import random\nimport jax.nn\nimport jax.numpy as jnp\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'model_dir', default=None,\n help='Directory to store model data.')\n\nflags.DEFINE_string(\n 'data_dir', default=None,\n help='Directory containing TFDS lm1b/subwords32k dataset.')\n\nflags.DEFINE_integer(\n 'batch_size', default=2048,\n help='Batch size for training.')\n\nflags.DEFINE_integer(\n 'eval_frequency', default=1000,\n help='Frequency of eval during training, e.g. every 1000 steps.')\n\nflags.DEFINE_integer(\n 'num_train_steps', default=500000,\n help='Number of training steps.')\n\nflags.DEFINE_integer(\n 'num_eval_steps', default=20,\n help='Number of evaluation steps. If -1 use the whole evaluation set.')\n\nflags.DEFINE_float(\n 'learning_rate', default=0.05,\n help='Learning rate.')\n\nflags.DEFINE_float(\n 'weight_decay', default=1e-1,\n help='Decay factor for AdamW-style weight decay.')\n\nflags.DEFINE_integer(\n 'max_target_length', default=512,\n help='Maximum length of training examples.')\n\nflags.DEFINE_integer(\n 'max_eval_target_length', default=2048,\n help='Maximum length of eval examples.')\n\nflags.DEFINE_float(\n 'sampling_temperature', default=0.6,\n help='Sampling temperature for language model inference.')\n\nflags.DEFINE_integer(\n 'sampling_top_k', default=20,\n help='Top k cutoff for logit sampling. 
If 0 then no top-k cutoff is used.')\n\nflags.DEFINE_string(\n 'prompt', default='I love to ',\n help='Prompt for language model sampling.')\n\nflags.DEFINE_integer(\n 'max_predict_token_length', default=50,\n help='Maximum example text inference token length.')\n\nflags.DEFINE_bool(\n 'save_checkpoints', default=True,\n help='Whether to save model checkpoints for debugging.')\n\nflags.DEFINE_bool(\n 'restore_checkpoints', default=True,\n help='Whether to restore from existing model checkpoints.')\n\nflags.DEFINE_integer(\n 'checkpoint_freq', default=10000,\n help='Whether to restore from existing model checkpoints.')\n\nflags.DEFINE_integer(\n 'random_seed', default=0,\n help='Integer for PRNG random seed.')\n\n\[email protected](jax.jit, static_argnums=(1, 2))\ndef create_model(key, input_shape, model_kwargs):\n module = models.TransformerLM.partial(**model_kwargs)\n with nn.attention.Cache().mutate() as cache_def:\n _, initial_params = module.init_by_shape(key,\n [(input_shape, jnp.float32)],\n cache=cache_def)\n model = nn.Model(module, initial_params)\n return model, cache_def\n\n\ndef create_optimizer(model, learning_rate):\n optimizer_def = optim.Adam(\n learning_rate,\n beta1=0.9,\n beta2=0.98,\n eps=1e-9,\n weight_decay=FLAGS.weight_decay)\n optimizer = optimizer_def.create(model)\n return optimizer\n\n\ndef create_learning_rate_scheduler(\n factors='constant * linear_warmup * rsqrt_decay',\n base_learning_rate=0.5,\n warmup_steps=8000,\n decay_factor=0.5,\n steps_per_decay=20000,\n steps_per_cycle=100000):\n \"\"\"Creates learning rate schedule.\n\n Interprets factors in the factors string which can consist of:\n * constant: interpreted as the constant value,\n * linear_warmup: interpreted as linear warmup until warmup_steps,\n * rsqrt_decay: divide by square root of max(step, warmup_steps)\n * decay_every: Every k steps decay the learning rate by decay_factor.\n * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.\n\n Args:\n factors: a string with factors separated by '*' that defines the schedule.\n base_learning_rate: float, the starting constant for the lr schedule.\n warmup_steps: how many steps to warm up for in the warmup schedule.\n decay_factor: The amount to decay the learning rate by.\n steps_per_decay: How often to decay the learning rate.\n steps_per_cycle: Steps per cycle when using cosine decay.\n\n Returns:\n A function learning_rate(step): float -> {'learning_rate': float}, the\n step-dependent lr.\n \"\"\"\n factors = [n.strip() for n in factors.split('*')]\n\n def step_fn(step):\n \"\"\"Step to learning rate function.\"\"\"\n ret = 1.0\n for name in factors:\n if name == 'constant':\n ret *= base_learning_rate\n elif name == 'linear_warmup':\n ret *= jnp.minimum(1.0, step / warmup_steps)\n elif name == 'rsqrt_decay':\n ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))\n elif name == 'rsqrt_normalized_decay':\n ret *= jnp.sqrt(warmup_steps)\n ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))\n elif name == 'decay_every':\n ret *= (decay_factor**(step // steps_per_decay))\n elif name == 'cosine_decay':\n progress = jnp.maximum(0.0,\n (step - warmup_steps) / float(steps_per_cycle))\n ret *= jnp.maximum(0.0,\n 0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))\n else:\n raise ValueError('Unknown factor %s.' 
% name)\n return jnp.asarray(ret, dtype=jnp.float32)\n\n return step_fn\n\n\ndef compute_weighted_cross_entropy(logits, targets, weights=None):\n \"\"\"Compute weighted cross entropy and entropy for log probs and targets.\n\n Args:\n logits: [batch, length, num_classes] float array.\n targets: categorical targets [batch, length] int array.\n weights: None or array of shape [batch x length]\n\n Returns:\n Tuple of scalar loss and batch normalizing factor.\n \"\"\"\n if logits.ndim != targets.ndim + 1:\n raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %\n (str(logits.shape), str(targets.shape)))\n onehot_targets = common_utils.onehot(targets, logits.shape[-1])\n loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)\n normalizing_factor = onehot_targets.sum()\n if weights is not None:\n loss = loss * weights\n normalizing_factor = weights.sum()\n\n return loss.sum(), normalizing_factor\n\n\ndef compute_weighted_accuracy(logits, targets, weights=None):\n \"\"\"Compute weighted accuracy for log probs and targets.\n\n Args:\n logits: [batch, length, num_classes] float array.\n targets: categorical targets [batch, length] int array.\n weights: None or array of shape [batch x length]\n\n Returns:\n Tuple of scalar accuracy and batch normalizing factor.\n \"\"\"\n if logits.ndim != targets.ndim + 1:\n raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %\n (str(logits.shape), str(targets.shape)))\n loss = jnp.equal(jnp.argmax(logits, axis=-1), targets)\n normalizing_factor = jnp.prod(logits.shape[:-1])\n if weights is not None:\n loss = loss * weights\n normalizing_factor = weights.sum()\n\n return loss.sum(), normalizing_factor\n\n\ndef compute_metrics(logits, labels, weights):\n \"\"\"Compute summary metrics.\"\"\"\n loss, weight_sum = compute_weighted_cross_entropy(logits, labels, weights)\n acc, _ = compute_weighted_accuracy(logits, labels, weights)\n metrics = {\n 'loss': loss,\n 'accuracy': acc,\n 'denominator': weight_sum,\n }\n metrics = jax.lax.psum(metrics, 'batch')\n return metrics\n\n\ndef train_step(optimizer, inputs, learning_rate_fn, dropout_rng=None):\n \"\"\"Perform a single training step.\"\"\"\n weights = jnp.where(inputs > 0, 1, 0)\n\n # We handle PRNG splitting inside the top pmap, rather\n # than handling it outside in the training loop - doing the\n # latter can add some stalls to the devices.\n dropout_rng, new_dropout_rng = random.split(dropout_rng)\n\n def loss_fn(model):\n \"\"\"Loss function used for training.\"\"\"\n with nn.stochastic(dropout_rng):\n logits = model(inputs, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, inputs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits\n\n step = optimizer.state.step\n lr = learning_rate_fn(step)\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (_, logits), grad = grad_fn(optimizer.target)\n grad = jax.lax.pmean(grad, 'batch')\n new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)\n metrics = compute_metrics(logits, inputs, weights)\n metrics['learning_rate'] = lr\n\n return new_optimizer, metrics, new_dropout_rng\n\n\ndef eval_step(model, inputs):\n weights = jnp.where(inputs > 0, 1, 0)\n logits = model(inputs, train=False)\n return compute_metrics(logits, inputs, weights)\n\n\ndef predict_step(inputs, model, cache, prng_key):\n \"\"\"Fast sampling of language model from prompt.\"\"\"\n prefix_len = inputs.shape[1]\n pad_len = FLAGS.max_predict_token_length - prefix_len\n padded_inputs = jnp.pad(inputs, 
jnp.array([[0, 0], [0, pad_len]]))\n\n def tokens_ids_to_logits(ids, cache):\n \"\"\"Token slice to logits from decoder model.\"\"\"\n with cache.mutate() as new_cache:\n logits = model(ids, shift=False, train=False, cache=new_cache)\n # Remove singleton sequence-length dimension\n # [batch, 1, vocab] --> [batch, vocab]\n logits = logits.squeeze(axis=1)\n return logits, new_cache\n\n sampled_seqs = decode.temperature_sample(\n padded_inputs,\n cache,\n tokens_ids_to_logits,\n prng_key,\n temperature=FLAGS.sampling_temperature,\n topk=FLAGS.sampling_top_k,\n eos_token=2**16) # No EOS tokens used in default lm1b dataset encoding.\n\n return sampled_seqs\n\n\ndef tohost(x):\n \"\"\"Collect batches from all devices to host and flatten batch dimensions.\"\"\"\n n_device, n_batch, *remaining_dims = x.shape\n return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n tf.enable_v2_behavior()\n\n batch_size = FLAGS.batch_size\n learning_rate = FLAGS.learning_rate\n num_train_steps = FLAGS.num_train_steps\n num_eval_steps = FLAGS.num_eval_steps\n eval_freq = FLAGS.eval_frequency\n max_target_length = FLAGS.max_target_length\n max_eval_target_length = FLAGS.max_eval_target_length\n random_seed = FLAGS.random_seed\n\n if jax.host_id() == 0:\n train_summary_writer = tensorboard.SummaryWriter(\n os.path.join(FLAGS.model_dir, 'train'))\n eval_summary_writer = tensorboard.SummaryWriter(\n os.path.join(FLAGS.model_dir, 'eval'))\n\n if batch_size % jax.device_count() > 0:\n raise ValueError('Batch size must be divisible by the number of devices')\n train_ds, eval_ds, info_ds = input_pipeline.get_lm1b_datasets(\n n_devices=jax.local_device_count(),\n data_dir=FLAGS.data_dir,\n batch_size=batch_size,\n dynamic_batching=True,\n max_target_length=max_target_length,\n max_eval_target_length=max_eval_target_length)\n vocab_size = info_ds['text'].encoder.vocab_size\n encoder = info_ds['text'].encoder\n\n train_iter = iter(train_ds)\n input_shape = (batch_size, max_target_length)\n\n transformer_lm_kwargs = {\n 'vocab_size': vocab_size,\n 'emb_dim': 512,\n 'num_heads': 8,\n 'num_layers': 6,\n 'qkv_dim': 512,\n 'mlp_dim': 2048,\n 'max_len': max(max_target_length, max_eval_target_length)\n }\n\n rng = random.PRNGKey(random_seed)\n rng = jax.random.fold_in(rng, jax.host_id())\n rng, init_rng = random.split(rng)\n # We init the first set of dropout PRNG keys, but update it afterwards inside\n # the main pmap'd training update for performance.\n dropout_rngs = random.split(rng, jax.local_device_count())\n\n model, cache_def = create_model(init_rng, input_shape, transformer_lm_kwargs)\n optimizer = create_optimizer(model, learning_rate)\n del model # Don't keep a copy of the initial model.\n start_step = 0\n if FLAGS.restore_checkpoints:\n # Restore unreplicated optimizer + model state from last checkpoint.\n optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)\n # Grab last step.\n start_step = int(optimizer.state.step)\n\n # Replicate optimizer.\n optimizer = jax_utils.replicate(optimizer)\n\n learning_rate_fn = create_learning_rate_scheduler(\n base_learning_rate=learning_rate)\n p_train_step = jax.pmap(\n functools.partial(train_step, learning_rate_fn=learning_rate_fn),\n axis_name='batch')\n p_eval_step = jax.pmap(eval_step, axis_name='batch')\n p_pred_step = jax.pmap(predict_step, axis_name='batch')\n\n metrics_all = []\n tick = time.time()\n for step, batch in 
zip(range(start_step, num_train_steps), train_iter):\n batch = common_utils.shard(jax.tree_map(lambda x: x._numpy(), batch)) # pylint: disable=protected-access\n optimizer, metrics, dropout_rngs = p_train_step(\n optimizer, batch, dropout_rng=dropout_rngs)\n metrics_all.append(metrics)\n\n # Save a Checkpoint\n if ((step % FLAGS.checkpoint_freq == 0 and step > 0) or\n step == num_train_steps - 1):\n if jax.host_id() == 0 and FLAGS.save_checkpoints:\n # Save unreplicated optimizer + model state.\n checkpoints.save_checkpoint(\n FLAGS.model_dir, jax_utils.unreplicate(optimizer), step)\n\n # Periodic metric handling.\n if step % eval_freq == 0 and step > 0:\n metrics_all = common_utils.get_metrics(metrics_all)\n lr = metrics_all.pop('learning_rate').mean()\n metrics_sums = jax.tree_map(jnp.sum, metrics_all)\n denominator = metrics_sums.pop('denominator')\n summary = jax.tree_map(lambda x: x / denominator, metrics_sums) # pylint: disable=cell-var-from-loop\n summary['learning_rate'] = lr\n # Calculate (clipped) perplexity after averaging log-perplexities:\n summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)\n logging.info('train in step: %d, loss: %.4f', step, summary['loss'])\n if jax.host_id() == 0:\n tock = time.time()\n steps_per_sec = eval_freq / (tock - tick)\n tick = tock\n train_summary_writer.scalar('steps per second', steps_per_sec, step)\n for key, val in summary.items():\n train_summary_writer.scalar(key, val, step)\n train_summary_writer.flush()\n # Reset metric accumulation for next evaluation cycle.\n metrics_all = []\n\n # Eval Metrics\n eval_metrics = []\n eval_iter = iter(eval_ds)\n if num_eval_steps == -1:\n num_iter = itertools.repeat(1)\n else:\n num_iter = range(num_eval_steps)\n for _, eval_batch in zip(num_iter, eval_iter):\n # pylint: disable=protected-access\n eval_batch = common_utils.shard(\n jax.tree_map(lambda x: x._numpy(), eval_batch))\n # pylint: enable=protected-access\n metrics = p_eval_step(optimizer.target, eval_batch)\n eval_metrics.append(metrics)\n eval_metrics = common_utils.get_metrics(eval_metrics)\n eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)\n eval_denominator = eval_metrics_sums.pop('denominator')\n eval_summary = jax.tree_map(\n lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop\n eval_metrics_sums)\n # Calculate (clipped) perplexity after averaging log-perplexities:\n eval_summary['perplexity'] = jnp.clip(\n jnp.exp(eval_summary['loss']), a_max=1.0e4)\n logging.info('eval in step: %d, loss: %.4f', step, eval_summary['loss'])\n if jax.host_id() == 0:\n for key, val in eval_summary.items():\n eval_summary_writer.scalar(key, val, step)\n eval_summary_writer.flush()\n\n # Fast inference of prompt extension using trained LM.\n rng, subrng = jax.random.split(rng)\n pred_rngs = random.split(subrng, jax.local_device_count())\n prompt = jnp.array(encoder.encode(FLAGS.prompt))\n prompt = jax_utils.replicate(prompt)\n prompt = jnp.reshape(prompt, (prompt.shape[0], 1, prompt.shape[1]))\n cache = jax_utils.replicate(\n cache_def.initialize_cache((1, FLAGS.max_predict_token_length)))\n predicted = p_pred_step(prompt, optimizer.target, cache, pred_rngs)\n predicted = tohost(predicted)\n exemplars = ''\n for n in range(predicted.shape[0]):\n exemplars += encoder.decode(predicted[n]) + '\\n\\n'\n if jax.host_id() == 0:\n eval_summary_writer.text('samples', exemplars, step)\n eval_summary_writer.flush()\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\n# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities we could consider upstreaming to Jax.\n\"\"\"\n\nimport collections\nfrom collections.abc import Iterable # pylint: disable=g-importing-member\nimport warnings\n\nimport numpy as onp\n\nimport jax\nfrom jax.interpreters import partial_eval as pe\nfrom jax import linear_util as lu\nfrom jax import lax\nimport jax.numpy as jnp\nimport jax.lib.xla_bridge as xb\n\n_is_omnistaging = hasattr(pe, 'trace_to_jaxpr_dynamic')\n\n\ndef _replicate(x, devices=None):\n x = jax.numpy.asarray(x)\n if devices is None:\n # match the default device assignments used in pmap:\n # for single-host, that's the XLA default device assignment\n # for multi-host, it's the order of jax.local_devices()\n if jax.host_count() == 1:\n devices = [d for d in xb.get_backend().get_default_device_assignment(\n jax.device_count()) if d.host_id == jax.host_id()]\n else:\n devices = jax.local_devices()\n aval = jax.ShapedArray((len(devices),) + x.shape, x.dtype)\n buffers = [jax.interpreters.xla.device_put(x, device=d) for d in devices]\n return jax.pxla.ShardedDeviceArray(aval, buffers)\n\n\ndef replicate(tree, devices=None):\n \"\"\"Replicates arrays to multiple devices.\n\n Args:\n tree: a pytree containing the arrays that should be replicated.\n devices: the devices the data is replicated to\n (default: `jax.local_devices()`).\n Returns:\n A new pytree containing the replicated arrays.\n \"\"\"\n return jax.tree_map(lambda x: _replicate(x, devices), tree)\n\n\ndef unreplicate(tree):\n \"\"\"Returns a single instance of a replicated array.\"\"\"\n return jax.tree_map(lambda x: x[0], tree)\n\n\ndef pmean(xs, axis_name):\n warnings.warn('use jax.lax.pmean instead',\n DeprecationWarning)\n return lax.pmean(xs, axis_name)\n\n\ndef partial_eval_by_shape(fn, input_spec, *args, **kwargs):\n \"\"\"Lazily evaluate a function by using the shapes of the inputs.\n\n This function is similar to `jax.eval_shape` with the key difference that\n function outputs that can be computed without a concrete value of the\n inputs are returned as is instead of only the shape. 
See for example\n `module.init_by_shape` where this functionality is used to initialize a\n model without using input data lr computation.\n\n Args:\n fn: the function to be lazily evaluated.\n input_spec: an iterable of shapes or (shape, dtype) tuples specifying the\n shape and type of the inputs. If unspecified the dtype is float32.\n *args: other arguments passed to the module's apply function\n **kwargs: keyword arguments passed to the module's apply function\n Returns:\n A pair consisting of the model output and an instance of Model\n \"\"\"\n # output cannot be returned in lazy_create because jax.eval_shape will only\n # return the shape and dtype.\n # TODO(mattjj,jheek): use a public JAX API\n f = lambda *inputs: fn(*inputs, *args, **kwargs)\n input_structs = [_parse_spec(spec) for spec in input_spec]\n inputs_flat, in_tree = jax.tree_flatten(input_structs)\n f_flat, out_tree = jax.api_util.flatten_fun_nokwargs(lu.wrap_init(f), in_tree)\n in_pvals = [pe.PartialVal.unknown(jax.ShapedArray(x.shape, x.dtype))\n for x in inputs_flat]\n\n if _is_omnistaging:\n _, out_pvals, _ = pe.trace_to_jaxpr(f_flat, in_pvals)\n else:\n _, out_pvals, _ = pe.trace_to_jaxpr(f_flat, in_pvals, stage_out=True)\n out_flat = [const if pv is None else jax.ShapeDtypeStruct(pv.shape, pv.dtype)\n for pv, const in out_pvals]\n return jax.tree_unflatten(out_tree(), out_flat)\n\n\ndef _parse_spec(spec):\n \"\"\"Parse an input spec of the form (shape, dtype) or shape into a jax.ShapeDtypeStruct.\"\"\"\n spec = tuple(spec)\n if len(spec) == 2 and isinstance(spec[0], Iterable):\n return jax.ShapeDtypeStruct(tuple(spec[0]), spec[1])\n else:\n return jax.ShapeDtypeStruct(spec, jnp.float32)\n\n\ndef prefetch_to_device(iterator, size, devices=None):\n \"\"\"\"Shard and prefetch batches on device.\n\n This utility takes an iterator and returns a new iterator which fills an on\n device prefetch buffer. 
Eager prefetching can improve the performance of\n training loops significantly by overlapping compute and data transfer.\n\n Args:\n iterator: an iterator that yields a pytree of ndarrays where the first\n dimension is sharded across devices.\n size: the size of the prefetch buffer.\n devices: the list of devices to which the arrays should be prefetched.\n Yields:\n The original items from the iterator where each ndarray is now a sharded to\n the specified devices.\n \"\"\"\n queue = collections.deque()\n if devices is None:\n devices = jax.local_devices()\n def _prefetch(xs):\n aval = jax.xla.abstractify(xs)\n assert xs.shape[0] == len(devices), (\n \"The first dimension of the iterator's ndarrays is not \"\n \"equal to the number of devices.\")\n buffers = [jax.interpreters.xla.device_put(x, devices[i])\n for i, x in enumerate(xs)]\n return jax.pxla.ShardedDeviceArray(aval, buffers)\n try:\n while len(queue) < size:\n queue.append(jax.tree_map(_prefetch, next(iterator)))\n except StopIteration:\n pass\n\n while True:\n try:\n xs = queue.popleft()\n except IndexError:\n return\n try:\n queue.append(jax.tree_map(_prefetch, next(iterator)))\n except StopIteration:\n pass\n yield xs\n\n\ndef _scan_nd(body_fn, init, xs, n=1):\n \"\"\"Utility for performing an n-dimensional `lax.scan`.\n\n The n-d scan is simply recursive call of 1-d scan.\n Args:\n body_fn: the body of the loop of type (c, x) -> (c, y).\n init: initial value for the carry.\n xs: a pytree of tensors to scan over.\n n: number of dimensions to scan over (default: 1)\n Returns:\n A tuple of the final carry and the values returned by the body.\n \"\"\"\n if n == 1:\n return lax.scan(body_fn, init, xs)\n else:\n def scan_body(c, x):\n return _scan_nd(body_fn, c, x, n=n-1)\n return lax.scan(scan_body, init, xs)\n\n\ndef _invert_perm(perm):\n perm_inv = [0] * len(perm)\n for i, j in enumerate(perm):\n perm_inv[j] = i\n return tuple(perm_inv)\n\n\ndef scan_in_dim(body_fn, init, xs, axis=(0,), keepdims=False):\n \"\"\"utility for doing a scan along arbitrary dimensions.\n\n see `lax.scan` for details on how the scan operation works.\n Args:\n body_fn: the body of the loop of type (c, x) -> (c, y).\n init: initial value for the carry.\n xs: a pytree of tensors to scan over.\n axis: the axis to scan over.\n keepdims: keep the dimensions that are scanned over.\n Returns:\n A tuple of the final carry and the values returned by the body.\n \"\"\"\n if not isinstance(axis, Iterable):\n axis = (axis,)\n\n def transpose_in(x):\n perm = axis + tuple(onp.delete(onp.arange(x.ndim), axis))\n return x.transpose(perm)\n def transpose_out(x):\n perm = axis + tuple(onp.delete(onp.arange(x.ndim), axis))\n return x.transpose(_invert_perm(perm))\n\n def body_wrapper(c, xs):\n if keepdims:\n xs = jax.tree_map(lambda x: x.reshape((1,) * len(axis) + x.shape), xs)\n xs = jax.tree_map(transpose_out, xs)\n c, ys = body_fn(c, xs)\n if keepdims:\n ys = jax.tree_map(transpose_in, ys)\n ys = jax.tree_map(lambda x: x.reshape(x.shape[len(axis):]), ys)\n return c, ys\n\n xs = jax.tree_map(transpose_in, xs)\n c, ys = _scan_nd(body_wrapper, init, xs, n=len(axis))\n ys = jax.tree_map(transpose_out, ys)\n return c, ys\n"
] | [
[
"tensorflow.compat.v2.enable_v2_behavior",
"numpy.array"
],
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
albertomancino/elliot | [
"339c6421b86646c7a5a1f5001b08a16550ed1d37"
] | [
"elliot/run.py"
] | [
"\"\"\"\nModule description:\n\n\"\"\"\n\n__version__ = '0.3.1'\n__author__ = 'Vito Walter Anelli, Claudio Pomo'\n__email__ = '[email protected], [email protected]'\n\nimport importlib\nimport sys\nfrom os import path\n\nimport numpy as np\nfrom hyperopt import Trials, fmin\n\nimport elliot.hyperoptimization as ho\nfrom elliot.namespace.namespace_model_builder import NameSpaceBuilder\nfrom elliot.result_handler.result_handler import ResultHandler, HyperParameterStudy, StatTest\nfrom elliot.utils import logging as logging_project\n\n_rstate = np.random.RandomState(42)\nhere = path.abspath(path.dirname(__file__))\n\nprint(u'''\n /\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\\\ \n \\\\/\\\\\\\\\\\\/////////// \\\\////\\\\\\\\\\\\ \\\\////\\\\\\\\\\\\ \n \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ /\\\\\\\\\\\\ /\\\\\\\\\\\\ \n \\\\/\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/// /\\\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ \n \\\\/\\\\\\\\\\\\/////// \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ /\\\\\\\\\\\\ /\\\\\\\\\\\\///\\\\\\\\\\\\ \\\\////\\\\\\\\\\\\//// \n \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ /\\\\\\\\\\\\ \\\\//\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \n \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\//\\\\\\\\\\\\ /\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ /\\\\\\\\ \n \\\\/\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\///\\\\\\\\\\\\\\\\\\\\/ \\\\//\\\\\\\\\\\\\\\\\\\\ \n \\\\/////////////// \\\\///////// \\\\///////// \\\\/// \\\\///// \\\\///// ''')\n\nprint(f'Version Number: {__version__}')\n\n\ndef run_experiment(config_path: str = ''):\n builder = NameSpaceBuilder(config_path, here, path.abspath(path.dirname(config_path)))\n base = builder.base\n config_test(builder, base)\n logging_project.init(base.base_namespace.path_logger_config, base.base_namespace.path_log_folder)\n logger = logging_project.get_logger(\"__main__\")\n\n if base.base_namespace.version != __version__:\n logger.error(f'Your config file use a different version of Elliot! '\n f'In different versions of Elliot the results may slightly change due to progressive improvement! '\n f'Some feature could be deprecated! Download latest version at this link '\n f'https://github.com/sisinflab/elliot/releases')\n raise Exception(\n 'Version mismatch! 
In different versions of Elliot the results may slightly change due to progressive improvement!')\n\n logger.info(\"Start experiment\")\n base.base_namespace.evaluation.relevance_threshold = getattr(base.base_namespace.evaluation, \"relevance_threshold\",\n 0)\n res_handler = ResultHandler(rel_threshold=base.base_namespace.evaluation.relevance_threshold)\n hyper_handler = HyperParameterStudy(rel_threshold=base.base_namespace.evaluation.relevance_threshold)\n dataloader_class = getattr(importlib.import_module(\"elliot.dataset\"), base.base_namespace.data_config.dataloader)\n dataloader = dataloader_class(config=base.base_namespace)\n data_test_list = dataloader.generate_dataobjects()\n for key, model_base in builder.models():\n test_results = []\n test_trials = []\n for test_fold_index, data_test in enumerate(data_test_list):\n logging_project.prepare_logger(key, base.base_namespace.path_log_folder)\n if key.startswith(\"external.\"):\n spec = importlib.util.spec_from_file_location(\"external\",\n path.relpath(base.base_namespace.external_models_path))\n external = importlib.util.module_from_spec(spec)\n sys.modules[spec.name] = external\n spec.loader.exec_module(external)\n model_class = getattr(importlib.import_module(\"external\"), key.split(\".\", 1)[1])\n else:\n model_class = getattr(importlib.import_module(\"elliot.recommender\"), key)\n\n model_placeholder = ho.ModelCoordinator(data_test, base.base_namespace, model_base, model_class,\n test_fold_index)\n if isinstance(model_base, tuple):\n logger.info(f\"Tuning begun for {model_class.__name__}\\\\n\")\n trials = Trials()\n fmin(model_placeholder.objective,\n space=model_base[1],\n algo=model_base[3],\n trials=trials,\n verbose=False,\n rstate=_rstate,\n max_evals=model_base[2])\n\n # argmin relativo alla combinazione migliore di iperparametri\n min_val = np.argmin([i[\"result\"][\"loss\"] for i in trials._trials])\n ############################################\n best_model_loss = trials._trials[min_val][\"result\"][\"loss\"]\n best_model_params = trials._trials[min_val][\"result\"][\"params\"]\n best_model_results = trials._trials[min_val][\"result\"][\"test_results\"]\n ############################################\n\n # aggiunta a lista performance test\n test_results.append(trials._trials[min_val][\"result\"])\n test_trials.append(trials)\n logger.info(f\"Tuning ended for {model_class.__name__}\")\n else:\n logger.info(f\"Training begun for {model_class.__name__}\\\\n\")\n single = model_placeholder.single()\n\n ############################################\n best_model_loss = single[\"loss\"]\n best_model_params = single[\"params\"]\n best_model_results = single[\"test_results\"]\n ############################################\n\n # aggiunta a lista performance test\n test_results.append(single)\n logger.info(f\"Training ended for {model_class.__name__}\")\n\n logger.info(f\"Loss:\\\\t{best_model_loss}\")\n logger.info(f\"Best Model params:\\\\t{best_model_params}\")\n logger.info(f\"Best Model results:\\\\t{best_model_results}\")\n\n # Migliore sui test, aggiunta a performance totali\n min_val = np.argmin([i[\"loss\"] for i in test_results])\n\n res_handler.add_oneshot_recommender(**test_results[min_val])\n\n if isinstance(model_base, tuple):\n hyper_handler.add_trials(test_trials[min_val])\n\n # res_handler.save_results(output=base.base_namespace.path_output_rec_performance)\n hyper_handler.save_trials(output=base.base_namespace.path_output_rec_performance)\n 
res_handler.save_best_results(output=base.base_namespace.path_output_rec_performance)\n cutoff_k = getattr(base.base_namespace.evaluation, \"cutoffs\", [base.base_namespace.top_k])\n cutoff_k = cutoff_k if isinstance(cutoff_k, list) else [cutoff_k]\n first_metric = base.base_namespace.evaluation.simple_metrics[\n 0] if base.base_namespace.evaluation.simple_metrics else \"\"\n res_handler.save_best_models(output=base.base_namespace.path_output_rec_performance, default_metric=first_metric,\n default_k=cutoff_k)\n if hasattr(base.base_namespace,\n \"print_results_as_triplets\") and base.base_namespace.print_results_as_triplets == True:\n res_handler.save_best_results_as_triplets(output=base.base_namespace.path_output_rec_performance)\n hyper_handler.save_trials_as_triplets(output=base.base_namespace.path_output_rec_performance)\n if hasattr(base.base_namespace.evaluation, \"paired_ttest\") and base.base_namespace.evaluation.paired_ttest:\n res_handler.save_best_statistical_results(stat_test=StatTest.PairedTTest,\n output=base.base_namespace.path_output_rec_performance)\n if hasattr(base.base_namespace.evaluation, \"wilcoxon_test\") and base.base_namespace.evaluation.wilcoxon_test:\n res_handler.save_best_statistical_results(stat_test=StatTest.WilcoxonTest,\n output=base.base_namespace.path_output_rec_performance)\n\n logger.info(\"End experiment\")\n\n\ndef _reset_verbose_option(model):\n if isinstance(model, tuple):\n model[0].meta.verbose = False\n model[0].meta.save_recs = False\n model[0].meta.save_weights = False\n else:\n model.meta.verbose = False\n model.meta.save_recs = False\n model.meta.save_weights = False\n return model\n\n\ndef config_test(builder, base):\n if base.base_namespace.config_test:\n logging_project.init(base.base_namespace.path_logger_config, base.base_namespace.path_log_folder)\n logger = logging_project.get_logger(\"__main__\")\n logger.info(\"Start config test\")\n base.base_namespace.evaluation.relevance_threshold = getattr(base.base_namespace.evaluation,\n \"relevance_threshold\", 0)\n res_handler = ResultHandler(rel_threshold=base.base_namespace.evaluation.relevance_threshold)\n hyper_handler = HyperParameterStudy(rel_threshold=base.base_namespace.evaluation.relevance_threshold)\n dataloader_class = getattr(importlib.import_module(\"elliot.dataset\"),\n base.base_namespace.data_config.dataloader)\n dataloader = dataloader_class(config=base.base_namespace)\n data_test_list = dataloader.generate_dataobjects_mock()\n for key, model_base in builder.models():\n test_results = []\n test_trials = []\n for data_test in data_test_list:\n if key.startswith(\"external.\"):\n spec = importlib.util.spec_from_file_location(\"external\",\n path.relpath(\n base.base_namespace.external_models_path))\n external = importlib.util.module_from_spec(spec)\n sys.modules[spec.name] = external\n spec.loader.exec_module(external)\n model_class = getattr(importlib.import_module(\"external\"), key.split(\".\", 1)[1])\n else:\n model_class = getattr(importlib.import_module(\"elliot.recommender\"), key)\n\n model_base_mock = model_base\n model_base_mock = _reset_verbose_option(model_base_mock)\n model_placeholder = ho.ModelCoordinator(data_test, base.base_namespace, model_base_mock, model_class)\n if isinstance(model_base, tuple):\n trials = Trials()\n fmin(model_placeholder.objective,\n space=model_base_mock[1],\n algo=model_base_mock[3],\n trials=trials,\n rstate=_rstate,\n max_evals=model_base_mock[2])\n\n min_val = np.argmin([i[\"result\"][\"loss\"] for i in trials._trials])\n\n 
test_results.append(trials._trials[min_val][\"result\"])\n test_trials.append(trials)\n else:\n single = model_placeholder.single()\n\n test_results.append(single)\n\n min_val = np.argmin([i[\"loss\"] for i in test_results])\n\n res_handler.add_oneshot_recommender(**test_results[min_val])\n\n if isinstance(model_base, tuple):\n hyper_handler.add_trials(test_trials[min_val])\n logger.info(\"End config test without issues\")\n base.base_namespace.config_test = False\n\n\nif __name__ == '__main__':\n run_experiment(\"./config/VBPR_amazon_baby.yml\")\n"
] | [
[
"numpy.random.RandomState",
"numpy.argmin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DigitalPhonetics/SpeechRepresentationFinetuning | [
"11d7130919888d0a27de61f5075e72f4a024673b"
] | [
"Combine/models.py"
] | [
"\"\"\"\nAdapt from:\nhttps://github.com/facebookresearch/barlowtwins/blob/main/main.py\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom transformers import Wav2Vec2Model\nfrom transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices\n\n\ndef off_diagonal(x):\n \"\"\"\n For the purpose of calculation:\n return flattened view of the off-diagonal elements of a square matrix\n \"\"\"\n n, m = x.shape\n # need to ensure it is matrix\n assert n == m\n return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()\n\n\nclass BarlowTwins(nn.Module):\n def __init__(self, output_size, lambd, batch_size, device):\n super().__init__()\n self.output_size = output_size\n self.lambd = lambd\n self.batch_size = batch_size\n self.device = device\n\n # linear layer as projector\n # self.linear_layer = nn.Sequential(nn.Linear(1024, 64))\n self.dropout = nn.Sequential(nn.Dropout(0.5))\n self.wav2vec_model = Wav2Vec2Model.from_pretrained(\"facebook/wav2vec2-base\")\n self.wav2vec_model.fc = nn.Identity()\n # We will try to use projector in the original paper\n # 3-layers projector\n proj_layers = []\n for layer in range(3):\n if layer == 0: # first layer\n proj_layers.append(nn.Linear(1024, self.output_size, bias=False))\n else:\n proj_layers.append(\n nn.Linear(self.output_size, self.output_size, bias=False)\n )\n if layer < 2: # if not the last layer\n proj_layers.append(nn.BatchNorm1d(self.output_size))\n proj_layers.append(nn.ReLU(inplace=True))\n self.projector = nn.Sequential(*proj_layers)\n self.bn = nn.BatchNorm1d(self.output_size, affine=False)\n\n def forward(self, input_1, input_2):\n # compute masked indices\n batch_size, raw_sequence_length = input_1.shape\n sequence_length = self.wav2vec_model._get_feat_extract_output_lengths(\n raw_sequence_length\n )\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length), mask_prob=0.2, mask_length=2\n )\n mask_time_indices = torch.from_numpy(mask_time_indices).to(self.device)\n\n # compute masked indices\n n = input_1.shape[0]\n # print(\"n: \\n\", n) # 32\n output_1 = self.wav2vec_model(\n input_1, mask_time_indices=mask_time_indices\n ).extract_features # [32, 2, 512]\n output_1 = output_1.reshape(n, -1) # [32, 1024]\n # TODO: try droupout\n output_1 = self.dropout(output_1)\n # print(\"output_1: \\n\", output_1.shape) # 32\n\n # TODO: (batch)normalization version of representation\n # output_1 = self.linear_layer(output_1) # [32, 64]\n output_1 = self.projector(output_1)\n\n output_2 = self.wav2vec_model(\n input_2, mask_time_indices=mask_time_indices\n ).extract_features\n # TODO: remove reshape perphas\n output_2 = output_2.reshape(n, -1)\n # output_2 = self.linear_layer(output_2)\n output_2 = self.projector(output_2)\n # TODO: try droupout\n output_2 = self.dropout(output_2)\n\n return output_1, output_2\n\n def loss(self, output_1, output_2):\n # empirical cross-correlation matrix\n c = self.bn(output_1).T @ self.bn(output_2) # [32, 64]\n\n # sum the cross-correlation matrix between all gpus\n c.div_(self.batch_size) # 32 is batch size\n # torch.distributed.all_reduce(c)\n\n on_diag = torch.diagonal(c).add_(-1).pow(2).sum()\n off_diag = off_diagonal(c).pow_(2).sum()\n loss_val = on_diag + self.lambd * off_diag\n return loss_val\n\n\nclass BarlowTwins_Contrastive(nn.Module):\n def __init__(\n self, output_size, lambd, triplet_margin, barlowtwins_lambd, batch_size, device\n ):\n super().__init__()\n self.output_size = output_size\n self.lambd = lambd\n self.barlowtwins_lambd = barlowtwins_lambd\n self.batch_size = 
batch_size\n self.device = device\n self.cosine_similarity = nn.CosineSimilarity()\n self.triplet_margin = triplet_margin\n\n # linear layer as projector\n # self.linear_layer = nn.Sequential(nn.Linear(1024, 64))\n self.dropout = nn.Sequential(nn.Dropout(0.5))\n self.wav2vec_model = Wav2Vec2Model.from_pretrained(\"facebook/wav2vec2-base\")\n # self.wav2vec_model.fc = nn.Identity()\n # 3-layers projector\n proj_layers = []\n for layer in range(3):\n if layer == 0: # first layer\n proj_layers.append(nn.Linear(1024, self.output_size, bias=False))\n else:\n proj_layers.append(\n nn.Linear(self.output_size, self.output_size, bias=False)\n )\n if layer < 2: # if not the last layer\n proj_layers.append(nn.BatchNorm1d(self.output_size))\n proj_layers.append(nn.ReLU(inplace=True))\n self.projector = nn.Sequential(*proj_layers)\n self.bn = nn.BatchNorm1d(self.output_size, affine=False)\n\n def forward(self, anchor, positive, negative):\n # compute masked indices\n n = anchor.shape[0]\n batch_size, raw_sequence_length = anchor.shape\n sequence_length = self.wav2vec_model._get_feat_extract_output_lengths(\n raw_sequence_length\n )\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length), mask_prob=0.2, mask_length=2\n )\n mask_time_indices = torch.from_numpy(mask_time_indices).to(self.device)\n\n anchor_out = self.wav2vec_model(\n anchor, mask_time_indices=mask_time_indices\n ).extract_features\n anchor_out = self.dropout(anchor_out)\n anchor_out = anchor_out.reshape(n, -1)\n anchor_out = self.projector(anchor_out)\n\n positive_out = self.wav2vec_model(\n positive, mask_time_indices=mask_time_indices\n ).extract_features\n positive_out = self.dropout(positive_out)\n positive_out = positive_out.reshape(n, -1)\n positive_out = self.projector(positive_out)\n\n negative_out = self.wav2vec_model(\n negative, mask_time_indices=mask_time_indices\n ).extract_features\n negative_out = self.dropout(negative_out)\n negative_out = negative_out.reshape(n, -1)\n negative_out = self.projector(negative_out)\n\n return anchor_out, positive_out, negative_out\n\n def barlowtwins_loss(self, anchor_out, positive_out):\n # empirical cross-correlation matrix\n c = self.bn(anchor_out).T @ self.bn(positive_out) # [32, 64]\n\n # sum the cross-correlation matrix between all gpus\n # TODO: use argueparser for batch size 32\n c.div_(self.batch_size) # 32 is batch size\n # torch.distributed.all_reduce(c)\n\n on_diag = torch.diagonal(c).add_(-1).pow(2).sum()\n off_diag = off_diagonal(c).pow_(2).sum()\n loss_val = on_diag + self.barlowtwins_lambd * off_diag\n return loss_val\n\n def triplet_loss(self, anchor_out, positive_out, negative_out, reduction=\"mean\"):\n positive_distance = 1 - self.cosine_similarity(anchor_out, positive_out)\n\n negative_distance = 1 - self.cosine_similarity(anchor_out, negative_out)\n\n losses = torch.max(\n positive_distance - negative_distance + self.triplet_margin,\n torch.full_like(positive_distance, 0),\n )\n if reduction == \"mean\":\n return torch.mean(losses)\n else:\n return torch.sum(losses)\n\n def combine_loss(self, barlowtwins_loss, triplet_loss):\n return barlowtwins_loss * self.lambd + triplet_loss\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.BatchNorm1d",
"torch.mean",
"torch.diagonal",
"torch.sum",
"torch.from_numpy",
"torch.nn.CosineSimilarity",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.full_like",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HawChang/PaddleHub | [
"9894fbb1dc8575ae1fa74f32a23cc1363467461b"
] | [
"hub_module/modules/image/text_recognition/chinese_text_detection_db_server/module.py"
] | [
"# -*- coding:utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport ast\nimport math\nimport os\nimport time\n\nfrom paddle.fluid.core import AnalysisConfig, create_paddle_predictor, PaddleTensor\nfrom paddlehub.common.logger import logger\nfrom paddlehub.module.module import moduleinfo, runnable, serving\nfrom PIL import Image\nimport base64\nimport cv2\nimport numpy as np\nimport paddle.fluid as fluid\nimport paddlehub as hub\n\n\ndef base64_to_cv2(b64str):\n data = base64.b64decode(b64str.encode('utf8'))\n data = np.fromstring(data, np.uint8)\n data = cv2.imdecode(data, cv2.IMREAD_COLOR)\n return data\n\n\n@moduleinfo(\n name=\"chinese_text_detection_db_server\",\n version=\"1.0.0\",\n summary=\n \"The module aims to detect chinese text position in the image, which is based on differentiable_binarization algorithm.\",\n author=\"paddle-dev\",\n author_email=\"[email protected]\",\n type=\"cv/text_recognition\")\nclass ChineseTextDetectionDBServer(hub.Module):\n def _initialize(self):\n \"\"\"\n initialize with the necessary elements\n \"\"\"\n self.pretrained_model_path = os.path.join(self.directory,\n 'ch_det_r50_vd_db')\n self._set_config()\n\n def check_requirements(self):\n try:\n import shapely, pyclipper\n except:\n print(\n 'This module requires the shapely, pyclipper tools. The running enviroment does not meet the requirments. Please install the two packages.'\n )\n exit()\n\n def _set_config(self):\n \"\"\"\n predictor config setting\n \"\"\"\n model_file_path = os.path.join(self.pretrained_model_path, 'model')\n params_file_path = os.path.join(self.pretrained_model_path, 'params')\n\n config = AnalysisConfig(model_file_path, params_file_path)\n try:\n _places = os.environ[\"CUDA_VISIBLE_DEVICES\"]\n int(_places[0])\n use_gpu = True\n except:\n use_gpu = False\n\n if use_gpu:\n config.enable_use_gpu(8000, 0)\n else:\n config.disable_gpu()\n\n config.disable_glog_info()\n\n # use zero copy\n config.delete_pass(\"conv_transpose_eltwiseadd_bn_fuse_pass\")\n config.switch_use_feed_fetch_ops(False)\n self.predictor = create_paddle_predictor(config)\n input_names = self.predictor.get_input_names()\n self.input_tensor = self.predictor.get_input_tensor(input_names[0])\n output_names = self.predictor.get_output_names()\n self.output_tensors = []\n for output_name in output_names:\n output_tensor = self.predictor.get_output_tensor(output_name)\n self.output_tensors.append(output_tensor)\n\n def read_images(self, paths=[]):\n images = []\n for img_path in paths:\n assert os.path.isfile(\n img_path), \"The {} isn't a valid file.\".format(img_path)\n img = cv2.imread(img_path)\n if img is None:\n logger.info(\"error in loading image:{}\".format(img_path))\n continue\n images.append(img)\n return images\n\n def filter_tag_det_res(self, dt_boxes, image_shape):\n img_height, img_width = image_shape[0:2]\n dt_boxes_new = []\n for box in dt_boxes:\n box = self.order_points_clockwise(box)\n left = int(np.min(box[:, 0]))\n right = int(np.max(box[:, 0]))\n top = int(np.min(box[:, 1]))\n bottom = int(np.max(box[:, 1]))\n bbox_height = bottom - top\n bbox_width = right - left\n diffh = math.fabs(box[0, 1] - box[1, 1])\n diffw = math.fabs(box[0, 0] - box[3, 0])\n rect_width = int(np.linalg.norm(box[0] - box[1]))\n rect_height = int(np.linalg.norm(box[0] - box[3]))\n if rect_width <= 10 or rect_height <= 10:\n continue\n dt_boxes_new.append(box)\n dt_boxes = np.array(dt_boxes_new)\n return dt_boxes\n\n def 
order_points_clockwise(self, pts):\n \"\"\"\n reference from: https://github.com/jrosebr1/imutils/blob/master/imutils/perspective.py\n # sort the points based on their x-coordinates\n \"\"\"\n xSorted = pts[np.argsort(pts[:, 0]), :]\n\n # grab the left-most and right-most points from the sorted\n # x-roodinate points\n leftMost = xSorted[:2, :]\n rightMost = xSorted[2:, :]\n\n # now, sort the left-most coordinates according to their\n # y-coordinates so we can grab the top-left and bottom-left\n # points, respectively\n leftMost = leftMost[np.argsort(leftMost[:, 1]), :]\n (tl, bl) = leftMost\n\n rightMost = rightMost[np.argsort(rightMost[:, 1]), :]\n (tr, br) = rightMost\n\n rect = np.array([tl, tr, br, bl], dtype=\"float32\")\n return rect\n\n def detect_text(self,\n images=[],\n paths=[],\n use_gpu=False,\n output_dir='detection_result',\n visualization=False,\n box_thresh=0.5):\n \"\"\"\n Get the text box in the predicted images.\n Args:\n images (list(numpy.ndarray)): images data, shape of each is [H, W, C]. If images not paths\n paths (list[str]): The paths of images. If paths not images\n use_gpu (bool): Whether to use gpu. Default false.\n output_dir (str): The directory to store output images.\n visualization (bool): Whether to save image or not.\n box_thresh(float): the threshold of the detected text box's confidence\n Returns:\n res (list): The result of text detection box and save path of images.\n \"\"\"\n self.check_requirements()\n\n from chinese_text_detection_db_server.processor import DBPreProcess, DBPostProcess, draw_boxes, get_image_ext\n\n if use_gpu:\n try:\n _places = os.environ[\"CUDA_VISIBLE_DEVICES\"]\n int(_places[0])\n except:\n raise RuntimeError(\n \"Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. If you wanna use gpu, please set CUDA_VISIBLE_DEVICES via export CUDA_VISIBLE_DEVICES=cuda_device_id.\"\n )\n\n if images != [] and isinstance(images, list) and paths == []:\n predicted_data = images\n elif images == [] and isinstance(paths, list) and paths != []:\n predicted_data = self.read_images(paths)\n else:\n raise TypeError(\"The input data is inconsistent with expectations.\")\n\n assert predicted_data != [], \"There is not any image to be predicted. 
Please check the input data.\"\n\n preprocessor = DBPreProcess()\n postprocessor = DBPostProcess(box_thresh)\n\n all_imgs = []\n all_ratios = []\n all_results = []\n for original_image in predicted_data:\n im, ratio_list = preprocessor(original_image)\n res = {'save_path': ''}\n if im is None:\n res['data'] = []\n\n else:\n im = im.copy()\n starttime = time.time()\n self.input_tensor.copy_from_cpu(im)\n self.predictor.zero_copy_run()\n data_out = self.output_tensors[0].copy_to_cpu()\n dt_boxes_list = postprocessor(data_out, [ratio_list])\n boxes = self.filter_tag_det_res(dt_boxes_list[0],\n original_image.shape)\n res['data'] = boxes.astype(np.int).tolist()\n\n all_imgs.append(im)\n all_ratios.append(ratio_list)\n if visualization:\n img = Image.fromarray(\n cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB))\n draw_img = draw_boxes(img, boxes)\n draw_img = np.array(draw_img)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n ext = get_image_ext(original_image)\n saved_name = 'ndarray_{}{}'.format(time.time(), ext)\n cv2.imwrite(\n os.path.join(output_dir, saved_name),\n draw_img[:, :, ::-1])\n res['save_path'] = os.path.join(output_dir, saved_name)\n\n all_results.append(res)\n\n return all_results\n\n def save_inference_model(self,\n dirname,\n model_filename=None,\n params_filename=None,\n combined=True):\n if combined:\n model_filename = \"__model__\" if not model_filename else model_filename\n params_filename = \"__params__\" if not params_filename else params_filename\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n model_file_path = os.path.join(self.pretrained_model_path, 'model')\n params_file_path = os.path.join(self.pretrained_model_path, 'params')\n program, feeded_var_names, target_vars = fluid.io.load_inference_model(\n dirname=self.pretrained_model_path,\n model_filename=model_file_path,\n params_filename=params_file_path,\n executor=exe)\n\n fluid.io.save_inference_model(\n dirname=dirname,\n main_program=program,\n executor=exe,\n feeded_var_names=feeded_var_names,\n target_vars=target_vars,\n model_filename=model_filename,\n params_filename=params_filename)\n\n @serving\n def serving_method(self, images, **kwargs):\n \"\"\"\n Run as a service.\n \"\"\"\n images_decode = [base64_to_cv2(image) for image in images]\n results = self.detect_text(images=images_decode, **kwargs)\n return results\n\n @runnable\n def run_cmd(self, argvs):\n \"\"\"\n Run as a command\n \"\"\"\n self.parser = argparse.ArgumentParser(\n description=\"Run the %s module.\" % self.name,\n prog='hub run %s' % self.name,\n usage='%(prog)s',\n add_help=True)\n\n self.arg_input_group = self.parser.add_argument_group(\n title=\"Input options\", description=\"Input data. 
Required\")\n self.arg_config_group = self.parser.add_argument_group(\n title=\"Config options\",\n description=\n \"Run configuration for controlling module behavior, not required.\")\n\n self.add_module_config_arg()\n self.add_module_input_arg()\n\n args = self.parser.parse_args(argvs)\n results = self.detect_text(\n paths=[args.input_path],\n use_gpu=args.use_gpu,\n output_dir=args.output_dir,\n visualization=args.visualization)\n return results\n\n def add_module_config_arg(self):\n \"\"\"\n Add the command config options\n \"\"\"\n self.arg_config_group.add_argument(\n '--use_gpu',\n type=ast.literal_eval,\n default=False,\n help=\"whether use GPU or not\")\n self.arg_config_group.add_argument(\n '--output_dir',\n type=str,\n default='detection_result',\n help=\"The directory to save output images.\")\n self.arg_config_group.add_argument(\n '--visualization',\n type=ast.literal_eval,\n default=False,\n help=\"whether to save output as images.\")\n\n def add_module_input_arg(self):\n \"\"\"\n Add the command input options\n \"\"\"\n self.arg_input_group.add_argument(\n '--input_path', type=str, default=None, help=\"diretory to image\")\n\n\nif __name__ == '__main__':\n db = ChineseTextDetectionDBServer()\n image_path = [\n '/mnt/zhangxuefei/PaddleOCR/doc/imgs/11.jpg',\n '/mnt/zhangxuefei/PaddleOCR/doc/imgs/12.jpg'\n ]\n res = db.detect_text(paths=image_path, visualization=True)\n db.save_inference_model('save')\n print(res)\n"
] | [
[
"numpy.min",
"numpy.linalg.norm",
"numpy.max",
"numpy.fromstring",
"numpy.argsort",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SoulaimenTheGreat/Wine-Prediction | [
"0e692ee430c09f90e7b49a3d3fb6450b6e25b663"
] | [
"src/data/fetch_database.py"
] | [
"# import motor.motor_asyncio\nfrom pymongo import MongoClient\nfrom dotenv import load_dotenv\nimport os\nimport pandas as pd\nfrom dagster import solid\n\n\ndef load_env_variables():\n \"\"\"\n Function to load environment variables from .env file\n :return: database password and database name\n \"\"\"\n load_dotenv()\n database_password = os.environ.get('PASSWORD')\n database_name = os.environ.get('DATABASE')\n return database_password, database_name\n\n\ndef configure_database_collection(collection_name: str):\n \"\"\"\n Configure the database connection, database and collection by passing the collection name\n :return: the collection\n \"\"\"\n # load database password and name from environment variables\n database_password, database_name = load_env_variables()\n MONGO_DETAILS = \"mongodb+srv://admin:\" + database_password + \"@wineestimations.ycvrd.mongodb.net/\" + database_name + \\\n \"?retryWrites=true \"\n client = MongoClient(MONGO_DETAILS)\n database = client[database_name]\n collection = database.get_collection(collection_name)\n return collection\n\n\n# def estimation_helper(estimation) -> dict:\n# return {\n# \"id\": str(estimation[\"_id\"]),\n# \"wineName\": estimation[\"wineName\"],\n# \"designation\": estimation[\"designation\"],\n# \"vineyard\": estimation[\"vineyard\"],\n# \"cuvee\": estimation[\"cuvee\"],\n# \"bottleType\": estimation[\"bottleType\"],\n# \"color\": estimation[\"color\"],\n# \"vintage\": estimation[\"vintage\"],\n# \"wineSearcherMin\": estimation[\"wineSearcherMin\"],\n# \"wineSearcherMax\": estimation[\"wineSearcherMax\"],\n# \"idealWinePrice\": estimation[\"idealWinePrice\"],\n# \"correctedMin\": estimation[\"correctedMin\"],\n# \"correctedMax\": estimation[\"correctedMax\"],\n# \"weightedMin\": estimation[\"weightedMin\"],\n# \"weightedMax\": estimation[\"weightedMax\"],\n# \"wineLevel\": estimation[\"wineLevel\"],\n# \"label\": estimation[\"label\"],\n# \"cap\": estimation[\"cap\"],\n# \"limpidity\": estimation[\"limpidity\"],\n# \"date\": estimation[\"date\"],\n# }\n\n@solid\ndef retrieve_filtered_estimations(collection_name: str, condition: dict):\n \"\"\"\n Retrieve records from mongo database by passing collection name and condition for filtering\n :return: list of retrieved records\n\n example: collection_name:'estimations_collection', condition:{\"wineLevel\": 1, \"label\": 1, \"cap\": 1, \"limpidity\": 1}\n \"\"\"\n collection = configure_database_collection(collection_name)\n filtered_estimations = []\n for estimation in collection.find(condition):\n filtered_estimations.append(estimation)\n return filtered_estimations\n\n\n@solid\ndef convert_to_csv(collection_name: str, condition: dict, filename: str):\n \"\"\"\n Convert the retrieved data from the database to csv format by passing collection name, condition, and filename in\n order to save it in data/raw as a centralised directory for data\n \"\"\"\n records = retrieve_filtered_estimations(collection_name, condition)\n records_df = pd.DataFrame.from_records(records)\n records_df.to_csv(path_or_buf=\"../../data/raw/\" + filename + \".csv\",\n index=False)\n\n\n# convert_to_csv(\"estimations_collection\", {\"wineLevel\": 1, \"label\": 1, \"cap\": 1, \"limpidity\": 1}, \"wine_estimations\")\nconvert_to_csv(\"add_weight_collection\", {\"updatedWeight\": True, \"caps_score\": 1, \"label_score\": 1, \"limpidity_score\": 1,\n \"wineLevel_score\": 1}, \"weighted_wine_estimations\")\n"
] | [
[
"pandas.DataFrame.from_records"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
josephedradan/algorithms | [
"6caa107b0df245653eab81143ebf0d9c7e5515fb"
] | [
"algorithms/miniumum_edit_distance.py"
] | [
"\"\"\"\nCreated by Joseph Edradan\nGithub: https://github.com/josephedradan\n\nDate created: 2/15/2021\n\nPurpose:\n\nDetails:\n\nDescription:\n\nNotes:\n\nIMPORTANT NOTES:\n\nExplanation:\n\nReference:\n Minimum Edit Distance Algorithm in Python in 2020 (EXPLAINED)\n Notes:\n Using Rylan Fowers' minimum edit distance algo\n\n Reference:\n https://www.youtube.com/watch?v=AY2DZ4a9gyk\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\ndef min_edit_distance_fowers(source, target):\n list_source_char = [c for c in source]\n list_target_char = [c for c in target]\n\n # Make np_array\n np_array = np.zeros((len(source), len(target)))\n\n # Target is 0th row\n np_array[0] = [c for c in range(len(target))]\n\n # Source is the 0th col ([index_row_all, index_column])\n np_array[:, 0] = [c for c in range(len(source))]\n\n \"\"\"\n Solve the [1,1] location if necessary\n \n If the char at index 1 of both target and source are different the amount of edits needed to achieve the target\n string is the min of the top and left indices values around the [1,1]\n \n \"\"\"\n\n try:\n # if target[1] != source[1]:\n # np_array[1, 1] = 2\n\n # For item index in row, start on index 1 (Target is the row)\n for i in range(1, len(target)):\n\n # for item index in col, start on index 1 (Source is the column)\n for j in range(1, len(source)):\n\n # If the respective chars from i and j for the source and target are NOT the same\n if target[i] != source[j]:\n\n \"\"\"\n Change the value at the given position given i and j\n \n Note that i and j are switched \n \n \"\"\"\n np_array[j, i] = min(np_array[j - 1, i], np_array[j, i - 1]) + 1\n\n # If the respective chars from i and j for the source and target are the same\n else:\n np_array[j, i] = np_array[j - 1, i - 1]\n\n except Exception as e:\n print(e)\n print(\"S:{:<20} T:{:<20} j{:<5} i:{:<5} \".format(source, target, j, i))\n\n # Make pandas DF of the np array\n data_frame = pd.DataFrame(np_array, columns=list_target_char, index=list_source_char)\n\n return np_array, data_frame, np_array[-1, -1]\n\n\nif __name__ == '__main__':\n print(min_edit_distance_fowers(\"joseph\", \"edradan\")[0])\n print(min_edit_distance_fowers(\"joseph\", \"edradan\")[1])\n print(min_edit_distance_fowers(\"joseph\", \"edradan\")[2])\n print()\n\n print(min_edit_distance_fowers(\"#joseph\", \"#joe\")[0])\n print(min_edit_distance_fowers(\"#joseph\", \"#joe\")[1])\n print(min_edit_distance_fowers(\"#joseph\", \"#joe\")[2])\n print()\n\n print(min_edit_distance_fowers(\"$BCDE\", \"#DE\")[0])\n print(min_edit_distance_fowers(\"$BCDE\", \"#DE\")[1])\n print(min_edit_distance_fowers(\"$BCDE\", \"#DE\")[2])\n print()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
MahdiSajedei/Searching-for-activation-functions | [
"031ef131df7fe84fa2cafadc946b5a33df8925ec"
] | [
"src/utils.py"
] | [
  "import os\nimport shutil\nimport numpy as np\nimport tensorflow as tf\n\ndef path_exists(path, overwrite=False):\n if not os.path.isdir(path):\n os.mkdir(path)\n elif overwrite == True :\n shutil.rmtree(path)\n return path\n\ndef remove_dir(path):\n os.rmdir(path)\n return True\n\ndef relu_init(shape, dtype=tf.float32, partition_info=None):\n init_range = np.sqrt(2.0 / shape[1])\n return tf.random_normal(shape, dtype=dtype) * init_range\n\ndef ones(shape, dtype=tf.float32):\n return tf.ones(shape, dtype=dtype)\n\ndef zeros(shape, dtype=tf.float32):\n return tf.zeros(shape, dtype=dtype)\n\ndef tanh_init(shape, dtype=tf.float32, partition_info=None):\n init_range = np.sqrt(6.0 / (shape[0] + shape[1]))\n return tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=dtype)\n\ndef leaky_relu(X, alpha=0.01):\n return tf.maximum(X, alpha * X)\n\ndef max(input):\n return tf.argmax(input)\n"
] | [
  [
    "numpy.sqrt",
    "tensorflow.zeros",
    "tensorflow.maximum",
    "tensorflow.ones",
"tensorflow.argmax",
"tensorflow.random_uniform",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
gem763/crawly | [
"df41e5fc67a4e5092120a1bfe459d57e201849b8"
] | [
"newscrawler/record.py"
] | [
"import pandas as pd\nimport pandas_gbq as gbq\nimport json\nfrom google.oauth2 import service_account\nfrom IPython.core.debugger import set_trace\nfrom pathlib import Path\nimport time\nfrom . import accounts\n\n'''\nConfiguration\n'''\nproj = 'global-news-crawl'\ntable_downloaded = 'news_dataset.downloaded'\ntable_trashed = 'news_dataset.trashed'\ncredentials = service_account.Credentials.from_service_account_info(accounts.bigquery_account)\n#credentials = service_account.Credentials.from_service_account_file('global-news-crawl-c48d7cd9aa81.json')\n\nlocalpath_to_downloaded = 'newsdata/downloaded'\nlocalpath_to_trashed = 'newsdata/trashed'\n\n\nclass Recorder:\n def __init__(self, storage='local'):\n self.storage = storage\n self.ids = self._get_ids(storage)\n\n def _query_ids_from_bigquery(self, tb):\n qry = 'SELECT id FROM `{}`'.format(proj + '.' + tb)\n return gbq.read_gbq(qry, credentials=credentials).id\n\n def _retreive_ids_from_local(self, path):\n return [p.stem for p in Path(path).glob('**/*.json')]\n\n\n def _get_ids(self, storage):\n start = time.time()\n print('checking ' + storage + ' storage... ', end='')\n\n if storage == 'bigquery':\n ids_downloaded = self._query_ids_from_bigquery(table_downloaded)\n ids_trashed = self._query_ids_from_bigquery(table_trashed)\n\n elif storage == 'local':\n ids_downloaded = self._retreive_ids_from_local(localpath_to_downloaded)\n ids_trashed = self._retreive_ids_from_local(localpath_to_trashed)\n\n ids_downloaded_set = set(ids_downloaded)\n ids_trashed_set = set(ids_trashed)\n\n if len(ids_downloaded) != len(ids_downloaded_set):\n '''downloaded articles의 uniqueness'''\n raise self.DuplicatesInSingleTable('duplicated in downloaded')\n\n if len(ids_trashed) != len(ids_trashed_set):\n '''trashed articles의 uniqueness'''\n raise self.DuplicatesInSingleTable('duplicated in trashed')\n\n if len(ids_downloaded_set & ids_trashed_set) != 0:\n '''downloaded와 trashed 간의 uniqueness'''\n raise self.DuplicatesBetweenTwoTables('duplicated between downloaded and trashed')\n\n ids = ids_downloaded_set | ids_trashed_set\n\n print('done ({howlong:.2f} seconds)'.format(howlong=time.time()-start))\n print('we have total {} articles ({} downloaded, {} trashed)'.format(len(ids), len(ids_downloaded_set), len(ids_trashed_set)))\n return ids\n\n\n def has(self, id):\n return id in self.ids\n\n\n def update(self, downloaded=None, trashed=None, chunksize=1000, subdir_len=3):\n '''\n downloaded or trashed = {\n id0: {...},\n id1: {...},\n ...\n }\n '''\n if self.storage == 'bigquery':\n self._update_bigquery('downloaded', downloaded, chunksize)\n self._update_bigquery('trashed', trashed, chunksize)\n\n elif self.storage == 'local':\n self._update_local('downloaded', downloaded, subdir_len)\n self._update_local('trashed', trashed, subdir_len)\n\n\n def _update_local(self, newstype, newsdict, subdir_len):\n if newsdict is not None:\n if newstype == 'downloaded':\n path = localpath_to_downloaded\n elif newstype == 'trashed':\n path = localpath_to_trashed\n\n for id, article in newsdict.items():\n# '''\n# local storage의 경우,\n# downloaded는 downloaded 폴더에,\n# trashed는 trashed/id[:3] 폴더에 저장했다\n# 나중에 혹시 local에 저장할 일이 있다면, 저장방식을 통일하는 것이 좋겠다 (2019.10.31)\n# '''\n# if newstype == 'downloaded':\n# _dir = Path(path)\n# elif newstype == 'trashed':\n# _dir = Path(path + '/' + id[:subdir_len])\n\n _dir = Path(path + '/' + id[:subdir_len])\n _dir.mkdir(parents=True, exist_ok=True)\n fname = id + '.json'\n fpath = _dir / fname\n with fpath.open('w') as f:\n json.dump(article, f)\n\n\n 
def _update_bigquery(self, newstype, newsdict, chunksize):\n if newsdict is not None:\n if newstype == 'downloaded':\n tb = table_downloaded #+ '2'\n elif newstype == 'trashed':\n tb = table_trashed #+ '2'\n\n df = pd.DataFrame.from_dict(newsdict, orient='index')\n df.index.name = 'id'\n df = df.reset_index()\n gbq.to_gbq(df, tb, project_id=proj, if_exists='append', chunksize=chunksize, credentials=credentials, progress_bar=False)\n\n class DuplicatesInSingleTable(Exception):\n pass\n\n class DuplicatesBetweenTwoTables(Exception):\n pass\n"
] | [
[
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
sriniiyer/concode | [
"864e30807f6988731ac3b4b98af6562c18bb42ff"
] | [
"ConcodeDecoder.py"
] | [
"import torch\nfrom torch import nn\nfrom GlobalAttention import GlobalAttention\nfrom torch.autograd import Variable\nfrom Beam import TreeBeam\nfrom UtilClass import bottle, unbottle\nfrom preprocess import rhs, CDDataset\nfrom decoders import DecoderState, Prediction\nimport torch.nn.functional as F\n\nclass ConcodeDecoder(nn.Module):\n\n def __init__(self, vocabs, opt):\n super(ConcodeDecoder, self).__init__()\n\n self.opt = opt\n self.vocabs = vocabs\n\n self.nt_embedding = nn.Embedding(\n len(vocabs['nt']),\n opt.tgt_word_vec_size,\n padding_idx=vocabs['nt'].stoi['<blank>'])\n\n self.rule_embedding = nn.Embedding(\n len(vocabs['prev_rules']),\n opt.tgt_word_vec_size,\n padding_idx=vocabs['prev_rules'].stoi['<blank>'])\n\n self.attn = GlobalAttention(\n opt.decoder_rnn_size,\n attn_type='general',\n include_rnn=False)\n\n self.attn_linear = nn.Linear(self.opt.decoder_rnn_size * 3, self.opt.decoder_rnn_size)\n\n self.var_attn = GlobalAttention(\n opt.decoder_rnn_size,\n attn_type='general',\n include_rnn=False)\n\n if opt.copy_attn:\n self.copy_attn = GlobalAttention(\n opt.decoder_rnn_size,\n attn_type='general')\n\n self.decoder_rnn = nn.LSTM(\n input_size=opt.tgt_word_vec_size * 3 + opt.decoder_rnn_size, # nt and prev_rule\n hidden_size=opt.decoder_rnn_size,\n num_layers=opt.dec_layers,\n dropout=opt.dropout,\n batch_first=True)\n\n self.decoder_dropout = nn.Dropout(opt.dropout)\n\n def forward(self, batch, all_context, context_masks, decState):\n\n src_context = all_context[0]\n src_context_mask = context_masks[0]\n rest_context = torch.cat(all_context[1:], 1)\n rest_context_mask = torch.cat(context_masks[1:], 1)\n\n context = torch.cat(all_context, 1)\n context_lengths = torch.cat(context_masks, 1)\n\n # embed everything\n nt_embeddings = self.nt_embedding(Variable(batch['nt'].cuda(), requires_grad=False))\n rule_embeddings = self.rule_embedding(Variable(batch['prev_rules'].cuda(), requires_grad=False))\n parent_rule_embeddings = self.rule_embedding(Variable(batch['parent_rules'].cuda(), requires_grad=False))\n\n attn_outputs, attn_scores, copy_attn_scores = [], [], []\n # For each batch we have to maintain states\n\n batch_size = batch['nt'].size(0) # 1 for predict\n num_decodes = 0\n\n attn_outputs, attn_scores, copy_attn_scores = [], [], []\n for i, (nt, rule, parent_rule) in enumerate(zip(nt_embeddings.split(1, 1), rule_embeddings.split(1, 1), parent_rule_embeddings.split(1, 1))):\n # accumulate parent decoder states\n parent_states = []\n for j in range(0, batch_size):\n try: # this is needed coz the batch is of different sizes\n parent_states.append(batch['parent_states'][j][i]) # one state for every batch\n except:\n parent_states.append(batch['parent_states'][j][0]) # one state for every batch\n parent_states = torch.cat(parent_states, 0)\n\n rnn_output, prev_hidden = self.decoder_rnn(torch.cat((nt, rule, parent_rule, parent_states), 2), decState.hidden)\n num_decodes += 1\n rnn_output.contiguous()\n\n if self.opt.twostep:\n src_attn_output, src_attn_score = self.attn(rnn_output, src_context, src_context_mask)\n varmet_attn_output, varmet_attn_score = self.var_attn(src_attn_output, rest_context, rest_context_mask)\n\n attn_output = F.tanh(self.attn_linear(torch.cat((rnn_output, src_attn_output, varmet_attn_output), 2)))\n attn_scores.append(varmet_attn_score)\n copy_attn_scores.append(varmet_attn_score)\n else:\n attn_output, attn_score = self.attn(rnn_output, context, context_lengths)\n # attn_score is b x tgt_len x src_len, src should be removed from this\n 
attn_scores.append(attn_score[:, :, src_context.size(1):])\n copy_attn_scores.append(attn_score[:, :, src_context.size(1):])\n\n\n attn_output = self.decoder_dropout(attn_output)\n attn_outputs.append(attn_output)\n\n decState.update_state(prev_hidden, attn_output)\n\n # update all children\n for j, elem in enumerate(rnn_output.split(1, 0)):\n # children wont be there during prediction\n if 'children' in batch and i in batch['children'][j]: # rule i has children\n for child in batch['children'][j][i]:\n batch['parent_states'][j][child] = elem\n\n output = torch.cat(attn_outputs, 1)\n attn_scores = torch.cat(attn_scores, 1)\n copy_attn_scores = torch.cat(copy_attn_scores, 1) if self.opt.copy_attn else None\n\n return output, attn_scores, copy_attn_scores\n\n def predict(self, enc_hidden, context, context_lengths, batch, beam_size, max_code_length, generator, replace_unk, vis_params):\n\n # This decoder does not have input feeding. Parent state replces that\n decState = DecoderState(\n enc_hidden, #encoder hidden\n Variable(torch.zeros(1, 1, self.opt.decoder_rnn_size).cuda(), requires_grad=False) # parent state\n )\n # Repeat everything beam_size times.\n def rvar(a, beam_size):\n return Variable(a.repeat(beam_size, 1, 1), volatile=True)\n\n context = tuple(rvar(context[i].data, beam_size) for i in range(0, len(context)))\n context_lengths = tuple(context_lengths[i].repeat(beam_size, 1) for i in range(0, len(context_lengths)))\n\n decState.repeat_beam_size_times(beam_size)\n\n # Use only one beam\n beam = TreeBeam(beam_size, True, self.vocabs, self.opt.decoder_rnn_size)\n\n for count in range(0, max_code_length): # We will break when we have the required number of terminals\n # to be consistent with seq2seq\n\n if beam.done():\n break\n\n # Construct batch x beam_size nxt words.\n # Get all the pending current beam words and arrange for forward.\n # Uses the start symbol in the beginning\n inp = beam.getCurrentState() # Should return a batch of the frontier\n\n # Run one step., decState gets automatically updated\n output, attn, copy_attn = self.forward(inp, context, context_lengths, decState)\n src_map = torch.zeros(0, 0)\n if self.opt.var_names:\n src_map = torch.cat((src_map, batch['concode_src_map_vars']), 1)\n if self.opt.method_names:\n src_map = torch.cat((src_map, batch['concode_src_map_methods']), 1)\n\n scores = generator(bottle(output), bottle(copy_attn), src_map, inp) #generator needs the non-terminals\n\n out = generator.collapseCopyScores(unbottle(scores.data.clone(), beam_size), batch) # needs seq2seq from batch\n out = out.log()\n\n # beam x tgt_vocab\n\n beam.advance(out[:, 0], attn.data[:, 0], output)\n decState.beam_update(beam.getCurrentOrigin(), beam_size)\n\n pred_score_total = 0\n pred_words_total = 0\n\n score, times, k = beam.getFinal() # times is the length of the prediction\n hyp, att = beam.getHyp(times, k)\n goldNl = []\n if self.opt.var_names:\n goldNl += batch['concode_var'][0] # because batch = 1\n if self.opt.method_names:\n goldNl += batch['concode_method'][0] # because batch = 1\n\n goldCode = self.vocabs['code'].addStartOrEnd(batch['raw_code'][0])\n predSent, copied_tokens, replaced_tokens = self.buildTargetTokens(\n hyp,\n self.vocabs,\n goldNl,\n att,\n batch['concode_vocab'][0],\n replace_unk\n )\n predSent = ConcodeDecoder.rulesToCode(predSent)\n pred_score_total += score\n pred_words_total += len(predSent)\n\n return Prediction(goldNl, goldCode, predSent, att)\n\n @staticmethod\n def rulesToCode(rules):\n stack = []\n code = []\n for i in range(0, 
len(rules)):\n if not CDDataset._is_terminal_rule(rules[i]):\n stack.extend(rhs(rules[i]).split('___')[::-1])\n else:\n code.append(rhs(rules[i]))\n\n try:\n top = stack.pop()\n\n while not top[0].isupper():\n code.append(top)\n if len(stack) == 0:\n break\n top = stack.pop()\n except:\n pass\n\n return code\n\n def buildTargetTokens(self, pred, vocabs, src, attn, copy_vocab, replace_unk):\n vocab = vocabs['next_rules']\n tokens = []\n copied_tokens, replaced_tokens = [], []\n for tok in pred:\n if tok < len(vocab):\n tokens.append(vocab.itos[tok])\n else:\n tokens.append(\"IdentifierNT-->\" + copy_vocab.itos[tok - len(vocab)])\n copied_tokens.append(copy_vocab.itos[tok - len(vocab)])\n\n if replace_unk and attn is not None:\n for i in range(len(tokens)):\n if tokens[i] == '<unk>':\n _, maxIndex = attn[i].max(0)\n tokens[i] = \"IdentifierNT-->\" + src[maxIndex[0]]\n replaced_tokens.append(src[maxIndex[0]])\n\n return tokens, copied_tokens, replaced_tokens\n"
] | [
[
"torch.nn.Dropout",
"torch.zeros",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ngohaily/geopandas | [
"2725f346e430edb6a5164c21dd707de328329f31",
"2725f346e430edb6a5164c21dd707de328329f31"
] | [
"geopandas/tests/test_geom_methods.py",
"geopandas/geodataframe.py"
] | [
"from __future__ import absolute_import\n\nimport string\n\nimport numpy as np\nfrom pandas import Series, DataFrame, MultiIndex\nfrom shapely.geometry import (\n Point, LinearRing, LineString, Polygon, MultiPoint)\nfrom shapely.geometry.collection import GeometryCollection\nfrom shapely.ops import unary_union\n\nfrom geopandas import GeoSeries, GeoDataFrame\nfrom geopandas.base import GeoPandasBase\n\nfrom geopandas.tests.util import (\n geom_equals, geom_almost_equals, assert_geoseries_equal)\n\nimport pytest\nfrom numpy.testing import assert_array_equal\nfrom pandas.util.testing import assert_series_equal, assert_frame_equal\n\n\ndef assert_array_dtype_equal(a, b, *args, **kwargs):\n a = np.asanyarray(a)\n b = np.asanyarray(b)\n assert a.dtype == b.dtype\n assert_array_equal(a, b, *args, **kwargs)\n\n\nclass TestGeomMethods:\n\n def setup_method(self):\n self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])\n self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])\n self.t3 = Polygon([(2, 0), (3, 0), (3, 1)])\n self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])\n self.inner_sq = Polygon([(0.25, 0.25), (0.75, 0.25), (0.75, 0.75),\n (0.25, 0.75)])\n self.nested_squares = Polygon(self.sq.boundary,\n [self.inner_sq.boundary])\n self.p0 = Point(5, 5)\n self.p3d = Point(5, 5, 5)\n self.g0 = GeoSeries([self.t1, self.t2, self.sq, self.inner_sq,\n self.nested_squares, self.p0])\n self.g1 = GeoSeries([self.t1, self.sq])\n self.g2 = GeoSeries([self.sq, self.t1])\n self.g3 = GeoSeries([self.t1, self.t2])\n self.g3.crs = {'init': 'epsg:4326', 'no_defs': True}\n self.g4 = GeoSeries([self.t2, self.t1])\n self.g4.crs = {'init': 'epsg:4326', 'no_defs': True}\n self.g_3d = GeoSeries([self.p0, self.p3d])\n self.na = GeoSeries([self.t1, self.t2, Polygon()])\n self.na_none = GeoSeries([self.t1, None])\n self.a1 = self.g1.copy()\n self.a1.index = ['A', 'B']\n self.a2 = self.g2.copy()\n self.a2.index = ['B', 'C']\n self.esb = Point(-73.9847, 40.7484)\n self.sol = Point(-74.0446, 40.6893)\n self.landmarks = GeoSeries([self.esb, self.sol],\n crs={'init': 'epsg:4326', 'no_defs': True})\n self.l1 = LineString([(0, 0), (0, 1), (1, 1)])\n self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)])\n self.g5 = GeoSeries([self.l1, self.l2])\n self.g6 = GeoSeries([self.p0, self.t3])\n self.empty = GeoSeries([])\n self.empty_poly = Polygon()\n\n # Crossed lines\n self.l3 = LineString([(0, 0), (1, 1)])\n self.l4 = LineString([(0, 1), (1, 0)])\n self.crossed_lines = GeoSeries([self.l3, self.l4])\n\n # Placeholder for testing, will just drop in different geometries\n # when needed\n self.gdf1 = GeoDataFrame({'geometry': self.g1,\n 'col0': [1.0, 2.0],\n 'col1': ['geo', 'pandas']})\n self.gdf2 = GeoDataFrame({'geometry': self.g1,\n 'col3': [4, 5],\n 'col4': ['rand', 'string']})\n\n def _test_unary_real(self, op, expected, a):\n \"\"\" Tests for 'area', 'length', 'is_valid', etc. \"\"\"\n fcmp = assert_series_equal\n self._test_unary(op, expected, a, fcmp)\n\n def _test_unary_topological(self, op, expected, a):\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n def fcmp(a, b): assert a.equals(b)\n self._test_unary(op, expected, a, fcmp)\n\n def _test_binary_topological(self, op, expected, a, b, *args, **kwargs):\n \"\"\" Tests for 'intersection', 'union', 'symmetric_difference', etc. 
\"\"\"\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n def fcmp(a, b): assert geom_equals(a, b)\n\n if isinstance(b, GeoPandasBase):\n right_df = True\n else:\n right_df = False\n\n self._binary_op_test(op, expected, a, b, fcmp, True, right_df,\n *args, **kwargs)\n\n def _test_binary_real(self, op, expected, a, b, *args, **kwargs):\n fcmp = assert_series_equal\n self._binary_op_test(op, expected, a, b, fcmp, True, False,\n *args, **kwargs)\n\n def _test_binary_operator(self, op, expected, a, b):\n \"\"\"\n The operators only have GeoSeries on the left, but can have\n GeoSeries or GeoDataFrame on the right.\n\n \"\"\"\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n def fcmp(a, b): assert geom_equals(a, b)\n\n if isinstance(b, GeoPandasBase):\n right_df = True\n else:\n right_df = False\n\n self._binary_op_test(op, expected, a, b, fcmp, False, right_df)\n\n def _binary_op_test(self, op, expected, left, right, fcmp, left_df,\n right_df,\n *args, **kwargs):\n \"\"\"\n This is a helper to call a function on GeoSeries and GeoDataFrame\n arguments. For example, 'intersection' is a member of both GeoSeries\n and GeoDataFrame and can take either GeoSeries or GeoDataFrame inputs.\n This function has the ability to test all four combinations of input\n types.\n\n Parameters\n ----------\n\n expected : str\n The operation to be tested. e.g., 'intersection'\n left: GeoSeries\n right: GeoSeries\n fcmp: function\n Called with the result of the operation and expected. It should\n assert if the result is incorrect\n left_df: bool\n If the left input should also be called with a GeoDataFrame\n right_df: bool\n Indicates whether the right input should be called with a\n GeoDataFrame\n\n \"\"\"\n def _make_gdf(s):\n n = len(s)\n col1 = string.ascii_lowercase[:n]\n col2 = range(n)\n\n return GeoDataFrame({'geometry': s.values,\n 'col1': col1,\n 'col2': col2},\n index=s.index, crs=s.crs)\n\n # Test GeoSeries.op(GeoSeries)\n result = getattr(left, op)(right, *args, **kwargs)\n fcmp(result, expected)\n\n if left_df:\n # Test GeoDataFrame.op(GeoSeries)\n gdf_left = _make_gdf(left)\n result = getattr(gdf_left, op)(right, *args, **kwargs)\n fcmp(result, expected)\n\n if right_df:\n # Test GeoSeries.op(GeoDataFrame)\n gdf_right = _make_gdf(right)\n result = getattr(left, op)(gdf_right, *args, **kwargs)\n fcmp(result, expected)\n\n if left_df:\n # Test GeoDataFrame.op(GeoDataFrame)\n result = getattr(gdf_left, op)(gdf_right, *args, **kwargs)\n fcmp(result, expected)\n\n def _test_unary(self, op, expected, a, fcmp):\n # GeoSeries, (GeoSeries or geometry)\n result = getattr(a, op)\n fcmp(result, expected)\n\n # GeoDataFrame, (GeoSeries or geometry)\n gdf = self.gdf1.set_geometry(a)\n result = getattr(gdf, op)\n fcmp(result, expected)\n\n def test_crs_warning(self):\n # operations on geometries should warn for different CRS\n no_crs_g3 = self.g3.copy()\n no_crs_g3.crs = None\n with pytest.warns(UserWarning):\n self._test_binary_topological('intersection', self.g3,\n self.g3, no_crs_g3)\n\n def test_intersection(self):\n self._test_binary_topological('intersection', self.t1,\n self.g1, self.g2)\n self._test_binary_topological('intersection', self.empty_poly,\n self.g1, self.empty)\n\n def test_union_series(self):\n self._test_binary_topological('union', self.sq, self.g1, self.g2)\n\n def test_union_polygon(self):\n self._test_binary_topological('union', self.sq, self.g1, self.t2)\n\n def test_symmetric_difference_series(self):\n 
self._test_binary_topological('symmetric_difference', self.sq,\n self.g3, self.g4)\n\n def test_symmetric_difference_poly(self):\n expected = GeoSeries([GeometryCollection(), self.sq], crs=self.g3.crs)\n self._test_binary_topological('symmetric_difference', expected,\n self.g3, self.t1)\n\n def test_difference_series(self):\n expected = GeoSeries([GeometryCollection(), self.t2])\n self._test_binary_topological('difference', expected,\n self.g1, self.g2)\n\n def test_difference_poly(self):\n expected = GeoSeries([self.t1, self.t1])\n self._test_binary_topological('difference', expected,\n self.g1, self.t2)\n\n def test_geo_op_empty_result(self):\n l1 = LineString([(0, 0), (1, 1)])\n l2 = LineString([(2, 2), (3, 3)])\n expected = GeoSeries([GeometryCollection()])\n # binary geo resulting in empty geometry\n result = GeoSeries([l1]).intersection(l2)\n assert_geoseries_equal(result, expected)\n # binary geo empty result with right GeoSeries\n result = GeoSeries([l1]).intersection(GeoSeries([l2]))\n assert_geoseries_equal(result, expected)\n # unary geo resulting in emtpy geometry\n result = GeoSeries([GeometryCollection()]).convex_hull\n assert_geoseries_equal(result, expected)\n\n def test_boundary(self):\n l1 = LineString([(0, 0), (1, 0), (1, 1), (0, 0)])\n l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])\n expected = GeoSeries([l1, l2], index=self.g1.index, crs=self.g1.crs)\n\n self._test_unary_topological('boundary', expected, self.g1)\n\n def test_area(self):\n expected = Series(np.array([0.5, 1.0]), index=self.g1.index)\n self._test_unary_real('area', expected, self.g1)\n\n expected = Series(np.array([0.5, np.nan]), index=self.na_none.index)\n self._test_unary_real('area', expected, self.na_none)\n\n def test_bounds(self):\n # Set columns to get the order right\n expected = DataFrame({'minx': [0.0, 0.0], 'miny': [0.0, 0.0],\n 'maxx': [1.0, 1.0], 'maxy': [1.0, 1.0]},\n index=self.g1.index,\n columns=['minx', 'miny', 'maxx', 'maxy'])\n\n result = self.g1.bounds\n assert_frame_equal(expected, result)\n\n gdf = self.gdf1.set_geometry(self.g1)\n result = gdf.bounds\n assert_frame_equal(expected, result)\n\n def test_unary_union(self):\n p1 = self.t1\n p2 = Polygon([(2, 0), (3, 0), (3, 1)])\n expected = unary_union([p1, p2])\n g = GeoSeries([p1, p2])\n\n self._test_unary_topological('unary_union', expected, g)\n\n def test_contains(self):\n expected = [True, False, True, False, False, False]\n assert_array_dtype_equal(expected, self.g0.contains(self.t1))\n\n def test_length(self):\n expected = Series(np.array([2 + np.sqrt(2), 4]), index=self.g1.index)\n self._test_unary_real('length', expected, self.g1)\n\n expected = Series(\n np.array([2 + np.sqrt(2), np.nan]),\n index=self.na_none.index)\n self._test_unary_real('length', expected, self.na_none)\n\n def test_crosses(self):\n expected = [False, False, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.crosses(self.t1))\n\n expected = [False, True]\n assert_array_dtype_equal(expected, self.crossed_lines.crosses(self.l3))\n\n def test_disjoint(self):\n expected = [False, False, False, False, False, True]\n assert_array_dtype_equal(expected, self.g0.disjoint(self.t1))\n\n def test_relate(self):\n expected = Series(['212101212',\n '212101212',\n '212FF1FF2',\n '2FFF1FFF2',\n 'FF2F112F2',\n 'FF0FFF212'],\n index=self.g0.index)\n assert_array_dtype_equal(expected, self.g0.relate(self.inner_sq))\n\n expected = Series(['FF0FFF212',\n None],\n index=self.g6.index)\n assert_array_dtype_equal(expected, 
self.g6.relate(self.na_none))\n\n def test_distance(self):\n expected = Series(np.array([np.sqrt((5 - 1)**2 + (5 - 1)**2), np.nan]),\n self.na_none.index)\n assert_array_dtype_equal(expected, self.na_none.distance(self.p0))\n\n expected = Series(np.array([np.sqrt(4**2 + 4**2), np.nan]),\n self.g6.index)\n assert_array_dtype_equal(expected, self.g6.distance(self.na_none))\n\n def test_intersects(self):\n expected = [True, True, True, True, True, False]\n assert_array_dtype_equal(expected, self.g0.intersects(self.t1))\n\n expected = [True, False]\n assert_array_dtype_equal(expected, self.na_none.intersects(self.t2))\n\n expected = np.array([], dtype=bool)\n assert_array_dtype_equal(expected, self.empty.intersects(self.t1))\n\n expected = np.array([], dtype=bool)\n assert_array_dtype_equal(\n expected, self.empty.intersects(self.empty_poly))\n\n expected = [False] * 6\n assert_array_dtype_equal(expected, self.g0.intersects(self.empty_poly))\n\n def test_overlaps(self):\n expected = [True, True, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.overlaps(self.inner_sq))\n\n expected = [False, False]\n assert_array_dtype_equal(expected, self.g4.overlaps(self.t1))\n\n def test_touches(self):\n expected = [False, True, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.touches(self.t1))\n\n def test_within(self):\n expected = [True, False, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.within(self.t1))\n\n expected = [True, True, True, True, True, False]\n assert_array_dtype_equal(expected, self.g0.within(self.sq))\n\n def test_is_valid(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_valid', expected, self.g1)\n\n def test_is_empty(self):\n expected = Series(np.array([False] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_empty', expected, self.g1)\n\n def test_is_ring(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_ring', expected, self.g1)\n\n def test_is_simple(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_simple', expected, self.g1)\n\n def test_has_z(self):\n expected = Series([False, True], self.g_3d.index)\n self._test_unary_real('has_z', expected, self.g_3d)\n\n def test_xy_points(self):\n expected_x = [-73.9847, -74.0446]\n expected_y = [40.7484, 40.6893]\n\n assert_array_dtype_equal(expected_x, self.landmarks.geometry.x)\n assert_array_dtype_equal(expected_y, self.landmarks.geometry.y)\n\n def test_xy_polygons(self):\n # accessing x attribute in polygon geoseries should raise an error\n with pytest.raises(ValueError):\n _ = self.gdf1.geometry.x\n # and same for accessing y attribute in polygon geoseries\n with pytest.raises(ValueError):\n _ = self.gdf1.geometry.y\n\n def test_centroid(self):\n polygon = Polygon([(-1, -1), (1, -1), (1, 1), (-1, 1)])\n point = Point(0, 0)\n polygons = GeoSeries([polygon for i in range(3)])\n points = GeoSeries([point for i in range(3)])\n assert_geoseries_equal(polygons.centroid, points)\n\n def test_convex_hull(self):\n # the convex hull of a square should be the same as the square\n squares = GeoSeries([self.sq for i in range(3)])\n assert_geoseries_equal(squares, squares.convex_hull)\n\n def test_exterior(self):\n exp_exterior = GeoSeries([LinearRing(p.boundary) for p in self.g3])\n for expected, computed in zip(exp_exterior, self.g3.exterior):\n assert computed.equals(expected)\n\n def 
test_interiors(self):\n original = GeoSeries([self.t1, self.nested_squares])\n\n # This is a polygon with no interior.\n expected = []\n assert original.interiors[0] == expected\n # This is a polygon with an interior.\n expected = LinearRing(self.inner_sq.boundary)\n assert original.interiors[1][0].equals(expected)\n\n def test_interpolate(self):\n expected = GeoSeries([Point(0.5, 1.0), Point(0.75, 1.0)])\n self._test_binary_topological('interpolate', expected, self.g5,\n 0.75, normalized=True)\n\n expected = GeoSeries([Point(0.5, 1.0), Point(1.0, 0.5)])\n self._test_binary_topological('interpolate', expected, self.g5,\n 1.5)\n\n def test_interpolate_distance_array(self):\n expected = GeoSeries([Point(0.0, 0.75), Point(1.0, 0.5)])\n self._test_binary_topological('interpolate', expected, self.g5,\n np.array([0.75, 1.5]))\n\n expected = GeoSeries([Point(0.5, 1.0), Point(0.0, 1.0)])\n self._test_binary_topological('interpolate', expected, self.g5,\n np.array([0.75, 1.5]), normalized=True)\n\n def test_interpolate_distance_wrong_length(self):\n distances = np.array([1, 2, 3])\n with pytest.raises(ValueError):\n self.g5.interpolate(distances)\n\n def test_interpolate_distance_wrong_index(self):\n distances = Series([1, 2], index=[99, 98])\n with pytest.raises(ValueError):\n self.g5.interpolate(distances)\n\n def test_project(self):\n expected = Series([2.0, 1.5], index=self.g5.index)\n p = Point(1.0, 0.5)\n self._test_binary_real('project', expected, self.g5, p)\n\n expected = Series([1.0, 0.5], index=self.g5.index)\n self._test_binary_real('project', expected, self.g5, p,\n normalized=True)\n\n def test_translate_tuple(self):\n trans = self.sol.x - self.esb.x, self.sol.y - self.esb.y\n assert self.landmarks.translate(*trans)[0].equals(self.sol)\n\n res = self.gdf1.set_geometry(self.landmarks).translate(*trans)[0]\n assert res.equals(self.sol)\n\n def test_rotate(self):\n angle = 98\n expected = self.g4\n\n o = Point(0, 0)\n res = self.g4.rotate(angle, origin=o).rotate(-angle, origin=o)\n assert geom_almost_equals(self.g4, res)\n\n res = self.gdf1.set_geometry(self.g4).rotate(angle, origin=Point(0, 0))\n assert geom_almost_equals(expected, res.rotate(-angle, origin=o))\n\n def test_scale(self):\n expected = self.g4\n\n scale = 2., 1.\n inv = tuple(1./i for i in scale)\n\n o = Point(0, 0)\n res = self.g4.scale(*scale, origin=o).scale(*inv, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).scale(*scale, origin=o)\n res = res.scale(*inv, origin=o)\n assert geom_almost_equals(expected, res)\n\n def test_skew(self):\n expected = self.g4\n\n skew = 45.\n o = Point(0, 0)\n\n # Test xs\n res = self.g4.skew(xs=skew, origin=o).skew(xs=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).skew(xs=skew, origin=o)\n res = res.skew(xs=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n # Test ys\n res = self.g4.skew(ys=skew, origin=o).skew(ys=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).skew(ys=skew, origin=o)\n res = res.skew(ys=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n def test_buffer(self):\n original = GeoSeries([Point(0, 0)])\n expected = GeoSeries([Polygon(((5, 0), (0, -5), (-5, 0), (0, 5),\n (5, 0)))])\n calculated = original.buffer(5, resolution=1)\n assert geom_almost_equals(expected, calculated)\n\n def test_buffer_args(self):\n args = dict(cap_style=3, join_style=2, mitre_limit=2.5)\n calculated_series = 
self.g0.buffer(10, **args)\n for original, calculated in zip(self.g0, calculated_series):\n expected = original.buffer(10, **args)\n assert calculated.equals(expected)\n\n def test_buffer_distance_array(self):\n original = GeoSeries([self.p0, self.p0])\n expected = GeoSeries(\n [Polygon(((6, 5), (5, 4), (4, 5), (5, 6), (6, 5))),\n Polygon(((10, 5), (5, 0), (0, 5), (5, 10), (10, 5))),\n ])\n calculated = original.buffer(np.array([1, 5]), resolution=1)\n assert_geoseries_equal(calculated, expected, check_less_precise=True)\n\n def test_buffer_distance_wrong_length(self):\n original = GeoSeries([self.p0, self.p0])\n distances = np.array([1, 2, 3])\n with pytest.raises(ValueError):\n original.buffer(distances)\n\n def test_buffer_distance_wrong_index(self):\n original = GeoSeries([self.p0, self.p0], index=[0, 1])\n distances = Series(data=[1, 2], index=[99, 98])\n with pytest.raises(ValueError):\n original.buffer(distances)\n\n def test_envelope(self):\n e = self.g3.envelope\n assert np.all(e.geom_equals(self.sq))\n assert isinstance(e, GeoSeries)\n assert self.g3.crs == e.crs\n\n def test_total_bounds(self):\n bbox = self.sol.x, self.sol.y, self.esb.x, self.esb.y\n assert isinstance(self.landmarks.total_bounds, np.ndarray)\n assert tuple(self.landmarks.total_bounds) == bbox\n\n df = GeoDataFrame({'geometry': self.landmarks,\n 'col1': range(len(self.landmarks))})\n assert tuple(df.total_bounds) == bbox\n\n def test_explode_geoseries(self):\n s = GeoSeries([MultiPoint([(0, 0), (1, 1)]),\n MultiPoint([(2, 2), (3, 3), (4, 4)])])\n s.index.name = 'test_index_name'\n expected_index_name = ['test_index_name', None]\n index = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 2)]\n expected = GeoSeries([Point(0, 0), Point(1, 1), Point(2, 2),\n Point(3, 3), Point(4, 4)],\n index=MultiIndex.from_tuples(\n index, names=expected_index_name))\n assert_geoseries_equal(expected, s.explode())\n\n @pytest.mark.parametrize(\"index_name\", [None, 'test'])\n def test_explode_geodataframe(self, index_name):\n s = GeoSeries([MultiPoint([Point(1, 2), Point(2, 3)]), Point(5, 5)])\n df = GeoDataFrame({'col': [1, 2], 'geometry': s})\n df.index.name = index_name\n\n test_df = df.explode()\n\n expected_s = GeoSeries([Point(1, 2), Point(2, 3), Point(5, 5)])\n expected_df = GeoDataFrame({'col': [1, 1, 2], 'geometry': expected_s})\n expected_index = MultiIndex([[0, 1], [0, 1]], # levels\n [[0, 0, 1], [0, 1, 0]], # labels/codes\n names=[index_name, None])\n expected_df = expected_df.set_index(expected_index)\n assert_frame_equal(test_df, expected_df)\n\n #\n # Test '&', '|', '^', and '-'\n # The left can only be a GeoSeries. The right hand side can be a\n # GeoSeries, GeoDataFrame or Shapely geometry\n #\n def test_intersection_operator(self):\n self._test_binary_operator('__and__', self.t1, self.g1, self.g2)\n\n def test_union_operator(self):\n self._test_binary_operator('__or__', self.sq, self.g1, self.g2)\n\n def test_union_operator_polygon(self):\n self._test_binary_operator('__or__', self.sq, self.g1, self.t2)\n\n def test_symmetric_difference_operator(self):\n self._test_binary_operator('__xor__', self.sq, self.g3, self.g4)\n\n def test_difference_series2(self):\n expected = GeoSeries([GeometryCollection(), self.t2])\n self._test_binary_operator('__sub__', expected, self.g1, self.g2)\n\n def test_difference_poly2(self):\n expected = GeoSeries([self.t1, self.t1])\n self._test_binary_operator('__sub__', expected, self.g1, self.t2)\n",
"import json\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nfrom shapely.geometry import mapping, shape, Point\nfrom shapely.geometry.base import BaseGeometry\nfrom six import string_types, PY3\n\nfrom geopandas.base import GeoPandasBase, _CoordinateIndexer\nfrom geopandas.geoseries import GeoSeries\nfrom geopandas.plotting import plot_dataframe\nimport geopandas.io\n\n\nDEFAULT_GEO_COLUMN_NAME = 'geometry'\n\n\nclass GeoDataFrame(GeoPandasBase, DataFrame):\n \"\"\"\n A GeoDataFrame object is a pandas.DataFrame that has a column\n with geometry. In addition to the standard DataFrame constructor arguments,\n GeoDataFrame also accepts the following keyword arguments:\n\n Parameters\n ----------\n crs : str (optional)\n Coordinate system\n geometry : str or array (optional)\n If str, column to use as geometry. If array, will be set as 'geometry'\n column on GeoDataFrame.\n \"\"\"\n\n # XXX: This will no longer be necessary in pandas 0.17\n _internal_names = ['_data', '_cacher', '_item_cache', '_cache',\n 'is_copy', '_subtyp', '_index',\n '_default_kind', '_default_fill_value', '_metadata',\n '__array_struct__', '__array_interface__']\n\n _metadata = ['crs', '_geometry_column_name']\n\n _geometry_column_name = DEFAULT_GEO_COLUMN_NAME\n\n def __init__(self, *args, **kwargs):\n crs = kwargs.pop('crs', None)\n geometry = kwargs.pop('geometry', None)\n super(GeoDataFrame, self).__init__(*args, **kwargs)\n self.crs = crs\n if geometry is not None:\n self.set_geometry(geometry, inplace=True)\n self._invalidate_sindex()\n\n # Serialize metadata (will no longer be necessary in pandas 0.17+)\n # See https://github.com/pydata/pandas/pull/10557\n def __getstate__(self):\n meta = dict((k, getattr(self, k, None)) for k in self._metadata)\n return dict(_data=self._data, _typ=self._typ,\n _metadata=self._metadata, **meta)\n\n def __setattr__(self, attr, val):\n # have to special case geometry b/c pandas tries to use as column...\n if attr == 'geometry':\n object.__setattr__(self, attr, val)\n else:\n super(GeoDataFrame, self).__setattr__(attr, val)\n\n def _get_geometry(self):\n if self._geometry_column_name not in self:\n raise AttributeError(\"No geometry data set yet (expected in\"\n \" column '%s'.\" % self._geometry_column_name)\n return self[self._geometry_column_name]\n\n def _set_geometry(self, col):\n # TODO: Use pandas' core.common.is_list_like() here.\n if not isinstance(col, (list, np.ndarray, Series)):\n raise ValueError(\"Must use a list-like to set the geometry\"\n \" property\")\n self.set_geometry(col, inplace=True)\n\n geometry = property(fget=_get_geometry, fset=_set_geometry,\n doc=\"Geometry data for GeoDataFrame\")\n\n def set_geometry(self, col, drop=False, inplace=False, crs=None):\n \"\"\"\n Set the GeoDataFrame geometry using either an existing column or\n the specified input. By default yields a new object.\n\n The original geometry column is replaced with the input.\n\n Parameters\n ----------\n col : column label or array\n drop : boolean, default True\n Delete column to be used as the new geometry\n inplace : boolean, default False\n Modify the GeoDataFrame in place (do not create a new object)\n crs : str/result of fion.get_crs (optional)\n Coordinate system to use. If passed, overrides both DataFrame and\n col's crs. 
Otherwise, tries to get crs from passed col values or\n DataFrame.\n\n Examples\n --------\n >>> df1 = df.set_geometry([Point(0,0), Point(1,1), Point(2,2)])\n >>> df2 = df.set_geometry('geom1')\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n # Most of the code here is taken from DataFrame.set_index()\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n if not crs:\n crs = getattr(col, 'crs', self.crs)\n\n to_remove = None\n geo_column_name = self._geometry_column_name\n if isinstance(col, (Series, list, np.ndarray)):\n level = col\n elif hasattr(col, 'ndim') and col.ndim != 1:\n raise ValueError(\"Must pass array with one dimension only.\")\n else:\n try:\n level = frame[col].values\n except KeyError:\n raise ValueError(\"Unknown column %s\" % col)\n except:\n raise\n if drop:\n to_remove = col\n geo_column_name = self._geometry_column_name\n else:\n geo_column_name = col\n\n if to_remove:\n del frame[to_remove]\n\n if isinstance(level, GeoSeries) and level.crs != crs:\n # Avoids caching issues/crs sharing issues\n level = level.copy()\n level.crs = crs\n\n # Check that we are using a listlike of geometries\n if not all(isinstance(item, BaseGeometry) or pd.isnull(item) for item in level):\n raise TypeError(\"Input geometry column must contain valid geometry objects.\")\n frame[geo_column_name] = level\n frame._geometry_column_name = geo_column_name\n frame.crs = crs\n frame._invalidate_sindex()\n if not inplace:\n return frame\n\n @classmethod\n def from_file(cls, filename, **kwargs):\n \"\"\"Alternate constructor to create a ``GeoDataFrame`` from a file.\n\n Can load a ``GeoDataFrame`` from a file in any format recognized by\n `fiona`. See http://fiona.readthedocs.io/en/latest/manual.html for details.\n\n Parameters\n ----------\n\n filename : str\n File path or file handle to read from. Depending on which kwargs\n are included, the content of filename may vary. 
See\n http://fiona.readthedocs.io/en/latest/README.html#usage for usage details.\n kwargs : key-word arguments\n These arguments are passed to fiona.open, and can be used to\n access multi-layer data, data stored within archives (zip files),\n etc.\n\n Examples\n --------\n\n >>> df = geopandas.GeoDataFrame.from_file('nybb.shp')\n \"\"\"\n return geopandas.io.file.read_file(filename, **kwargs)\n\n @classmethod\n def from_features(cls, features, crs=None, columns=None):\n \"\"\"\n Alternate constructor to create GeoDataFrame from an iterable of\n features or a feature collection.\n\n Parameters\n ----------\n features\n - Iterable of features, where each element must be a feature\n dictionary or implement the __geo_interface__.\n - Feature collection, where the 'features' key contains an\n iterable of features.\n - Object holding a feature collection that implements the\n ``__geo_interface__``.\n crs : str or dict (optional)\n Coordinate reference system to set on the resulting frame.\n columns : list of column names, optional\n Optionally specify the column names to include in the output frame.\n This does not overwrite the property names of the input, but can\n ensure a consistent output format.\n\n Returns\n -------\n GeoDataFrame\n\n Notes\n -----\n For more information about the ``__geo_interface__``, see\n https://gist.github.com/sgillies/2217756\n\n \"\"\"\n # Handle feature collections\n if hasattr(features, \"__geo_interface__\"):\n fs = features.__geo_interface__\n else:\n fs = features\n\n if isinstance(fs, dict) and fs.get('type') == 'FeatureCollection':\n features_lst = fs['features']\n else:\n features_lst = features\n\n rows = []\n for f in features_lst:\n if hasattr(f, \"__geo_interface__\"):\n f = f.__geo_interface__\n else:\n f = f\n\n d = {'geometry': shape(f['geometry']) if f['geometry'] else None}\n d.update(f['properties'])\n rows.append(d)\n df = GeoDataFrame(rows, columns=columns)\n df.crs = crs\n return df\n\n @classmethod\n def from_postgis(cls, sql, con, geom_col='geom', crs=None,\n index_col=None, coerce_float=True,\n parse_dates=None, params=None):\n \"\"\"\n Alternate constructor to create a ``GeoDataFrame`` from a sql query\n containing a geometry column in WKB representation.\n\n Parameters\n ----------\n sql : string\n con : DB connection object or SQLAlchemy engine\n geom_col : string, default 'geom'\n column name to convert to shapely geometries\n crs : optional\n Coordinate reference system to use for the returned GeoDataFrame\n index_col : string or list of strings, optional, default: None\n Column(s) to set as index(MultiIndex)\n coerce_float : boolean, default True\n Attempt to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets\n parse_dates : list or dict, default None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times, or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg dict}``, where the arg dict\n corresponds to the keyword arguments of\n :func:`pandas.to_datetime`. 
Especially useful with databases\n without native Datetime support, such as SQLite.\n params : list, tuple or dict, optional, default None\n List of parameters to pass to execute method.\n\n Examples\n --------\n >>> sql = \"SELECT geom, highway FROM roads\"\n SpatiaLite\n >>> sql = \"SELECT ST_Binary(geom) AS geom, highway FROM roads\"\n >>> df = geopandas.GeoDataFrame.from_postgis(sql, con)\n \"\"\"\n\n df = geopandas.io.sql.read_postgis(\n sql, con, geom_col=geom_col, crs=crs,\n index_col=index_col, coerce_float=coerce_float,\n parse_dates=parse_dates, params=params)\n\n return df\n\n def to_json(self, na='null', show_bbox=False, **kwargs):\n \"\"\"\n Returns a GeoJSON representation of the ``GeoDataFrame`` as a string.\n\n Parameters\n ----------\n na : {'null', 'drop', 'keep'}, default 'null'\n Indicates how to output missing (NaN) values in the GeoDataFrame.\n See below.\n show_bbox : bool, optional, default: False\n Include bbox (bounds) in the geojson\n\n Notes\n -----\n The remaining *kwargs* are passed to json.dumps().\n\n Missing (NaN) values in the GeoDataFrame can be represented as follows:\n\n - ``null``: output the missing entries as JSON null.\n - ``drop``: remove the property from the feature. This applies to each\n feature individually so that features may have different properties.\n - ``keep``: output the missing entries as NaN.\n \"\"\"\n return json.dumps(self._to_geo(na=na, show_bbox=show_bbox), **kwargs)\n\n @property\n def __geo_interface__(self):\n \"\"\"Returns a ``GeoDataFrame`` as a python feature collection.\n\n Implements the `geo_interface`. The returned python data structure\n represents the ``GeoDataFrame`` as a GeoJSON-like\n ``FeatureCollection``.\n\n This differs from `_to_geo()` only in that it is a property with\n default args instead of a method\n \"\"\"\n return self._to_geo(na='null', show_bbox=True)\n\n def iterfeatures(self, na='null', show_bbox=False):\n \"\"\"\n Returns an iterator that yields feature dictionaries that comply with\n __geo_interface__\n\n Parameters\n ----------\n na : {'null', 'drop', 'keep'}, default 'null'\n Indicates how to output missing (NaN) values in the GeoDataFrame\n * null: ouput the missing entries as JSON null\n * drop: remove the property from the feature. This applies to\n each feature individually so that features may have\n different properties\n * keep: output the missing entries as NaN\n\n show_bbox : include bbox (bounds) in the geojson. 
default False\n \"\"\"\n if na not in ['null', 'drop', 'keep']:\n raise ValueError('Unknown na method {0}'.format(na))\n\n ids = np.array(self.index, copy=False)\n geometries = np.array(self[self._geometry_column_name], copy=False)\n\n properties_cols = self.columns.difference([self._geometry_column_name])\n\n if len(properties_cols) > 0:\n # convert to object to get python scalars.\n properties = self[properties_cols].astype(object).values\n if na == 'null':\n properties[pd.isnull(self[properties_cols]).values] = None\n\n for i, row in enumerate(properties):\n geom = geometries[i]\n\n if na == 'drop':\n properties_items = dict((k, v) for k, v\n in zip(properties_cols, row)\n if not pd.isnull(v))\n else:\n properties_items = dict((k, v) for k, v\n in zip(properties_cols, row))\n\n feature = {'id': str(ids[i]),\n 'type': 'Feature',\n 'properties': properties_items,\n 'geometry': mapping(geom) if geom else None}\n\n if show_bbox:\n feature['bbox'] = geom.bounds if geom else None\n yield feature\n\n else:\n for fid, geom in zip(ids, geometries):\n feature = {'id': str(fid),\n 'type': 'Feature',\n 'properties': {},\n 'geometry': mapping(geom) if geom else None}\n if show_bbox:\n feature['bbox'] = geom.bounds if geom else None\n yield feature\n\n def _to_geo(self, **kwargs):\n \"\"\"\n Returns a python feature collection (i.e. the geointerface)\n representation of the GeoDataFrame.\n\n \"\"\"\n geo = {'type': 'FeatureCollection',\n 'features': list(self.iterfeatures(**kwargs))}\n\n if kwargs.get('show_bbox', False):\n geo['bbox'] = tuple(self.total_bounds)\n\n return geo\n\n def to_file(self, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"Write the ``GeoDataFrame`` to a file.\n\n By default, an ESRI shapefile is written, but any OGR data source\n supported by Fiona can be written. A dictionary of supported OGR\n providers is available via:\n\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n filename : string\n File path or file handle to write to.\n driver : string, default: 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default: None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written.\n\n Notes\n -----\n The extra keyword arguments ``**kwargs`` are passed to fiona.open and\n can be used to write to multi-layer data, store data within archives\n (zip files), etc.\n \"\"\"\n from geopandas.io.file import to_file\n to_file(self, filename, driver, schema, **kwargs)\n\n def to_crs(self, crs=None, epsg=None, inplace=False):\n \"\"\"Transform geometries to a new coordinate reference system.\n\n Transform all geometries in a GeoSeries to a different coordinate\n reference system. The ``crs`` attribute on the current GeoSeries must\n be set. Either ``crs`` in string or dictionary form or an EPSG code\n may be specified for output.\n\n This method will transform all points in all objects. It has no notion\n or projecting entire geometries. All segments joining points are\n assumed to be lines in the current projection, not geodesics. 
Objects\n crossing the dateline (or other projection boundary) will have\n undesirable behavior.\n\n Parameters\n ----------\n crs : dict or str\n Output projection parameters as string or in dictionary form.\n epsg : int\n EPSG code specifying output projection.\n inplace : bool, optional, default: False\n Whether to return a new GeoDataFrame or do the transformation in\n place.\n \"\"\"\n if inplace:\n df = self\n else:\n df = self.copy()\n geom = df.geometry.to_crs(crs=crs, epsg=epsg)\n df.geometry = geom\n df.crs = geom.crs\n if not inplace:\n return df\n\n def __getitem__(self, key):\n \"\"\"\n If the result is a column containing only 'geometry', return a\n GeoSeries. If it's a DataFrame with a 'geometry' column, return a\n GeoDataFrame.\n \"\"\"\n result = super(GeoDataFrame, self).__getitem__(key)\n geo_col = self._geometry_column_name\n if isinstance(key, string_types) and key == geo_col:\n result.__class__ = GeoSeries\n result.crs = self.crs\n result._invalidate_sindex()\n elif isinstance(result, DataFrame) and geo_col in result:\n result.__class__ = GeoDataFrame\n result.crs = self.crs\n result._geometry_column_name = geo_col\n result._invalidate_sindex()\n elif isinstance(result, DataFrame) and geo_col not in result:\n result.__class__ = DataFrame\n return result\n\n #\n # Implement pandas methods\n #\n\n def merge(self, *args, **kwargs):\n result = DataFrame.merge(self, *args, **kwargs)\n geo_col = self._geometry_column_name\n if isinstance(result, DataFrame) and geo_col in result:\n result.__class__ = GeoDataFrame\n result.crs = self.crs\n result._geometry_column_name = geo_col\n result._invalidate_sindex()\n elif isinstance(result, DataFrame) and geo_col not in result:\n result.__class__ = DataFrame\n return result\n\n @property\n def _constructor(self):\n return GeoDataFrame\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\"propagate metadata from other to self \"\"\"\n # merge operation: using metadata of the left object\n if method == 'merge':\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other.left, name, None))\n # concat operation: using metadata of the first object\n elif method == 'concat':\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other.objs[0], name, None))\n else:\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def copy(self, deep=True):\n \"\"\"\n Make a copy of this GeoDataFrame object\n\n Parameters\n ----------\n deep : boolean, default True\n Make a deep copy, i.e. 
also copy data\n\n Returns\n -------\n copy : GeoDataFrame\n \"\"\"\n # FIXME: this will likely be unnecessary in pandas >= 0.13\n data = self._data\n if deep:\n data = data.copy()\n return GeoDataFrame(data).__finalize__(self)\n\n def plot(self, *args, **kwargs):\n \"\"\"Generate a plot of the geometries in the ``GeoDataFrame``.\n\n If the ``column`` parameter is given, colors plot according to values\n in that column, otherwise calls ``GeoSeries.plot()`` on the\n ``geometry`` column.\n\n Wraps the ``plot_dataframe()`` function, and documentation is copied\n from there.\n \"\"\"\n return plot_dataframe(self, *args, **kwargs)\n\n plot.__doc__ = plot_dataframe.__doc__\n\n\n def dissolve(self, by=None, aggfunc='first', as_index=True):\n \"\"\"\n Dissolve geometries within `groupby` into single observation.\n This is accomplished by applying the `unary_union` method\n to all geometries within a groupself.\n\n Observations associated with each `groupby` group will be aggregated\n using the `aggfunc`.\n\n Parameters\n ----------\n by : string, default None\n Column whose values define groups to be dissolved\n aggfunc : function or string, default \"first\"\n Aggregation function for manipulation of data associated\n with each group. Passed to pandas `groupby.agg` method.\n as_index : boolean, default True\n If true, groupby columns become index of result.\n\n Returns\n -------\n GeoDataFrame\n \"\"\"\n\n # Process non-spatial component\n data = self.drop(labels=self.geometry.name, axis=1)\n aggregated_data = data.groupby(by=by).agg(aggfunc)\n\n\n # Process spatial component\n def merge_geometries(block):\n merged_geom = block.unary_union\n return merged_geom\n\n g = self.groupby(by=by, group_keys=False)[self.geometry.name].agg(merge_geometries)\n\n # Aggregate\n aggregated_geometry = GeoDataFrame(g, geometry=self.geometry.name, crs=self.crs)\n # Recombine\n aggregated = aggregated_geometry.join(aggregated_data)\n\n # Reset if requested\n if not as_index:\n aggregated = aggregated.reset_index()\n\n return aggregated\n\n # overrides GeoPandasBase method\n def explode(self):\n \"\"\"\n Explode muti-part geometries into multiple single geometries.\n\n Each row containing a multi-part geometry will be split into\n multiple rows with single geometries, thereby increasing the vertical\n size of the GeoDataFrame.\n\n The index of the input geodataframe is no longer unique and is\n replaced with a multi-index (original index with additional level\n indicating the multiple geometries: a new zero-based index for each\n single part geometry per multi-part geometry).\n\n Returns\n -------\n GeoDataFrame\n Exploded geodataframe with each single geometry\n as a separate entry in the geodataframe.\n\n \"\"\"\n df_copy = self.copy()\n\n exploded_geom = df_copy.geometry.explode().reset_index(level=-1)\n exploded_index = exploded_geom.columns[0]\n\n df = pd.concat(\n [df_copy.drop(df_copy._geometry_column_name, axis=1),\n exploded_geom], axis=1)\n # reset to MultiIndex, otherwise df index is only first level of\n # exploded GeoSeries index.\n df.set_index(exploded_index, append=True, inplace=True)\n df.index.names = list(self.index.names) + [None]\n geo_df = df.set_geometry(self._geometry_column_name)\n return geo_df\n\n\ndef points_from_xy(x, y, z=None):\n \"\"\"\n Generate list of shapely Point geometries from x, y(, z) coordinates.\n\n Parameters\n ----------\n x, y, z : array\n\n Examples\n --------\n >>> geometry = geopandas.points_from_xy(x=[1, 0], y=[0, 1])\n >>> geometry = 
geopandas.points_from_xy(df['x'], df['y'], df['z'])\n >>> gdf = geopandas.GeoDataFrame(\n df, geometry=geopandas.points_from_xy(df['x'], df['y']))\n\n Returns\n -------\n list : list\n \"\"\"\n if not len(x) == len(y):\n raise ValueError(\"x and y arrays must be equal length.\")\n if z is not None:\n if not len(z) == len(x):\n raise ValueError(\"z array must be same length as x and y.\")\n geom = [Point(i, j, k) for i, j, k in zip(x, y, z)]\n else:\n geom = [Point(i, j) for i, j in zip(x, y)]\n return geom\n\n\ndef _dataframe_set_geometry(self, col, drop=False, inplace=False, crs=None):\n if inplace:\n raise ValueError(\"Can't do inplace setting when converting from\"\n \" DataFrame to GeoDataFrame\")\n gf = GeoDataFrame(self)\n # this will copy so that BlockManager gets copied\n return gf.set_geometry(col, drop=drop, inplace=False, crs=crs)\n\nif PY3:\n DataFrame.set_geometry = _dataframe_set_geometry\nelse:\n import types\n DataFrame.set_geometry = types.MethodType(_dataframe_set_geometry, None,\n DataFrame)\n\n\nGeoDataFrame._create_indexer('cx', _CoordinateIndexer)\n"
] | [
[
"pandas.Series",
"numpy.sqrt",
"pandas.MultiIndex",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"pandas.util.testing.assert_frame_equal",
"numpy.asanyarray",
"numpy.array"
],
[
"pandas.DataFrame.merge",
"numpy.array",
"pandas.isnull"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
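The geopandas record above ends with `GeoDataFrame.from_features`, whose core is a plain row-building loop: each feature's `properties` dict is flattened next to its geometry before the rows are handed to `pandas.DataFrame`. Below is a minimal sketch of that loop using only the pandas call listed in the apis column; the two feature dictionaries are invented sample data, and the shapely `shape()` conversion is deliberately left out.

```python
# Row-building pattern behind GeoDataFrame.from_features, pandas only.
# The feature dicts below are made-up sample data; geopandas would additionally
# convert each 'geometry' mapping with shapely.geometry.shape().
import pandas as pd

features = [
    {"geometry": {"type": "Point", "coordinates": [0.0, 0.0]}, "properties": {"name": "a"}},
    {"geometry": {"type": "Point", "coordinates": [1.0, 2.0]}, "properties": {"name": "b"}},
]

rows = []
for f in features:
    d = {"geometry": f["geometry"]}  # keep the geometry mapping as-is
    d.update(f["properties"])        # flatten properties into ordinary columns
    rows.append(d)

df = pd.DataFrame(rows)
print(df.columns.tolist())  # ['geometry', 'name']
```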
terryjx/FunFact | [
"595c3b68bac7cc92c802969f207f060c1242a88b"
] | [
"funfact/legacy/rbf/_base.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom collections import namedtuple\nimport dill\nimport numpy as np\nimport pycuda.driver as cuda\nfrom funfact.cuda import context_manager, ManagedArray\n\n\nclass RBFExpansionBasePyCUDA:\n\n def __init__(self):\n context_manager.autoinit()\n\n @staticmethod\n def as_namedtuple(name, **kwargs):\n return namedtuple(name, list(kwargs.keys()))(*kwargs.values())\n\n @staticmethod\n def _as_cuda_array(arr, dtype=None, order=None):\n if (isinstance(arr, np.ndarray) and\n isinstance(arr.base, cuda.ManagedAllocation) and\n arr.dtype == dtype and\n ((order is None) or\n (order == 'C' and arr.flags.c_contiguous) or\n (order == 'F' and arr.flags.f_contiguous))):\n return arr\n else:\n return ManagedArray.copy(arr, dtype, order)\n\n @staticmethod\n def _zero_cuda_array(arr):\n assert isinstance(arr.base, cuda.ManagedAllocation)\n cuda.memset_d32(\n arr.base.get_device_pointer(),\n 0,\n arr.dtype.itemsize // 4 * np.prod(arr.shape).item()\n )\n\n def to_pickle(self, file):\n state = self.__dict__.copy()\n open(file, 'wb').write(dill.dumps(state))\n\n @classmethod\n def from_pickle(cls, file):\n fac = cls()\n for key, val in dill.loads(open(file, 'rb').read()).items():\n setattr(fac, key, val)\n return fac\n\n @property\n def config(self):\n return {\n key: self.__dict__[key] for key in self.__dict__\n if not key.startswith('_')\n }\n\n @property\n def report(self):\n return self._report\n\n @report.setter\n def report(self, report_dict):\n self._report = self.as_namedtuple('report', **report_dict)\n\n @property\n def optimum(self):\n return self._optimum\n\n class Model:\n '''An approximation of a dense matrix as a sum of RBF over distance\n matrices.\n '''\n\n def __init__(\n self, f, x, x_names=None\n ):\n for w in x:\n assert w.shape[-1] == x[0].shape[-1],\\\n \"Inconsisent component size.\"\n self.f = f\n self.x = x\n self.x_names = x_names\n\n def __repr__(self):\n xns = ', '.join(self.x_names)\n return f'<ensemble of {len(self)} RBF expansions [x_names = {xns}]>'\n\n def __len__(self):\n return len(self.x[-1])\n\n def __call__(\n self, runs=None, components=None, device=None\n ):\n x = self.x\n if components is not None:\n components = np.array(components)\n if components.ndim == 0:\n components = np.expand_dims(components, 0)\n x = [w[..., components, :] if w.ndim >= 2 else w for w in x]\n if runs is not None:\n x = [w[..., runs] for w in x]\n return self.f(*x)\n\n @property\n def funrank(self):\n return len(self.x[-2])\n\n def __getattr__(self, a):\n return self.x[self.x_names.index(a)]\n\n def __getstate__(self):\n return vars(self)\n\n def __setstate__(self, state):\n vars(self).update(state)\n"
] | [
[
"numpy.array",
"numpy.expand_dims",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
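In the terryjx/FunFact record above, `Model.__call__` normalizes its `components` argument before fancy indexing: a scalar index is promoted to a one-element array with `numpy.expand_dims`, which is why the apis column lists `numpy.array` and `numpy.expand_dims`. A small sketch of that normalization in isolation; the factor tensor `w` is made up for illustration.

```python
# Component-selection normalization used in Model.__call__:
# a scalar component index becomes a length-1 array before fancy indexing,
# so the selected axis is kept rather than squeezed away.
import numpy as np

def select_components(w, components):
    components = np.array(components)
    if components.ndim == 0:             # scalar -> length-1 array
        components = np.expand_dims(components, 0)
    return w[..., components, :]

w = np.arange(24).reshape(2, 3, 4)       # hypothetical factor tensor
print(select_components(w, 1).shape)     # (2, 1, 4)
```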
houzeyu2683/IRRHW | [
"c44298ad14c468eff36bc75ebc63abdc9ba24d55",
"c44298ad14c468eff36bc75ebc63abdc9ba24d55",
"c44298ad14c468eff36bc75ebc63abdc9ba24d55"
] | [
"HW/2/script/download.py",
"HW/4/data/document.py",
"HW/4/extension/skip/tabulation-skip.py"
] | [
"\n\n##\n## The packages.\nfrom selenium import webdriver\nimport pandas, os, tqdm, time\n\n\n##\n## The arguments.\n# keyword = [\"Covid-19\", \"Stroke\", \"Myocardial Infarction\", \"influenza\", \"asthma\", \"chest cavity\"]\nkeyword = [\"chest cavity\"]\nfor k in keyword:\n\n platform = \"pubmed\"\n site = \"https://pubmed.ncbi.nlm.nih.gov/\"\n number = 100\n folder = \"resource/csv/{}\".format(k)\n os.makedirs(folder) if not os.path.isdir(folder) else None\n\n\n ##\n ##\n option = webdriver.chrome.options.Options()\n option.binary_location = \"/usr/bin/google-chrome\"\n # option.add_argument('--no-sandbox')\n driver = webdriver.Chrome(options=option, executable_path='driver/chrome')\n page = range(1, number+1, 1)\n group = {\n \"link\":[],\n \"title\":[],\n \"abstract\":[],\n \"tag\":[],\n \"author\":[]\n }\n for p in page:\n \n try:\n\n driver.get(\"{}?term={}&filter=simsearch1.fha&page={}\".format(site, k, p))\n group['link'] += [i.get_attribute(\"href\") for i in driver.find_elements_by_css_selector(\".docsum-title\")]\n pass\n\n except:\n\n continue\n\n pass\n\n link = pandas.DataFrame({\"link\":group['link']})\n link.to_csv(os.path.join(folder, \"link.csv\"), index=False)\n\n def remove(x, what=\"\"):\n\n output = []\n for i in x:\n\n if(i==what):\n\n continue\n\n else:\n\n output += [i]\n pass\n\n pass\n \n return(output)\n\n for l in tqdm.tqdm(group['link'], total=len(group['link'])):\n\n try:\n\n driver.get(l)\n pass\n\n except:\n\n group['title'] += [None]\n group['abstract'] += [None]\n group['tag'] += [None]\n group['author'] += [None] \n continue\n\n try:\n \n title = driver.find_element_by_css_selector(\".heading-title\").text\n pass\n\n except:\n\n title = None\n pass\n\n try:\n \n abstract = driver.find_element_by_css_selector(\"#enc-abstract p\").text\n pass\n\n except:\n\n abstract = None\n pass\n\n try:\n\n tag = driver.find_element_by_css_selector(\"#enc-abstract+ p\").text.split(\": \")[-1]\n pass\n\n except:\n\n tag = None\n pass\n\n try:\n \n author = \";\".join(remove([i.text for i in driver.find_elements_by_css_selector(\".full-name\")], what=''))\n pass\n\n except:\n\n author = None\n pass\n\n group['title'] += [title]\n group['abstract'] += [abstract]\n group['tag'] += [tag]\n group['author'] += [author]\n time.sleep(1)\n pass\n\n table = pandas.DataFrame(group)\n table.to_csv(os.path.join(folder, \"{}.csv\".format(k)), index=False)\n driver.close()\n pass\n\n\n## \n## Merge all table together.\npath, folder = 'resource/csv', ['asthma', 'Covid-19', \"influenza\", \"Myocardial Infarction\", 'Stroke', \"chest cavity\"]\ngroup = []\nfor f in folder:\n\n p = os.path.join(path, f, '{}.csv'.format(f))\n t = pandas.read_csv(p)\n t['keyword'] = f\n t = t.dropna(subset=['title'])\n group += [t]\n pass\n\ndata = pandas.concat(group).reset_index(drop=True)\ndata.to_csv(os.path.join(path, \"data.csv\"), index=False)\n\n",
"\n\nimport itertools\nimport pandas\nimport tqdm\nfrom functools import reduce\nfrom collections import Counter\n\n\nclass document:\n\n def __init__(self, title, sentence, vocabulary):\n\n self.title = title\n self.sentence = sentence\n self.vocabulary = vocabulary\n return\n\n def build(self, what='matrix'):\n\n if(what=='matrix'):\n\n matrix = []\n total = len(self.sentence)\n for s, t in tqdm.tqdm(zip(self.sentence, self.title), total=total):\n\n w = self.vocabulary.tokenize(sentence=s)\n c = Counter(w)\n m = pandas.DataFrame.from_dict(c, orient='index').reset_index()\n m = m.rename(columns={'index':'word', 0:t})\n matrix += [m]\n pass\n \n self.matrix = reduce(lambda x, y: pandas.merge(x, y, how='outer'), matrix)\n self.matrix = self.matrix.fillna(0)\n pass\n\n return\n\n pass\n\n\n# voc = {'word':3, \"text\":1, \"hello\":4}\n# w = ['word', 'hello', 'text', 'text', 'word']\n# [i for i in w]\n",
"\n\nimport pandas, tqdm\nimport itertools\nfrom collections import Counter\nimport re\n\n\nclass tabulation:\n\n def __init__(self, path):\n\n self.path = path\n pass\n \n def read(self):\n\n self.table = pandas.read_csv(self.path)\n pass\n\n def load(self, tokenize=None):\n\n self.tokenize = tokenize\n return\n\n def split(self):\n\n if(self.tokenize):\n\n total = len(self.table)\n word = []\n bigram = []\n thigram = []\n for s in tqdm.tqdm(self.table['abstract'], total=total, leave=False):\n \n w, b, t = self.tokenize(sentence=s)\n word += [w]\n bigram += [b]\n thigram += [t]\n pass\n\n self.word = word\n self.bigram = bigram\n self.thigram = thigram\n pass\n \n else:\n\n print(\"The tokenizer function not found.\")\n pass\n\n pass\n\n def build(self, what='dictionary'):\n\n if(what=='dictionary'):\n\n count = Counter(list(itertools.chain(*self.word, *self.bigram, *self.thigram)))\n dictionary = pandas.DataFrame.from_dict(count, orient='index').reset_index()\n dictionary.columns = ['word', 'frequency']\n self.dictionary = dictionary\n pass\n\n return\n \n def search(self, sentence):\n # sentence = \"in the whole genome of SARS-CoV-2 []\"\n head, tail = sentence.split(\"[]\")\n head, _, _ = self.tokenize(head)\n tail, _, _ = self.tokenize(tail)\n head = head[-1] + '_' if(head!=[]) else None\n tail = tail[0] + '_' if(tail!=[]) else None\n pass\n\n index = []\n for w in tqdm.tqdm(self.dictionary['word'], leave=False):\n\n if(head):\n\n if(head in w):\n\n w = re.sub(head, \"\", w)\n index += [w]\n pass\n\n pass\n\n if(tail):\n\n if(tail in w):\n\n w = re.sub(tail, \"\", w)\n index += [w]\n pass\n\n pass\n\n pass\n\n count = Counter(index)\n result = pandas.DataFrame.from_dict(count, orient='index').reset_index()\n result.columns = ['word', 'count']\n result = result.sort_values(by=['count'], ascending=False).head(10)\n return(result)\n\n pass\n\n\n\n# head = 'how' + '_'\n# tail = \"_\" + 'you'\n\n\n# word = [['how', 'are', 'you'],['fine', 'and', \"you\"],['how_are', 'are_you']]\n# bi = [['how', 'are', 'you'],['fine', 'and', \"you\"],['how_are', 'are_you']]\n# count = Counter(list(itertools.chain(*word, *bi)))\n# dictionary = pandas.DataFrame.from_dict(count, orient='index').reset_index()\n# dictionary.columns = ['word', 'frequency']\n\n\n\n\n\n\n# pandas.DataFrame()\n\n\n\n\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
],
[
"pandas.merge",
"pandas.DataFrame.from_dict"
],
[
"pandas.read_csv",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
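The `document.build` method in the houzeyu2683/IRRHW record builds a term-document matrix by turning each document's token `Counter` into a one-column frame and outer-merging the frames on the shared `word` column (hence `pandas.DataFrame.from_dict` and `pandas.merge` in the apis column). A minimal, self-contained sketch of the same construction; the two tokenized documents are invented sample data.

```python
# Term-document matrix via per-document Counters and an outer merge on 'word'.
from collections import Counter
from functools import reduce
import pandas as pd

docs = {"doc1": ["hello", "world", "hello"],   # hypothetical tokenized documents
        "doc2": ["world", "text"]}

frames = []
for title, tokens in docs.items():
    m = pd.DataFrame.from_dict(Counter(tokens), orient="index").reset_index()
    m = m.rename(columns={"index": "word", 0: title})
    frames.append(m)

matrix = reduce(lambda x, y: pd.merge(x, y, how="outer"), frames).fillna(0)
print(matrix)   # one row per word, one count column per document
```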
eraofelix/PV-RCNN | [
"6361ec99cc1c92120263ef56b2c2b003c2cd7264"
] | [
"pvrcnn/inference.py"
] | [
"import copy\nimport os\nimport os.path as osp\nimport numpy as np\nimport torch\nimport sys\nsys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\nsys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')\nimport cv2\nfrom tqdm import tqdm\nimport time\nfrom pvrcnn.core import cfg, Preprocessor\nfrom pvrcnn.detector import PV_RCNN, Second\nfrom pvrcnn.ops import nms_rotated, box_iou_rotated\nfrom pvrcnn.core import cfg, AnchorGenerator\nfrom viz.gen_bev import gen_bev_map, draw_bev_box\n\nos.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\n\n\ndef to_device(item):\n for key in ['points', 'features', 'coordinates', 'occupancy']:\n item[key] = item[key].cuda()\n return item\n\ndef inference(out, anchors, cfg):\n cls_map, reg_map = out['P_cls'].squeeze(0), out['P_reg'].squeeze(0)\n score_map = cls_map.sigmoid()\n top_scores, class_idx = score_map.view(cfg.NUM_CLASSES, -1).max(0)\n top_scores, anchor_idx = top_scores.topk(k=cfg.PROPOSAL.TOPK)\n class_idx = class_idx[anchor_idx]\n # import pdb;pdb.set_trace()\n top_anchors = anchors.view(cfg.NUM_CLASSES, -1, cfg.BOX_DOF)[class_idx, anchor_idx]\n top_boxes = reg_map.reshape(cfg.NUM_CLASSES, -1, cfg.BOX_DOF)[class_idx, anchor_idx]\n\n P_xyz, P_wlh, P_yaw = top_boxes.split([3, 3, 1], dim=1)\n A_xyz, A_wlh, A_yaw = top_anchors.split([3, 3, 1], dim=1)\n\n A_wl, A_h = A_wlh.split([2, 1], -1)\n A_norm = A_wl.norm(dim=-1, keepdim=True).expand(-1, 2)\n A_norm = torch.cat((A_norm, A_h), dim=-1)\n\n top_boxes = torch.cat((\n (P_xyz * A_norm + A_xyz),\n (torch.exp(P_wlh) * A_wlh),\n (P_yaw + A_yaw)), dim=1\n )\n\n nms_idx = nms_rotated(top_boxes[:, [0, 1, 3, 4, 6]], top_scores, iou_threshold=0.1)\n top_boxes = top_boxes[nms_idx]\n top_scores = top_scores[nms_idx]\n top_classes = class_idx[nms_idx]\n return top_boxes, top_scores\n\n\nclass Inference():\n def __init__(self,):\n self.cfg = cfg\n self.cfg.merge_from_file('../configs/second/car.yaml')\n self.preprocessor = Preprocessor(cfg)\n self.anchors = AnchorGenerator(cfg).anchors.cuda()\n self.net = PV_RCNN(cfg).cuda().eval()\n # self.net = Second(cfg).cuda().eval()\n ckpt = torch.load('./ckpts/epoch_49.pth')\n self.net.load_state_dict(ckpt['state_dict'])\n pass\n\n def inference_bin_to_img(self, bin_path):\n pc = np.fromfile(bin_path, np.float32).reshape(-1, 4)\n item = dict(points=[pc])\n with torch.no_grad():\n item = to_device(self.preprocessor(item))\n out = self.net(item)\n top_boxes, top_scores= inference(out, self.anchors, self.cfg)\n rgb = draw_bev_box(pc, top_boxes.cpu().numpy())\n return rgb\n\n def inference_bins_to_video(self, bins_dir, vid_path):\n writer = cv2.VideoWriter(vid_path, cv2.VideoWriter_fourcc(*'MJPG'), 10, (2000,1000))\n bin_names = os.listdir(bins_dir)\n bin_names.sort()\n bin_paths = [os.path.join(bins_dir, p) for p in bin_names if '.bin' in p]\n for bin_path in tqdm(bin_paths[:200]):\n rgb = self.inference_bin_to_img(bin_path).astype(np.uint8)\n writer.write(rgb)\n\n\n\nif __name__ == '__main__':\n \n basedir = osp.join(cfg.DATA.ROOTDIR, 'velodyne_reduced/')\n bin_path = osp.join(basedir, '1544426448586.bin')\n bins_dir = '/home/kun.fan/mnt/output/lidar_baseline_20200228/20200227-154819_262'\n png_path = os.path.expanduser('~/mnt/output/1544426448586.png')\n vid_path = os.path.expanduser('~/mnt/output/test.avi')\n\n infer = Inference()\n rgb = infer.inference_bin_to_img(bin_path)\n cv2.imwrite(png_path, rgb)\n infer.inference_bins_to_video(bins_dir, vid_path)\n"
] | [
[
"numpy.fromfile",
"torch.load",
"torch.cat",
"torch.exp",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
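The `inference()` function in the eraofelix/PV-RCNN record decodes regression outputs relative to anchors: the xyz offsets are rescaled by the anchor's planar diagonal, the box dimensions are decoded with `torch.exp`, and yaw is an additive offset. Below that decoding step is factored into a standalone function; the zero/one tensors fed to it are placeholders, not real network output.

```python
# Anchor-based box decoding as used in inference(): shapes are (N, 7) with
# layout [x, y, z, w, l, h, yaw]. Inputs here are dummy tensors.
import torch

def decode_boxes(top_boxes, top_anchors):
    P_xyz, P_wlh, P_yaw = top_boxes.split([3, 3, 1], dim=1)
    A_xyz, A_wlh, A_yaw = top_anchors.split([3, 3, 1], dim=1)

    A_wl, A_h = A_wlh.split([2, 1], dim=-1)
    A_norm = A_wl.norm(dim=-1, keepdim=True).expand(-1, 2)  # planar diagonal
    A_norm = torch.cat((A_norm, A_h), dim=-1)

    return torch.cat((P_xyz * A_norm + A_xyz,      # centers: scaled offsets
                      torch.exp(P_wlh) * A_wlh,    # sizes: log-space residuals
                      P_yaw + A_yaw), dim=1)       # yaw: additive offset

with torch.no_grad():
    boxes = decode_boxes(torch.zeros(5, 7), torch.ones(5, 7))
print(boxes.shape)  # torch.Size([5, 7])
```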
luizapozzobon/myo_project | [
"ce35149c444dee5a13dc7d1f1915046066ba47e2"
] | [
"primeiros_models/dense_features.py"
] | [
"from sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\ndef load_compilado(arquivo):\n path = '/home/luiza/UFSM/Myo/myo_project/datasets/oficial/' + arquivo\n df = pd.read_csv(path)\n return df\n\ndf = load_compilado('features_balanced.csv')\ndf_labels = df['Label']\ndf = df.drop(columns=['Label'])\n\nx_train, x_test, y_train, y_test = train_test_split(df.values, df_labels.values, test_size=0.3, random_state=0)\n\nprint('All Data:')\nprint(y_train)\nprint(y_test)\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.optimizers import Adam\n\ninput_shape = (x_train.shape[1])\nprint(input_shape, y_train.shape, x_train.shape)\n\noptimizer = Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.01, amsgrad=False)\n\nclassifier = Sequential()\nclassifier.add(Dense(32, input_dim=x_train.shape[1]))\nclassifier.add(Activation('relu'))\nclassifier.add(Dense(units = 64))\nclassifier.add(Activation('relu'))\nclassifier.add(Dense(units = 128))\nclassifier.add(Activation('relu'))\nclassifier.add(Dense(units = 1, activation=\"softmax\"))\n\nclassifier.summary()\n\nx_train = x_train.reshape(x_train.shape[0], x_train.shape[1])\n\nclassifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics=[\"accuracy\"])\n\nclassifier.fit(x_train, y_train, epochs = 10000, batch_size = 43, verbose=1)\n\n# Save\nclassifier.save(\"model_cross_splited_data.h5\")\nprint(\"Saved model to disk\")\n\n###############################################\n\nfrom tensorflow import keras\n\n# # Load Model\n# model = keras.models.load_model('model_cross_splited_data.h5')\n# model.summary()\n\ndef evaluateModel(prediction, y):\n good = 0\n for i in range(len(y)):\n if (prediction[i] == np.argmax(y[i])):\n good = good +1\n return (good/len(y)) * 100.0\n\n# result_test = classifier.predict_classes(X_test)\n# print(\"Correct classification rate on test data\")\n# print(evaluateModel(result_test, y_test))\n\nresult_train = classifier.predict_classes(x_train)\nprint(\"Correct classification rate on train data\")\nprint(evaluateModel(result_train, y_train))\n"
] | [
[
"pandas.read_csv",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.optimizers.Adam",
"numpy.argmax",
"tensorflow.keras.models.Sequential"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
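The luizapozzobon/myo_project record compiles a Dense stack with `binary_crossentropy` but ends in `Dense(units=1, activation="softmax")`; softmax over a single unit always outputs 1.0, so that final activation carries no information. A sketch of the same architecture with the usual sigmoid output unit follows; the input dimension of 8 is an assumption, since the real feature count comes from the CSV.

```python
# Same dense stack as the record, but with a sigmoid output unit, which is the
# standard pairing with binary_crossentropy. input_dim=8 is a made-up placeholder.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation

model = Sequential([
    Dense(32, input_dim=8),
    Activation("relu"),
    Dense(64),
    Activation("relu"),
    Dense(128),
    Activation("relu"),
    Dense(1, activation="sigmoid"),   # instead of softmax on a single unit
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()
```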
alexborio/Projects | [
"a85ad4aab370b009de14e3696e06aad92ca4859f"
] | [
"VAE/vae.py"
] | [
"\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nencoder_layers = []\ndecoder_layers = []\n\nNormal = tf.contrib.distributions.Normal\nBernoulli = tf.contrib.distributions.Bernoulli\n\nclass Layer(object):\n def __init__(self, n, m, f=tf.nn.relu):\n self.W = tf.Variable(tf.random_normal(shape=(n, m))*2 / np.sqrt(n), dtype=tf.float32)\n self.W = tf.cast(self.W, dtype=tf.float32)\n self.c = tf.Variable(tf.zeros(m), dtype=tf.float32)\n self.c = tf.cast(self.c, dtype=tf.float32)\n self.f = f\n\n def forward(self, X):\n return self.f(tf.matmul(X, self.W) + self.c)\n\n\ndef KLDivergence(mu, sigma):\n KL1 = tf.log(sigma)\n KL2 = (1 + tf.pow((-mu), 2))/2/tf.pow(sigma, 2) - 0.5\n KL = KL1 + KL2\n return tf.reduce_sum(KL, axis=1)\n\n\nmnist = input_data.read_data_sets('../data/MNIST_data', one_hot=True)\nX = mnist.train.images\n\nn_input = 28*28\n\nhidden_layer_sizes = [200, 150, 100, 20, 2]\nX_in = tf.placeholder(dtype=tf.float32, shape=(None, n_input))\nZ = X_in\nn = n_input\n\nfor m in hidden_layer_sizes[:-1]:\n\n layer = Layer(n, m)\n Z = layer.forward(Z)\n encoder_layers.append(layer)\n n = m\n\n\nm_latent = hidden_layer_sizes[-1] * 2\n\nlayer = Layer(n, m_latent, lambda x: x)\n\nZ = layer.forward(Z)\nencoder_layers.append(layer)\n\nmu = Z[:, :(m_latent // 2)]\nsigma = tf.exp(Z[:, (m_latent // 2):])\n\nE = tf.placeholder(dtype=tf.float32, shape=(None, hidden_layer_sizes[-1]))\n\nn = m_latent // 2\n\nZ = E*sigma + mu\n\nfor m in reversed(hidden_layer_sizes[:-1]):\n layer = Layer(n, m)\n Z = layer.forward(Z)\n decoder_layers.append(layer)\n n = m\n\nlayer = Layer(n, n_input, lambda x: x)\nZ = layer.forward(Z)\ndecoder_layers.append(layer)\n\nkl = -tf.log(sigma) + 0.5 * (sigma ** 2 + mu ** 2) - 0.5\nkl = tf.reduce_sum(kl, axis=1)\n\n#kl = KLDivergence(mu, sigma)\n\nprobs = tf.contrib.distributions.Bernoulli(logits=Z).log_prob(X_in)\ncost = tf.reduce_sum(tf.reduce_sum(probs, 1) - kl)\ntrain_op = tf.train.RMSPropOptimizer(0.001).minimize(-cost)\nsess = tf.InteractiveSession()\ninit_op = tf.global_variables_initializer()\n\nsess.run(init_op)\n\nN = (X.shape)[0]\ncosts = []\nn_batch_sz = 100\nepochs = 50\nn_batches = N // n_batch_sz\nX = (X > 0.5).astype(np.float32)\nfor epoch in range(epochs):\n\n np.random.shuffle(X)\n\n for i in range(n_batches):\n\n dict1 = {X_in: X[i*n_batch_sz:(i + 1)*n_batch_sz, :]}\n dict2 = {E: np.reshape(np.random.randn(m_latent // 2), (1, m_latent // 2))}\n dict1.update(dict2)\n _, c = sess.run((train_op, cost), feed_dict=dict1)\n c /= n_batch_sz\n costs.append(c)\n print(c)\n\n\ndone = False\n\nZ_in = tf.placeholder(dtype=tf.float32, shape=(None, hidden_layer_sizes[-1]))\nZ_sim = Z_in\nfor layer in decoder_layers:\n Z_sim = layer.forward(Z_sim)\n\nZ_sim_out = tf.nn.sigmoid(Z_sim)\n\nwhile not done:\n feed = {Z_in: np.reshape(np.random.randn(m_latent // 2), (1, m_latent // 2))}\n X_sim = sess.run(Z_sim_out, feed_dict=feed)\n\n im_X_sim = X_sim.reshape(28, 28)\n plt.imshow(im_X_sim, cmap='gray')\n plt.show()\n\n ans = input(\"Generate another?\")\n if ans and ans[0] in ('n' or 'N'):\n done = True"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.sqrt",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.cast",
"numpy.random.randn",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.matmul",
"tensorflow.nn.sigmoid",
"tensorflow.InteractiveSession",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.pow",
"tensorflow.contrib.distributions.Bernoulli",
"tensorflow.exp",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.show",
"numpy.random.shuffle",
"tensorflow.log",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
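The VAE in the alexborio/Projects record uses the closed-form KL divergence between the encoder's diagonal Gaussian and a standard normal, `kl = -log(sigma) + (sigma**2 + mu**2)/2 - 1/2`, summed over latent dimensions. A small numpy check of that formula; the mu/sigma values are arbitrary.

```python
# KL(N(mu, sigma^2) || N(0, 1)) per sample, summed over latent dimensions.
import numpy as np

def kl_diag_gaussian(mu, sigma):
    return np.sum(-np.log(sigma) + 0.5 * (sigma ** 2 + mu ** 2) - 0.5, axis=1)

mu = np.array([[0.0, 0.5]])      # arbitrary example values
sigma = np.array([[1.0, 1.0]])
print(kl_diag_gaussian(mu, sigma))  # [0.125]: 0 for the first dim, 0.125 for the second
```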
bhgedigital/probability | [
"df70fe68491f839df438628fa79cb3378888039e",
"df70fe68491f839df438628fa79cb3378888039e",
"df70fe68491f839df438628fa79cb3378888039e"
] | [
"tensorflow_probability/python/distributions/inverse_gamma.py",
"tensorflow_probability/python/layers/distribution_layer.py",
"tensorflow_probability/python/distributions/independent_test.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The InverseGamma distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\n\n\n__all__ = [\n \"InverseGamma\",\n \"InverseGammaWithSoftplusConcentrationRate\",\n]\n\n\nclass InverseGamma(distribution.Distribution):\n \"\"\"InverseGamma distribution.\n\n The `InverseGamma` distribution is defined over positive real numbers using\n parameters `concentration` (aka \"alpha\") and `scale` (aka \"beta\").\n\n #### Mathematical Details\n\n The probability density function (pdf) is,\n\n ```none\n pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z\n Z = Gamma(alpha) beta**-alpha\n ```\n\n where:\n\n * `concentration = alpha`,\n * `scale = beta`,\n * `Z` is the normalizing constant, and,\n * `Gamma` is the [gamma function](\n https://en.wikipedia.org/wiki/Gamma_function).\n\n The cumulative density function (cdf) is,\n\n ```none\n cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha)\n ```\n\n where `GammaInc` is the [upper incomplete Gamma function](\n https://en.wikipedia.org/wiki/Incomplete_gamma_function).\n\n The parameters can be intuited via their relationship to mean and variance\n when these moments exist,\n\n ```none\n mean = beta / (alpha - 1) when alpha > 1\n variance = beta**2 / (alpha - 1)**2 / (alpha - 2) when alpha > 2\n ```\n\n i.e., under the same conditions:\n\n ```none\n alpha = mean**2 / variance + 2\n beta = mean * (mean**2 / variance + 1)\n ```\n\n Distribution parameters are automatically broadcast in all functions; see\n examples for details.\n\n Samples of this distribution are reparameterized (pathwise differentiable).\n The derivatives are computed using the approach described in the paper\n\n [Michael Figurnov, Shakir Mohamed, Andriy Mnih.\n Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n dist = tfd.InverseGamma(concentration=3.0, scale=2.0)\n dist2 = tfd.InverseGamma(concentration=[3.0, 4.0], scale=[2.0, 3.0])\n ```\n\n Compute the gradients of samples w.r.t. 
the parameters:\n\n ```python\n tfd = tfp.distributions\n concentration = tf.constant(3.0)\n scale = tf.constant(2.0)\n dist = tfd.InverseGamma(concentration, scale)\n samples = dist.sample(5) # Shape [5]\n loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function\n # Unbiased stochastic gradients of the loss function\n grads = tf.gradients(loss, [concentration, scale])\n ```\n\n \"\"\"\n\n @deprecation.deprecated_args(\n \"2019-05-08\", \"The `rate` parameter is deprecated. Use `scale` instead.\"\n \"The `rate` parameter was always interpreted as a `scale` parameter, \"\n \"but erroneously misnamed.\", \"rate\")\n def __init__(self,\n concentration,\n scale=None,\n validate_args=False,\n allow_nan_stats=True,\n rate=None,\n name=\"InverseGamma\"):\n \"\"\"Construct InverseGamma with `concentration` and `scale` parameters.\n\n The parameters `concentration` and `scale` must be shaped in a way that\n supports broadcasting (e.g. `concentration + scale` is a valid operation).\n\n Args:\n concentration: Floating point tensor, the concentration params of the\n distribution(s). Must contain only positive values.\n scale: Floating point tensor, the scale params of the distribution(s).\n Must contain only positive values.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n rate: Deprecated (mis-named) alias for `scale`.\n name: Python `str` name prefixed to Ops created by this class.\n\n\n Raises:\n TypeError: if `concentration` and `scale` are different dtypes.\n \"\"\"\n if rate is not None:\n scale = rate\n parameters = dict(locals())\n with tf.compat.v1.name_scope(name, values=[concentration, scale]) as name:\n dtype = dtype_util.common_dtype([concentration, scale],\n preferred_dtype=tf.float32)\n concentration = tf.convert_to_tensor(\n value=concentration, name=\"concentration\", dtype=dtype)\n scale = tf.convert_to_tensor(value=scale, name=\"scale\", dtype=dtype)\n with tf.control_dependencies([\n tf.compat.v1.assert_positive(\n concentration, message=\"Concentration must be positive.\"),\n tf.compat.v1\n .assert_positive(scale, message=\"Scale must be positive.\"),\n ] if validate_args else []):\n self._concentration = tf.identity(concentration, name=\"concentration\")\n self._scale = tf.identity(scale, name=\"scale\")\n tf.debugging.assert_same_float_dtype([self._concentration, self._scale])\n\n super(InverseGamma, self).__init__(\n dtype=self._concentration.dtype,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n parameters=parameters,\n graph_parents=[self._concentration, self._scale],\n name=name)\n\n @staticmethod\n def _param_shapes(sample_shape):\n return dict(\n zip((\"concentration\", \"scale\"),\n ([tf.convert_to_tensor(value=sample_shape, dtype=tf.int32)] * 2)))\n\n def _params_event_ndims(self):\n return dict(concentration=0, rate=0, scale=0)\n\n @property\n def concentration(self):\n \"\"\"Concentration parameter.\"\"\"\n return self._concentration\n\n @property\n @deprecation.deprecated(\n \"2019-05-08\", \"The `rate` parameter is 
deprecated. Use `scale` instead.\"\n \"The `rate` parameter was always interpreted as a `scale`parameter, but \"\n \"erroneously misnamed.\")\n def rate(self):\n \"\"\"Scale parameter.\"\"\"\n return self._scale\n\n @property\n def scale(self):\n \"\"\"Scale parameter.\"\"\"\n return self._scale\n\n def _batch_shape_tensor(self):\n return tf.broadcast_dynamic_shape(\n tf.shape(input=self.concentration), tf.shape(input=self.scale))\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(self.concentration.shape,\n self.scale.shape)\n\n def _event_shape_tensor(self):\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n @distribution_util.AppendDocstring(\n \"\"\"Note: See `tf.random_gamma` docstring for sampling details and\n caveats.\"\"\")\n def _sample_n(self, n, seed=None):\n return 1. / tf.random.gamma(\n shape=[n],\n alpha=self.concentration,\n beta=self.scale,\n dtype=self.dtype,\n seed=seed)\n\n def _log_prob(self, x):\n return self._log_unnormalized_prob(x) - self._log_normalization()\n\n def _cdf(self, x):\n x = self._maybe_assert_valid_sample(x)\n # Note that igammac returns the upper regularized incomplete gamma\n # function Q(a, x), which is what we want for the CDF.\n return tf.math.igammac(self.concentration, self.scale / x)\n\n def _log_unnormalized_prob(self, x):\n x = self._maybe_assert_valid_sample(x)\n return -(1. + self.concentration) * tf.math.log(x) - self.scale / x\n\n def _log_normalization(self):\n return (tf.math.lgamma(self.concentration) -\n self.concentration * tf.math.log(self.scale))\n\n def _entropy(self):\n return (self.concentration + tf.math.log(self.scale) +\n tf.math.lgamma(self.concentration) -\n ((1. + self.concentration) * tf.math.digamma(self.concentration)))\n\n @distribution_util.AppendDocstring(\n \"\"\"The mean of an inverse gamma distribution is\n `scale / (concentration - 1)`, when `concentration > 1`, and `NaN`\n otherwise. If `self.allow_nan_stats` is `False`, an exception will be\n raised rather than returning `NaN`\"\"\")\n def _mean(self):\n mean = self.scale / (self.concentration - 1.)\n if self.allow_nan_stats:\n nan = tf.fill(\n self.batch_shape_tensor(),\n np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),\n name=\"nan\")\n return tf.where(self.concentration > 1., mean, nan)\n else:\n return distribution_util.with_dependencies([\n tf.compat.v1.assert_less(\n tf.ones([], self.dtype),\n self.concentration,\n message=\"mean undefined when any concentration <= 1\"),\n ], mean)\n\n @distribution_util.AppendDocstring(\n \"\"\"Variance for inverse gamma is defined only for `concentration > 2`. If\n `self.allow_nan_stats` is `False`, an exception will be raised rather\n than returning `NaN`.\"\"\")\n def _variance(self):\n var = (\n tf.square(self.scale) / tf.square(self.concentration - 1.) /\n (self.concentration - 2.))\n if self.allow_nan_stats:\n nan = tf.fill(\n self.batch_shape_tensor(),\n np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),\n name=\"nan\")\n return tf.where(self.concentration > 2., var, nan)\n else:\n return distribution_util.with_dependencies([\n tf.compat.v1.assert_less(\n tf.constant(2., dtype=self.dtype),\n self.concentration,\n message=\"variance undefined when any concentration <= 2\"),\n ], var)\n\n @distribution_util.AppendDocstring(\n \"\"\"The mode of an inverse gamma distribution is `scale / (concentration +\n 1)`.\"\"\")\n def _mode(self):\n return self.scale / (1. 
+ self.concentration)\n\n def _maybe_assert_valid_sample(self, x):\n tf.debugging.assert_same_float_dtype(tensors=[x], dtype=self.dtype)\n if not self.validate_args:\n return x\n return distribution_util.with_dependencies([\n tf.compat.v1.assert_positive(x),\n ], x)\n\n\nclass _InverseGammaWithSoftplusConcentrationScale(InverseGamma):\n \"\"\"`InverseGamma` with softplus of `concentration` and `scale`.\"\"\"\n\n @deprecation.deprecated_args(\n \"2019-05-08\", \"The `rate` parameter is deprecated. Use `scale` instead.\"\n \"The `rate` parameter was always interpreted as a `scale`parameter, but \"\n \"erroneously misnamed.\", \"rate\")\n def __init__(self,\n concentration,\n scale=None,\n validate_args=False,\n allow_nan_stats=True,\n rate=None,\n name=\"InverseGammaWithSoftplusConcentrationScale\"):\n if rate is not None:\n scale = rate\n parameters = dict(locals())\n with tf.compat.v1.name_scope(name, values=[concentration, scale]) as name:\n dtype = dtype_util.common_dtype([concentration, scale])\n concentration = tf.convert_to_tensor(\n value=concentration, name=\"softplus_concentration\", dtype=dtype)\n scale = tf.convert_to_tensor(\n value=scale, name=\"softplus_scale\", dtype=dtype)\n super(_InverseGammaWithSoftplusConcentrationScale, self).__init__(\n concentration=tf.nn.softplus(\n concentration, name=\"softplus_concentration\"),\n scale=tf.nn.softplus(scale, name=\"softplus_scale\"),\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n name=name)\n self._parameters = parameters\n\n\n_rate_deprecator = deprecation.deprecated(\n \"2019-06-05\",\n \"InverseGammaWithSoftplusConcentrationRate is deprecated, use \"\n \"InverseGamma(concentration=tf.nn.softplus(concentration), \"\n \"scale=tf.nn.softplus(scale)) instead.\",\n warn_once=True)\n# pylint: disable=invalid-name\nInverseGammaWithSoftplusConcentrationRate = _rate_deprecator(\n _InverseGammaWithSoftplusConcentrationScale)\n\n_scale_deprecator = deprecation.deprecated(\n \"2019-06-05\",\n \"InverseGammaWithSoftplusConcentrationScale is deprecated, use \"\n \"InverseGamma(concentration=tf.nn.softplus(concentration), \"\n \"scale=tf.nn.softplus(scale)) instead.\",\n warn_once=True)\nInverseGammaWithSoftplusConcentrationScale = _scale_deprecator(\n _InverseGammaWithSoftplusConcentrationScale)\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Layers for combining `tfp.distributions` and `tf.keras`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\n# Dependency imports\nimport numpy as np\nimport tensorflow as tf\n\n# By importing `distributions` as `tfd`, docstrings will show\n# `tfd.Distribution`. We import `bijectors` the same way, for consistency.\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import distribution_util as dist_util\nfrom tensorflow_probability.python.layers.internal import distribution_tensor_coercible as dtc\nfrom tensorflow.python.keras.utils import tf_utils as keras_tf_utils\n\n\n__all__ = [\n 'CategoricalMixtureOfOneHotCategorical',\n 'DistributionLambda',\n 'IndependentBernoulli',\n 'IndependentLogistic',\n 'IndependentNormal',\n 'IndependentPoisson',\n 'KLDivergenceAddLoss',\n 'KLDivergenceRegularizer',\n 'MixtureLogistic',\n 'MixtureNormal',\n 'MixtureSameFamily',\n 'MultivariateNormalTriL',\n 'OneHotCategorical',\n 'VariationalGaussianProcess',\n]\n\n\nkeras_tf_utils.register_symbolic_tensor_type(\n dtc._TensorCoercible) # pylint: disable=protected-access\n\n\ndef _event_size(event_shape, name=None):\n \"\"\"Computes the number of elements in a tensor with shape `event_shape`.\n\n Args:\n event_shape: A tensor shape.\n name: The name to use for the tensor op to compute the number of elements\n (if such an op needs to be created).\n\n Returns:\n event_size: The number of elements in `tensor_shape`. Returns a numpy int\n when the number of elements can be computed immediately. Otherwise, returns\n a scalar tensor.\n \"\"\"\n with tf.compat.v1.name_scope(name, 'event_size', [event_shape]):\n event_shape = tf.convert_to_tensor(\n value=event_shape, dtype=tf.int32, name='event_shape')\n\n event_shape_const = tf.get_static_value(event_shape)\n if event_shape_const is not None:\n return np.prod(event_shape_const)\n else:\n return tf.reduce_prod(input_tensor=event_shape)\n\n\nclass DistributionLambda(tf.keras.layers.Lambda):\n \"\"\"Keras layer enabling plumbing TFP distributions through Keras models.\n\n A `DistributionLambda` is minimially characterized by a function that returns\n a `tfp.distributions.Distribution` instance.\n\n Since subsequent Keras layers are functions of tensors, a `DistributionLambda`\n also defines how the `tfp.distributions.Distribution` shall be \"concretized\"\n as a tensor. By default, a distribution is represented as a tensor via a\n random draw, e.g., `tfp.distributions.Distribution.sample`. 
Alternatively the\n user may provide a `callable` taking the distribution instance and producing a\n `tf.Tensor`.\n\n #### Examples\n\n ```python\n tfk = tf.keras\n tfkl = tf.keras.layers\n tfd = tfp.distributions\n tfpl = tfp.layers\n\n event_size = 7\n\n model = tfk.Sequential([\n tfkl.Dense(2),\n tfpl.DistributionLambda(\n make_distribution_fn=lambda t: tfd.Normal(\n loc=t[..., 0:1], scale=tf.exp(t[..., 1:2])),\n convert_to_tensor_fn=lambda s: s.sample(5))\n ])\n # ==> Normal (batch_shape=[1]) instance parametrized by mean and log scale.\n ```\n\n \"\"\"\n\n def __init__(self,\n make_distribution_fn,\n convert_to_tensor_fn=tfd.Distribution.sample,\n **kwargs):\n \"\"\"Create a `DistributionLambda` Keras layer.\n\n Args:\n make_distribution_fn: Python `callable` that takes previous layer outputs\n and returns a `tfd.Distribution` instance.\n convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n instance and returns a `tf.Tensor`-like object. For examples, see\n `class` docstring.\n Default value: `tfd.Distribution.sample`.\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n # TODO(b/120440642): See if something like this code block is needed.\n # if output_shape is None:\n # def default_output_shape(keras_input_shape):\n # output_shape = map(\n # _expand_rank_at_least_one,\n # [sample_shape, keras_input_shape[:-1], event_shape])\n # return tf.concat(output_shape, axis=0)\n # output_shape = default_output_shape\n\n if isinstance(convert_to_tensor_fn, property):\n convert_to_tensor_fn = convert_to_tensor_fn.fget\n\n def _fn(*fargs, **fkwargs):\n \"\"\"Wraps `make_distribution_fn` to return both dist and concrete value.\"\"\"\n distribution = dtc._TensorCoercible( # pylint: disable=protected-access\n distribution=make_distribution_fn(*fargs, **fkwargs),\n convert_to_tensor_fn=convert_to_tensor_fn)\n value = tf.convert_to_tensor(value=distribution)\n # TODO(b/126056144): Remove silent handle once we identify how/why Keras\n # is losing the distribution handle for activity_regularizer.\n value._tfp_distribution = distribution # pylint: disable=protected-access\n # TODO(b/120153609): Keras is incorrectly presuming everything is a\n # `tf.Tensor`. Closing this bug entails ensuring Keras only accesses\n # `tf.Tensor` properties after calling `tf.convert_to_tensor`.\n distribution.shape = value.shape\n distribution.get_shape = value.get_shape\n return distribution, value\n\n super(DistributionLambda, self).__init__(_fn, **kwargs)\n\n # We'll need to keep track of who's calling who since the functional\n # API has a different way of injecting `_keras_history` than the\n # `keras.Sequential` way.\n self._enter_dunder_call = False\n\n def __call__(self, inputs, *args, **kwargs):\n self._enter_dunder_call = True\n distribution, _ = super(DistributionLambda, self).__call__(\n inputs, *args, **kwargs)\n self._enter_dunder_call = False\n return distribution\n\n def call(self, inputs, *args, **kwargs):\n distribution, value = super(DistributionLambda, self).call(\n inputs, *args, **kwargs)\n if self._enter_dunder_call:\n # Its critical to return both distribution and concretization\n # so Keras can inject `_keras_history` to both. This is what enables\n # either to be used as an input to another Keras `Model`.\n return distribution, value\n return distribution\n\n\n# TODO(b/120160878): Add more shape validation logic to each layer. 
Consider\n# also adding additional functionality to help the user determine the\n# appropriate size of the parameterizing vector.\n\n\nclass MultivariateNormalTriL(DistributionLambda):\n \"\"\"A `d`-variate MVNTriL Keras layer from `d + d * (d + 1) // 2` params.\n\n Typical choices for `convert_to_tensor_fn` include:\n\n - `tfd.Distribution.sample`\n - `tfd.Distribution.mean`\n - `tfd.Distribution.mode`\n - `lambda s: s.mean() + 0.1 * s.stddev()`\n\n\n #### Example\n\n ```python\n tfk = tf.keras\n tfkl = tf.keras.layers\n tfd = tfp.distributions\n tfpl = tfp.layers\n\n # Load data.\n n = int(1e3)\n scale_tril = np.array([[1.6180, 0.],\n [-2.7183, 3.1416]]).astype(np.float32)\n x = tfd.Normal(loc=0, scale=1).sample([n, 2])\n eps = tfd.Normal(loc=0, scale=0.01).sample([n, 2])\n y = tf.matmul(x, scale_tril) + eps\n\n # Create model.\n d = tf.dimension_value(y.shape[-1])\n model = tfk.Sequential([\n tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(d)),\n tfpl.MultivariateNormalTriL(d),\n ])\n\n # Fit.\n model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.02),\n loss=lambda y, model: -model.log_prob(y),\n metrics=[])\n batch_size = 100\n model.fit(x, y,\n batch_size=batch_size,\n epochs=500,\n steps_per_epoch=n // batch_size,\n verbose=True,\n shuffle=True)\n model.get_weights()[0][:, :2]\n # ==> [[ 1.61842895e+00 1.34138885e-04]\n # [ -2.71818233e+00 3.14186454e+00]]\n ```\n\n \"\"\"\n\n def __init__(self,\n event_size,\n convert_to_tensor_fn=tfd.Distribution.sample,\n validate_args=False,\n **kwargs):\n \"\"\"Initialize the `MultivariateNormalTriL` layer.\n\n Args:\n event_size: Scalar `int` representing the size of single draw from this\n distribution.\n convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n instance and returns a `tf.Tensor`-like object. For examples, see\n `class` docstring.\n Default value: `tfd.Distribution.sample`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n super(MultivariateNormalTriL, self).__init__(\n lambda t: type(self).new(t, event_size, validate_args),\n convert_to_tensor_fn,\n **kwargs)\n\n @staticmethod\n def new(params, event_size, validate_args=False, name=None):\n \"\"\"Create the distribution instance from a `params` vector.\"\"\"\n with tf.compat.v1.name_scope(name, 'MultivariateNormalTriL',\n [params, event_size]):\n params = tf.convert_to_tensor(value=params, name='params')\n scale_tril = tfb.ScaleTriL(\n diag_shift=np.array(1e-5, params.dtype.as_numpy_dtype()),\n validate_args=validate_args)\n return tfd.MultivariateNormalTriL(\n loc=params[..., :event_size],\n scale_tril=scale_tril(params[..., event_size:]),\n validate_args=validate_args)\n\n @staticmethod\n def params_size(event_size, name=None):\n \"\"\"The number of `params` needed to create a single distribution.\"\"\"\n with tf.compat.v1.name_scope(name, 'MultivariateNormalTriL_params_size',\n [event_size]):\n return event_size + event_size * (event_size + 1) // 2\n\n\nclass OneHotCategorical(DistributionLambda):\n \"\"\"A `d`-variate OneHotCategorical Keras layer from `d` params.\n\n Typical choices for `convert_to_tensor_fn` include:\n\n - `tfd.Distribution.sample`\n - `tfd.Distribution.mean`\n - `tfd.Distribution.mode`\n - `tfd.OneHotCategorical.logits`\n\n\n #### Example\n\n ```python\n tfk = tf.keras\n tfkl = tf.keras.layers\n tfd = tfp.distributions\n tfpl = tfp.layers\n\n # Load data.\n n = int(1e4)\n scale_noise = 0.01\n x = tfd.Normal(loc=0, scale=1).sample([n, 2])\n eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 1])\n y = tfd.OneHotCategorical(\n logits=tf.pad(0.3142 + 1.6180 * x[..., :1] - 2.7183 * x[..., 1:] + eps,\n paddings=[[0, 0], [1, 0]]),\n dtype=tf.float32).sample()\n\n # Create model.\n d = tf.dimension_value(y.shape[-1])\n model = tfk.Sequential([\n tfk.layers.Dense(tfpl.OneHotCategorical.params_size(d) - 1),\n tfk.layers.Lambda(lambda x: tf.pad(x, paddings=[[0, 0], [1, 0]])),\n tfpl.OneHotCategorical(d),\n ])\n\n # Fit.\n model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.5),\n loss=lambda y, model: -model.log_prob(y),\n metrics=[])\n batch_size = 100\n model.fit(x, y,\n batch_size=batch_size,\n epochs=10,\n steps_per_epoch=n // batch_size,\n shuffle=True)\n model.get_weights()\n # ==> [np.array([[1.6180],\n # [-2.7183]], np.float32),\n # np.array([0.3142], np.float32)] # Within 15% rel. error.\n ```\n\n \"\"\"\n\n def __init__(self,\n event_size,\n convert_to_tensor_fn=tfd.Distribution.sample,\n sample_dtype=None,\n validate_args=False,\n **kwargs):\n \"\"\"Initialize the `OneHotCategorical` layer.\n\n Args:\n event_size: Scalar `int` representing the size of single draw from this\n distribution.\n convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n instance and returns a `tf.Tensor`-like object. For examples, see\n `class` docstring.\n Default value: `tfd.Distribution.sample`.\n sample_dtype: `dtype` of samples produced by this distribution.\n Default value: `None` (i.e., previous layer's `dtype`).\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n super(OneHotCategorical, self).__init__(\n lambda t: type(self).new(t, event_size, sample_dtype, validate_args),\n convert_to_tensor_fn,\n **kwargs)\n\n @staticmethod\n def new(params, event_size, dtype=None, validate_args=False, name=None):\n \"\"\"Create the distribution instance from a `params` vector.\"\"\"\n with tf.compat.v1.name_scope(name, 'OneHotCategorical',\n [params, event_size]):\n return tfd.OneHotCategorical(\n logits=params,\n dtype=dtype or params.dtype.base_dtype,\n validate_args=validate_args)\n\n @staticmethod\n def params_size(event_size, name=None):\n \"\"\"The number of `params` needed to create a single distribution.\"\"\"\n return event_size\n\n\nclass CategoricalMixtureOfOneHotCategorical(DistributionLambda):\n \"\"\"A OneHotCategorical mixture Keras layer from `k * (1 + d)` params.\n\n `k` (i.e., `num_components`) represents the number of component\n `OneHotCategorical` distributions and `d` (i.e., `event_size`) represents the\n number of categories within each `OneHotCategorical` distribution.\n\n Typical choices for `convert_to_tensor_fn` include:\n\n - `tfd.Distribution.sample`\n - `tfd.Distribution.mean`\n - `tfd.Distribution.mode`\n - `lambda s: s.log_mean()`\n\n\n #### Example\n\n ```python\n tfk = tf.keras\n tfkl = tf.keras.layers\n tfd = tfp.distributions\n tfpl = tfp.layers\n\n # Load data.\n n = int(1e4)\n scale_noise = 0.01\n x = tfd.Normal(loc=0, scale=1).sample([n, 2])\n eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 1])\n y = tfd.OneHotCategorical(\n logits=tf.pad(0.3142 + 1.6180 * x[..., :1] - 2.7183 * x[..., 1:] + eps,\n paddings=[[0, 0], [1, 0]]),\n dtype=tf.float32).sample()\n\n # Create model.\n d = tf.dimension_value(y.shape[-1])\n k = 2\n model = tfk.Sequential([\n tfkl.Dense(tfpl.CategoricalMixtureOfOneHotCategorical.params_size(d, k)),\n tfpl.CategoricalMixtureOfOneHotCategorical(d, k),\n ])\n\n # Fit.\n model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.5),\n loss=lambda y, model: -tf.reduce_mean(model.log_prob(y)),\n metrics=[])\n batch_size = 100\n model.fit(x, y,\n batch_size=batch_size,\n epochs=10,\n steps_per_epoch=n // batch_size,\n shuffle=True)\n print(model.get_weights())\n ```\n\n \"\"\"\n\n def __init__(self,\n event_size,\n num_components,\n convert_to_tensor_fn=tfd.Distribution.sample,\n sample_dtype=None,\n validate_args=False,\n **kwargs):\n \"\"\"Initialize the `CategoricalMixtureOfOneHotCategorical` layer.\n\n Args:\n event_size: Scalar `int` representing the size of single draw from this\n distribution.\n num_components: Scalar `int` representing the number of mixture\n components. Must be at least 1. (If `num_components=1`, it's more\n efficient to use the `OneHotCategorical` layer.)\n convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n instance and returns a `tf.Tensor`-like object. For examples, see\n `class` docstring.\n Default value: `tfd.Distribution.sample`.\n sample_dtype: `dtype` of samples produced by this distribution.\n Default value: `None` (i.e., previous layer's `dtype`).\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n super(CategoricalMixtureOfOneHotCategorical, self).__init__(\n lambda t: type(self).new( # pylint: disable=g-long-lambda\n t, event_size, num_components, sample_dtype, validate_args),\n convert_to_tensor_fn,\n **kwargs)\n\n @staticmethod\n def new(params, event_size, num_components,\n dtype=None, validate_args=False, name=None):\n \"\"\"Create the distribution instance from a `params` vector.\"\"\"\n with tf.compat.v1.name_scope(name, 'CategoricalMixtureOfOneHotCategorical',\n [params, event_size, num_components]):\n dist = MixtureSameFamily.new(\n params,\n num_components,\n OneHotCategorical(\n event_size,\n validate_args=False, # So we can eval on simplex interior.\n name=name),\n validate_args=validate_args,\n name=name)\n # pylint: disable=protected-access\n dist._mean = functools.partial(\n _eval_all_one_hot, tfd.Distribution.prob, dist)\n dist.log_mean = functools.partial(\n _eval_all_one_hot, tfd.Distribution.log_prob, dist)\n # pylint: enable=protected-access\n return dist\n\n @staticmethod\n def params_size(event_size, num_components, name=None):\n \"\"\"The number of `params` needed to create a single distribution.\"\"\"\n with tf.compat.v1.name_scope(\n name, 'CategoricalMixtureOfOneHotCategorical_params_size',\n [event_size, num_components]):\n return MixtureSameFamily.params_size(\n num_components,\n OneHotCategorical.params_size(event_size, name=name),\n name=name)\n\n\nclass IndependentBernoulli(DistributionLambda):\n \"\"\"An Independent-Bernoulli Keras layer from `prod(event_shape)` params.\n\n Typical choices for `convert_to_tensor_fn` include:\n\n - `tfd.Distribution.sample`\n - `tfd.Distribution.mean`\n - `tfd.Distribution.mode`\n - `tfd.Bernoulli.logits`\n\n\n #### Example\n\n ```python\n tfk = tf.keras\n tfkl = tf.keras.layers\n tfd = tfp.distributions\n tfpl = tfp.layers\n\n # Load data.\n n = int(1e4)\n scale_tril = np.array([[1.6180, 0.],\n [-2.7183, 3.1416]]).astype(np.float32)\n scale_noise = 0.01\n x = tfd.Normal(loc=0, scale=1).sample([n, 2])\n eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 2])\n y = tfd.Bernoulli(logits=tf.reshape(\n tf.matmul(x, scale_tril) + eps,\n shape=[n, 1, 2, 1])).sample()\n\n # Create model.\n event_shape = y.shape[1:].as_list()\n model = tfk.Sequential([\n tfkl.Dense(tfpl.IndependentBernoulli.params_size(event_shape)),\n tfpl.IndependentBernoulli(event_shape),\n ])\n\n # Fit.\n model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.5),\n loss=lambda y, model: -model.log_prob(y),\n metrics=[])\n batch_size = 100\n model.fit(x, y,\n batch_size=batch_size,\n epochs=10,\n steps_per_epoch=n // batch_size,\n shuffle=True)\n print(model.get_weights())\n # ==> [np.array([[1.6180, 0.],\n # [-2.7183, 3.1416]], np.float32),\n # array([0., 0.], np.float32)] # Within 15% rel. error.\n ```\n\n \"\"\"\n\n def __init__(self,\n event_shape=(),\n convert_to_tensor_fn=tfd.Distribution.sample,\n sample_dtype=None,\n validate_args=False,\n **kwargs):\n \"\"\"Initialize the `IndependentBernoulli` layer.\n\n Args:\n event_shape: integer vector `Tensor` representing the shape of single\n draw from this distribution.\n convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n instance and returns a `tf.Tensor`-like object. 
For examples, see\n `class` docstring.\n Default value: `tfd.Distribution.sample`.\n sample_dtype: `dtype` of samples produced by this distribution.\n Default value: `None` (i.e., previous layer's `dtype`).\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n super(IndependentBernoulli, self).__init__(\n lambda t: type(self).new(t, event_shape, sample_dtype, validate_args),\n convert_to_tensor_fn,\n **kwargs)\n\n @staticmethod\n def new(params, event_shape=(), dtype=None, validate_args=False, name=None):\n \"\"\"Create the distribution instance from a `params` vector.\"\"\"\n with tf.compat.v1.name_scope(name, 'IndependentBernoulli',\n [params, event_shape]):\n params = tf.convert_to_tensor(value=params, name='params')\n event_shape = dist_util.expand_to_vector(\n tf.convert_to_tensor(\n value=event_shape, name='event_shape', dtype_hint=tf.int32),\n tensor_name='event_shape')\n new_shape = tf.concat([\n tf.shape(input=params)[:-1],\n event_shape,\n ], axis=0)\n dist = tfd.Independent(\n tfd.Bernoulli(\n logits=tf.reshape(params, new_shape),\n dtype=dtype or params.dtype.base_dtype,\n validate_args=validate_args),\n reinterpreted_batch_ndims=tf.size(input=event_shape),\n validate_args=validate_args)\n dist._logits = dist.distribution._logits # pylint: disable=protected-access\n dist._probs = dist.distribution._probs # pylint: disable=protected-access\n dist.logits = tfd.Bernoulli.logits\n dist.probs = tfd.Bernoulli.probs\n return dist\n\n @staticmethod\n def params_size(event_shape=(), name=None):\n \"\"\"The number of `params` needed to create a single distribution.\"\"\"\n with tf.compat.v1.name_scope(name, 'IndependentBernoulli_params_size',\n [event_shape]):\n event_shape = tf.convert_to_tensor(\n value=event_shape, name='event_shape', dtype_hint=tf.int32)\n return _event_size(\n event_shape, name=name or 'IndependentBernoulli_params_size')\n\n\ndef _eval_all_one_hot(fn, dist, name=None):\n \"\"\"OneHotCategorical helper computing probs, cdf, etc over its support.\"\"\"\n with tf.compat.v1.name_scope(name, 'eval_all_one_hot'):\n event_size = dist.event_shape_tensor()[-1]\n batch_ndims = tf.size(input=dist.batch_shape_tensor())\n # Reshape `eye(d)` to: `[d] + [1]*batch_ndims + [d]`.\n x = tf.reshape(\n tf.eye(event_size, dtype=dist.dtype),\n shape=tf.pad(\n tensor=tf.ones(batch_ndims, tf.int32),\n paddings=[[1, 1]],\n constant_values=event_size))\n # Compute `fn(x)` then cyclically left-transpose one dim.\n perm = tf.pad(tensor=tf.range(1, batch_ndims + 1), paddings=[[0, 1]])\n return tf.transpose(a=fn(dist, x), perm=perm)\n\n\nclass IndependentLogistic(DistributionLambda):\n \"\"\"An independent logistic Keras layer.\n\n ### Example\n\n ```python\n tfd = tfp.distributions\n tfpl = tfp.layers\n tfk = tf.keras\n tfkl = tf.keras.layers\n\n # Create a stochastic encoder -- e.g., for use in a variational auto-encoder.\n input_shape = [28, 28, 1]\n encoded_shape = 2\n encoder = tfk.Sequential([\n tfkl.InputLayer(input_shape=input_shape),\n tfkl.Flatten(),\n tfkl.Dense(10, activation='relu'),\n tfkl.Dense(tfpl.IndependentLogistic.params_size(encoded_shape)),\n tfpl.IndependentLogistic(encoded_shape)\n ])\n ```\n\n \"\"\"\n\n def __init__(self,\n event_shape=(),\n convert_to_tensor_fn=tfd.Distribution.sample,\n 
validate_args=False,\n **kwargs):\n \"\"\"Initialize the `IndependentLogistic` layer.\n\n Args:\n event_shape: integer vector `Tensor` representing the shape of single\n draw from this distribution.\n convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n instance and returns a `tf.Tensor`-like object.\n Default value: `tfd.Distribution.sample`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n super(IndependentLogistic, self).__init__(\n lambda t: type(self).new(t, event_shape, validate_args),\n convert_to_tensor_fn,\n **kwargs)\n\n @staticmethod\n def new(params, event_shape=(), validate_args=False, name=None):\n \"\"\"Create the distribution instance from a `params` vector.\"\"\"\n with tf.compat.v1.name_scope(name, 'IndependentLogistic',\n [params, event_shape]):\n params = tf.convert_to_tensor(value=params, name='params')\n event_shape = dist_util.expand_to_vector(\n tf.convert_to_tensor(\n value=event_shape, name='event_shape', dtype_hint=tf.int32),\n tensor_name='event_shape')\n output_shape = tf.concat([\n tf.shape(input=params)[:-1],\n event_shape,\n ],\n axis=0)\n loc_params, scale_params = tf.split(params, 2, axis=-1)\n return tfd.Independent(\n tfd.Logistic(\n loc=tf.reshape(loc_params, output_shape),\n scale=tf.math.softplus(tf.reshape(scale_params, output_shape)),\n validate_args=validate_args),\n reinterpreted_batch_ndims=tf.size(input=event_shape),\n validate_args=validate_args)\n\n @staticmethod\n def params_size(event_shape=(), name=None):\n \"\"\"The number of `params` needed to create a single distribution.\"\"\"\n with tf.compat.v1.name_scope(name, 'IndependentLogistic_params_size',\n [event_shape]):\n event_shape = tf.convert_to_tensor(\n value=event_shape, name='event_shape', dtype_hint=tf.int32)\n return 2 * _event_size(\n event_shape, name=name or 'IndependentLogistic_params_size')\n\n\nclass IndependentNormal(DistributionLambda):\n \"\"\"An independent normal Keras layer.\n\n ### Example\n\n ```python\n tfd = tfp.distributions\n tfpl = tfp.layers\n tfk = tf.keras\n tfkl = tf.keras.layers\n\n # Create a stochastic encoder -- e.g., for use in a variational auto-encoder.\n input_shape = [28, 28, 1]\n encoded_shape = 2\n encoder = tfk.Sequential([\n tfkl.InputLayer(input_shape=input_shape),\n tfkl.Flatten(),\n tfkl.Dense(10, activation='relu'),\n tfkl.Dense(tfpl.IndependentNormal.params_size(encoded_shape)),\n tfpl.IndependentNormal(encoded_shape)\n ])\n ```\n\n \"\"\"\n\n def __init__(self,\n event_shape=(),\n convert_to_tensor_fn=tfd.Distribution.sample,\n validate_args=False,\n **kwargs):\n \"\"\"Initialize the `IndependentNormal` layer.\n\n Args:\n event_shape: integer vector `Tensor` representing the shape of single\n draw from this distribution.\n convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n instance and returns a `tf.Tensor`-like object.\n Default value: `tfd.Distribution.sample`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n super(IndependentNormal, self).__init__(\n lambda t: type(self).new(t, event_shape, validate_args),\n convert_to_tensor_fn,\n **kwargs)\n\n @staticmethod\n def new(params, event_shape=(), validate_args=False, name=None):\n \"\"\"Create the distribution instance from a `params` vector.\"\"\"\n with tf.compat.v1.name_scope(name, 'IndependentNormal',\n [params, event_shape]):\n params = tf.convert_to_tensor(value=params, name='params')\n event_shape = dist_util.expand_to_vector(\n tf.convert_to_tensor(\n value=event_shape, name='event_shape', dtype_hint=tf.int32),\n tensor_name='event_shape')\n output_shape = tf.concat([\n tf.shape(input=params)[:-1],\n event_shape,\n ],\n axis=0)\n loc_params, scale_params = tf.split(params, 2, axis=-1)\n return tfd.Independent(\n tfd.Normal(\n loc=tf.reshape(loc_params, output_shape),\n scale=tf.math.softplus(tf.reshape(scale_params, output_shape)),\n validate_args=validate_args),\n reinterpreted_batch_ndims=tf.size(input=event_shape),\n validate_args=validate_args)\n\n @staticmethod\n def params_size(event_shape=(), name=None):\n \"\"\"The number of `params` needed to create a single distribution.\"\"\"\n with tf.compat.v1.name_scope(name, 'IndependentNormal_params_size',\n [event_shape]):\n event_shape = tf.convert_to_tensor(\n value=event_shape, name='event_shape', dtype_hint=tf.int32)\n return 2 * _event_size(\n event_shape, name=name or 'IndependentNormal_params_size')\n\n\nclass IndependentPoisson(DistributionLambda):\n \"\"\"An independent Poisson Keras layer.\n\n ### Example\n\n ```python\n tfd = tfp.distributions\n tfpl = tfp.layers\n tfk = tf.keras\n tfkl = tf.keras.layers\n\n # Create example data.\n n = 2000\n d = 4\n x = tfd.Uniform(low=1., high=10.).sample([n, d])\n w = [[3.14], [2.72], [-1.62], [0.577]]\n log_rate = tf.matmul(x, w) - 0.141\n y = tfd.Poisson(log_rate=log_rate).sample()\n\n # Poisson regression model.\n model = tfk.Sequential([\n tfkl.Dense(tfpl.IndependentPoisson.params_size(1)),\n tfpl.IndependentPoisson(1)\n ])\n\n # Fit.\n model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.05),\n loss=lambda y, model: -model.log_prob(y),\n metrics=[])\n batch_size = 50\n model.fit(x, y,\n batch_size=batch_size,\n epochs=20,\n steps_per_epoch=n // batch_size,\n verbose=True,\n shuffle=True)\n print(model.get_weights())\n ```\n\n \"\"\"\n\n def __init__(self,\n event_shape=(),\n convert_to_tensor_fn=tfd.Distribution.sample,\n validate_args=False,\n **kwargs):\n \"\"\"Initialize the `IndependentPoisson` layer.\n\n Args:\n event_shape: integer vector `Tensor` representing the shape of single\n draw from this distribution.\n convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n instance and returns a `tf.Tensor`-like object.\n Default value: `tfd.Distribution.sample`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n super(IndependentPoisson, self).__init__(\n lambda t: type(self).new(t, event_shape, validate_args),\n convert_to_tensor_fn,\n **kwargs)\n\n @staticmethod\n def new(params, event_shape=(), validate_args=False, name=None):\n \"\"\"Create the distribution instance from a `params` vector.\"\"\"\n with tf.compat.v1.name_scope(name, 'IndependentPoisson',\n [params, event_shape]):\n params = tf.convert_to_tensor(value=params, name='params')\n event_shape = dist_util.expand_to_vector(\n tf.convert_to_tensor(\n value=event_shape, name='event_shape', dtype_hint=tf.int32),\n tensor_name='event_shape')\n output_shape = tf.concat([\n tf.shape(input=params)[:-1],\n event_shape,\n ],\n axis=0)\n return tfd.Independent(\n tfd.Poisson(\n log_rate=tf.reshape(params, output_shape),\n validate_args=validate_args),\n reinterpreted_batch_ndims=tf.size(input=event_shape),\n validate_args=validate_args)\n\n @staticmethod\n def params_size(event_shape=(), name=None):\n \"\"\"The number of `params` needed to create a single distribution.\"\"\"\n with tf.compat.v1.name_scope(name, 'IndependentPoisson_params_size',\n [event_shape]):\n event_shape = tf.convert_to_tensor(\n value=event_shape, name='event_shape', dtype_hint=tf.int32)\n return _event_size(\n event_shape, name=name or 'IndependentPoisson_params_size')\n\n\nclass KLDivergenceRegularizer(tf.keras.regularizers.Regularizer):\n \"\"\"Regularizer that adds a KL divergence penalty to the model loss.\n\n When using Monte Carlo approximation (e.g., `use_exact=False`), it is presumed\n that the input distribution's concretization (i.e.,\n `tf.convert_to_tensor(distribution)`) corresponds to a random sample. To\n override this behavior, set `test_points_fn`.\n\n #### Example\n\n ```python\n tfd = tfp.distributions\n tfpl = tfp.layers\n tfk = tf.keras\n tfkl = tf.keras.layers\n\n # Create a variational encoder and add a KL Divergence penalty to the\n # loss that encourages marginal coherence with a unit-MVN (the \"prior\").\n input_shape = [28, 28, 1]\n encoded_size = 2\n variational_encoder = tfk.Sequential([\n tfkl.InputLayer(input_shape=input_shape),\n tfkl.Flatten(),\n tfkl.Dense(10, activation='relu'),\n tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size)),\n tfpl.MultivariateNormalTriL(\n encoded_size,\n lambda s: s.sample(10),\n activity_regularizer=tfpl.KLDivergenceRegularizer(\n tfd.MultivariateNormalDiag(loc=tf.zeros(encoded_size)),\n weight=num_train_samples)),\n ])\n ```\n\n \"\"\"\n\n def __init__(self,\n distribution_b,\n use_exact_kl=False,\n test_points_reduce_axis=(), # `None` == \"all\"; () == \"none\".\n test_points_fn=tf.convert_to_tensor,\n weight=None):\n \"\"\"Initialize the `KLDivergenceRegularizer` regularizer.\n\n Args:\n distribution_b: distribution instance corresponding to `b` as in\n `KL[a, b]`. The previous layer's output is presumed to be a\n `Distribution` instance and is `a`).\n use_exact_kl: Python `bool` indicating if KL divergence should be\n calculated exactly via `tfp.distributions.kl_divergence` or via Monte\n Carlo approximation.\n Default value: `False`.\n test_points_reduce_axis: `int` vector or scalar representing dimensions\n over which to `reduce_mean` while calculating the Monte Carlo\n approximation of the KL divergence. 
As is with all `tf.reduce_*` ops,\n `None` means reduce over all dimensions; `()` means reduce over none of\n them.\n Default value: `()` (i.e., no reduction).\n test_points_fn: Python `callable` taking a `Distribution` instance and\n returning a `Tensor` used for random test points to approximate the KL\n divergence.\n Default value: `tf.convert_to_tensor`.\n weight: Multiplier applied to the calculated KL divergence for each Keras\n batch member.\n Default value: `None` (i.e., do not weight each batch member).\n \"\"\"\n super(KLDivergenceRegularizer, self).__init__()\n self._kl_divergence_fn = _make_kl_divergence_fn(\n distribution_b,\n use_exact_kl=use_exact_kl,\n test_points_reduce_axis=test_points_reduce_axis,\n test_points_fn=test_points_fn,\n weight=weight)\n\n def __call__(self, distribution_a):\n # TODO(b/126056144): Remove reacquisition of distribution handle once we\n # identify how/why Keras lost it.\n if hasattr(distribution_a, '_tfp_distribution'):\n distribution_a = distribution_a._tfp_distribution # pylint: disable=protected-access\n return self._kl_divergence_fn(distribution_a)\n\n\n# TODO(b/120307671): Once this bug is resolved, consider deprecating\n# `KLDivergenceAddLoss` and instead having users do:\n# `activity_regularizer=tfp.layers.KLDivergenceRegularizer`\n\n\nclass KLDivergenceAddLoss(tf.keras.layers.Layer):\n \"\"\"Pass-through layer that adds a KL divergence penalty to the model loss.\n\n When using Monte Carlo approximation (e.g., `use_exact=False`), it is presumed\n that the input distribution's concretization (i.e.,\n `tf.convert_to_tensor(distribution)`) corresponds to a random sample. To\n override this behavior, set `test_points_fn`.\n\n #### Example\n\n ```python\n tfd = tfp.distributions\n tfpl = tfp.layers\n tfk = tf.keras\n tfkl = tf.keras.layers\n\n # Create a variational encoder and add a KL Divergence penalty to the\n # loss that encourages marginal coherence with a unit-MVN (the \"prior\").\n input_shape = [28, 28, 1]\n encoded_size = 2\n variational_encoder = tfk.Sequential([\n tfkl.InputLayer(input_shape=input_shape),\n tfkl.Flatten(),\n tfkl.Dense(10, activation='relu'),\n tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size)),\n tfpl.MultivariateNormalTriL(encoded_size, lambda s: s.sample(10)),\n tfpl.KLDivergenceAddLoss(\n tfd.MultivariateNormalDiag(loc=tf.zeros(encoded_size)),\n weight=num_train_samples),\n ])\n ```\n\n \"\"\"\n\n def __init__(self,\n distribution_b,\n use_exact_kl=False,\n test_points_reduce_axis=None,\n test_points_fn=tf.convert_to_tensor,\n weight=None,\n **kwargs):\n \"\"\"Initialize the `KLDivergenceAddLoss` (placeholder) layer.\n\n Args:\n distribution_b: distribution instance corresponding to `b` as in\n `KL[a, b]`. The previous layer's output is presumed to be a\n `Distribution` instance and is `a`).\n use_exact_kl: Python `bool` indicating if KL divergence should be\n calculated exactly via `tfp.distributions.kl_divergence` or via Monte\n Carlo approximation.\n Default value: `False`.\n test_points_reduce_axis: `int` vector or scalar representing dimensions\n over which to `reduce_mean` while calculating the Monte Carlo\n approximation of the KL divergence. 
As is with all `tf.reduce_*` ops,\n `None` means reduce over all dimensions; `()` means reduce over none of\n them.\n Default value: `()` (i.e., no reduction).\n test_points_fn: Python `callable` taking a `Distribution` instance and\n returning a `Tensor` used for random test points to approximate the KL\n divergence.\n Default value: `tf.convert_to_tensor`.\n weight: Multiplier applied to the calculated KL divergence for each Keras\n batch member.\n Default value: `None` (i.e., do not weight each batch member).\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n super(KLDivergenceAddLoss, self).__init__(**kwargs)\n self.is_placeholder = True\n # TODO(b/120307671): Call `_make_kl_divergence_fn` directly once this bug is\n # closed. Chaining things this way means we can have just one unit-test for\n # both `KLDivergenceAddLoss` and `KLDivergenceRegularizer`. That this is\n # good is because its not possible to idiomatically test\n # `KLDivergenceRegularizer` because of b/120307671.\n self._kl_divergence_fn = KLDivergenceRegularizer(\n distribution_b,\n use_exact_kl=use_exact_kl,\n test_points_reduce_axis=test_points_reduce_axis,\n test_points_fn=test_points_fn,\n weight=weight).__call__\n\n def call(self, distribution_a):\n self.add_loss(self._kl_divergence_fn(distribution_a),\n inputs=[distribution_a])\n return distribution_a\n\n\ndef _make_kl_divergence_fn(\n distribution_b,\n use_exact_kl=False,\n test_points_reduce_axis=(), # `None` == \"all\"; () == \"none\".\n test_points_fn=tf.convert_to_tensor,\n weight=None):\n \"\"\"Creates a callable computing `KL[a,b]` from `a`, a `tfd.Distribution`.\"\"\"\n\n if use_exact_kl is None:\n kl_divergence_fn = tfd.kl_divergence\n else:\n # Closure over: test_points_fn, test_points_reduce_axis.\n def kl_divergence_fn(distribution_a, distribution_b):\n z = test_points_fn(distribution_a)\n return tf.reduce_mean(\n input_tensor=distribution_a.log_prob(z) - distribution_b.log_prob(z),\n axis=test_points_reduce_axis)\n\n # Closure over: distribution_b, kl_divergence_fn, weight.\n def _fn(distribution_a):\n \"\"\"Closure that computes KLDiv as a function of `a` as in `KL[a, b]`.\"\"\"\n with tf.compat.v1.name_scope('kldivergence_loss'):\n # TODO(b/119756336): Due to eager/graph Jacobian graph caching bug\n # we add here the capability for deferred construction of the prior.\n # This capability can probably be removed once b/119756336 is resolved.\n distribution_b_ = (distribution_b() if callable(distribution_b)\n else distribution_b)\n kl = kl_divergence_fn(distribution_a, distribution_b_)\n if weight is not None:\n kl = tf.cast(weight, dtype=kl.dtype) * kl\n # Losses appended with the model.add_loss and are expected to be a single\n # scalar, unlike model.loss, which is expected to be the loss per sample.\n # Therefore, we reduce over all dimensions, regardless of the shape.\n # We take the sum because (apparently) Keras will add this to the *post*\n # `reduce_sum` (total) loss.\n # TODO(b/126259176): Add end-to-end Keras/TFP test to ensure the API's\n # align, particularly wrt how losses are aggregated (across batch\n # members).\n return tf.reduce_sum(input_tensor=kl, name='batch_total_kl_divergence')\n\n return _fn\n\n\nclass MixtureSameFamily(DistributionLambda):\n \"\"\"A mixture (same-family) Keras layer.\n\n ### Example\n\n ```python\n tfd = tfp.distributions\n tfpl = tfp.layers\n tfk = tf.keras\n tfkl = tf.keras.layers\n\n # Load data -- graph of a [cardioid](https://en.wikipedia.org/wiki/Cardioid).\n n = 2000\n t = 
tfd.Uniform(low=-np.pi, high=np.pi).sample([n, 1])\n r = 2 * (1 - tf.cos(t))\n x = r * tf.sin(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])\n y = r * tf.cos(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])\n\n # Model the distribution of y given x with a Mixture Density Network.\n event_shape = [1]\n num_components = 5\n params_size = tfpl.MixtureSameFamily.params_size(\n num_components,\n component_params_size=tfpl.IndependentNormal.params_size(event_shape))\n model = tfk.Sequential([\n tfkl.Dense(12, activation='relu'),\n tfkl.Dense(params_size, activation=None),\n tfpl.MixtureSameFamily(num_components, tfpl.IndependentNormal(event_shape)),\n ])\n\n # Fit.\n batch_size = 100\n model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.02),\n loss=lambda y, model: -model.log_prob(y))\n model.fit(x, y,\n batch_size=batch_size,\n epochs=20,\n steps_per_epoch=n // batch_size)\n ```\n\n \"\"\"\n\n def __init__(self,\n num_components,\n component_layer,\n convert_to_tensor_fn=tfd.Distribution.sample,\n validate_args=False,\n **kwargs):\n \"\"\"Initialize the `MixtureSameFamily` distribution layer.\n\n Args:\n num_components: Number of component distributions in the mixture\n distribution.\n component_layer: Python `callable` that, given a tensor of shape\n `batch_shape + [num_components, component_params_size]`, returns a\n `tfd.Distribution`-like instance that implements the component\n distribution (with batch shape `batch_shape + [num_components]`) --\n e.g., a TFP distribution layer.\n convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n instance and returns a `tf.Tensor`-like object.\n Default value: `tfd.Distribution.sample`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n super(MixtureSameFamily, self).__init__(\n lambda t: type(self).new( # pylint: disable=g-long-lambda\n t, num_components, component_layer, validate_args),\n convert_to_tensor_fn,\n **kwargs)\n\n @staticmethod\n def new(params, num_components, component_layer,\n validate_args=False, name=None):\n \"\"\"Create the distribution instance from a `params` vector.\"\"\"\n with tf.compat.v1.name_scope(name, 'MixtureSameFamily',\n [params, num_components, component_layer]):\n params = tf.convert_to_tensor(value=params, name='params')\n num_components = tf.convert_to_tensor(\n value=num_components, name='num_components', dtype_hint=tf.int32)\n\n components_dist = component_layer(\n tf.reshape(\n params[..., num_components:],\n tf.concat([tf.shape(input=params)[:-1], [num_components, -1]],\n axis=0)))\n mixture_dist = tfd.Categorical(logits=params[..., :num_components])\n return tfd.MixtureSameFamily(\n mixture_dist,\n components_dist,\n # TODO(b/120154797): Change following to `validate_args=True` after\n # fixing: \"ValueError: `mixture_distribution` must have scalar\n # `event_dim`s.\" assertion in MixtureSameFamily.\n validate_args=False)\n\n @staticmethod\n def params_size(num_components, component_params_size, name=None):\n \"\"\"Number of `params` needed to create a `MixtureSameFamily` distribution.\n\n Arguments:\n num_components: Number of component distributions in the mixture\n distribution.\n component_params_size: Number of parameters needed to create a single\n component distribution.\n name: The name to use for the op to compute the number of parameters\n (if such an op needs to be created).\n\n Returns:\n params_size: The number of parameters needed to create the mixture\n distribution.\n \"\"\"\n with tf.compat.v1.name_scope(name, 'MixtureSameFamily_params_size',\n [num_components, component_params_size]):\n num_components = tf.convert_to_tensor(\n value=num_components, name='num_components', dtype_hint=tf.int32)\n component_params_size = tf.convert_to_tensor(\n value=component_params_size, name='component_params_size')\n\n num_components = dist_util.prefer_static_value(num_components)\n component_params_size = dist_util.prefer_static_value(\n component_params_size)\n\n return num_components + num_components * component_params_size\n\n\nclass MixtureNormal(DistributionLambda):\n \"\"\"A mixture distribution Keras layer, with independent normal components.\n\n ### Example\n\n ```python\n tfd = tfp.distributions\n tfpl = tfp.layers\n tfk = tf.keras\n tfkl = tf.keras.layers\n\n # Load data -- graph of a [cardioid](https://en.wikipedia.org/wiki/Cardioid).\n n = 2000\n t = tfd.Uniform(low=-np.pi, high=np.pi).sample([n, 1])\n r = 2 * (1 - tf.cos(t))\n x = r * tf.sin(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])\n y = r * tf.cos(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])\n\n # Model the distribution of y given x with a Mixture Density Network.\n event_shape = [1]\n num_components = 5\n params_size = tfpl.MixtureNormal.params_size(num_components, event_shape)\n model = tfk.Sequential([\n tfkl.Dense(12, activation='relu'),\n tfkl.Dense(params_size, activation=None),\n tfpl.MixtureNormal(num_components, event_shape)\n ])\n\n # Fit.\n batch_size = 100\n model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.02),\n loss=lambda y, model: -model.log_prob(y))\n model.fit(x, y,\n batch_size=batch_size,\n 
epochs=20,\n steps_per_epoch=n // batch_size)\n ```\n\n \"\"\"\n\n def __init__(self,\n num_components,\n event_shape=(),\n convert_to_tensor_fn=tfd.Distribution.sample,\n validate_args=False,\n **kwargs):\n \"\"\"Initialize the `MixtureNormal` distribution layer.\n\n Args:\n num_components: Number of component distributions in the mixture\n distribution.\n event_shape: integer vector `Tensor` representing the shape of single\n draw from this distribution.\n convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n instance and returns a `tf.Tensor`-like object.\n Default value: `tfd.Distribution.sample`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n super(MixtureNormal, self).__init__(\n lambda t: type(self).new(t, num_components, event_shape, validate_args),\n convert_to_tensor_fn,\n **kwargs)\n\n @staticmethod\n def new(params, num_components, event_shape=(),\n validate_args=False, name=None):\n \"\"\"Create the distribution instance from a `params` vector.\"\"\"\n return MixtureSameFamily.new(\n params,\n num_components,\n IndependentNormal(event_shape, validate_args=validate_args, name=name),\n validate_args=validate_args,\n name=name)\n\n @staticmethod\n def params_size(num_components, event_shape=(), name=None):\n \"\"\"The number of `params` needed to create a single distribution.\"\"\"\n return MixtureSameFamily.params_size(\n num_components,\n IndependentNormal.params_size(event_shape, name=name),\n name=name)\n\n\nclass MixtureLogistic(DistributionLambda):\n \"\"\"A mixture distribution Keras layer, with independent logistic components.\n\n ### Example\n\n ```python\n tfd = tfp.distributions\n tfpl = tfp.layers\n tfk = tf.keras\n tfkl = tf.keras.layers\n\n # Load data -- graph of a [cardioid](https://en.wikipedia.org/wiki/Cardioid).\n n = 2000\n t = tfd.Uniform(low=-np.pi, high=np.pi).sample([n, 1])\n r = 2 * (1 - tf.cos(t))\n x = r * tf.sin(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])\n y = r * tf.cos(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])\n\n # Model the distribution of y given x with a Mixture Density Network.\n event_shape = [1]\n num_components = 5\n params_size = tfpl.MixtureLogistic.params_size(num_components, event_shape)\n model = tfk.Sequential([\n tfkl.Dense(12, activation='relu'),\n tfkl.Dense(params_size, activation=None),\n tfpl.MixtureLogistic(num_components, event_shape)\n ])\n\n # Fit.\n batch_size = 100\n model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.02),\n loss=lambda y, model: -model.log_prob(y))\n model.fit(x, y,\n batch_size=batch_size,\n epochs=20,\n steps_per_epoch=n // batch_size)\n ```\n\n \"\"\"\n\n def __init__(self,\n num_components,\n event_shape=(),\n convert_to_tensor_fn=tfd.Distribution.sample,\n validate_args=False,\n **kwargs):\n \"\"\"Initialize the `MixtureLogistic` distribution layer.\n\n Args:\n num_components: Number of component distributions in the mixture\n distribution.\n event_shape: integer vector `Tensor` representing the shape of single\n draw from this distribution.\n convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n instance and returns a `tf.Tensor`-like object.\n Default value: `tfd.Distribution.sample`.\n validate_args: Python `bool`, default `False`. 
When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n \"\"\"\n super(MixtureLogistic, self).__init__(\n lambda t: type(self).new(t, num_components, event_shape, validate_args),\n convert_to_tensor_fn,\n **kwargs)\n\n @staticmethod\n def new(params, num_components, event_shape=(),\n validate_args=False, name=None):\n \"\"\"Create the distribution instance from a `params` vector.\"\"\"\n return MixtureSameFamily.new(\n params,\n num_components,\n IndependentLogistic(\n event_shape, validate_args=validate_args, name=name),\n validate_args=validate_args,\n name=name)\n\n @staticmethod\n def params_size(num_components, event_shape=(), name=None):\n \"\"\"The number of `params` needed to create a single distribution.\"\"\"\n return MixtureSameFamily.params_size(\n num_components,\n IndependentLogistic.params_size(event_shape, name=name),\n name=name)\n\n\nclass VariationalGaussianProcess(DistributionLambda):\n \"\"\"A VariationalGaussianProcess Layer.\n\n Create a VariationalGaussianProcess distribtuion whose `index_points` are the\n inputs to the layer. Parameterized by number of inducing points and a\n `kernel_provider`, which should be a `tf.keras.Layer` with an @property that\n late-binds variable parameters to a\n `tfp.positive_semidefinite_kernel.PositiveSemidefiniteKernel` instance (this\n requirement has to do with the way that variables must be created in a keras\n model). The `mean_fn` is an optional argument which, if omitted, will be\n automatically configured to be a constant function with trainable variable\n output.\n \"\"\"\n\n def __init__(\n self,\n num_inducing_points,\n kernel_provider,\n event_shape=(1,),\n inducing_index_points_initializer=None,\n mean_fn=None,\n jitter=1e-6,\n name=None):\n \"\"\"Construct a VariationalGaussianProcess Layer.\n\n Args:\n num_inducing_points: number of inducing points in the\n VariationalGaussianProcess distribution.\n kernel_provider: a `Layer` instance equipped with an @property, which\n yields a `PositiveSemidefiniteKernel` instance. The latter is used to\n parameterize the constructed VariationalGaussianProcess distribution\n returned by calling the layer.\n event_shape: the shape of the output of the layer. This translates to a\n batch of underlying VariationalGaussianProcess distribtuions. For\n example, `event_shape = [3]` means we are modeling a batch of 3\n distributions over functions. We can think of this as a distrbution over\n 3-dimensional vector-valued functions.\n inducing_index_points_initializer: a `tf.keras.initializer.Initializer`\n used to initialize the trainable `inducing_index_points` variables.\n Training VGP's is pretty sensitive to choice of initial inducing index\n point locations. A reasonable heuristic is to scatter them near the\n data, not too close to each other.\n mean_fn: a callable that maps layer inputs to mean function values. Passed\n to the mean_fn parameter of VariationalGaussianProcess distribution. 
If\n omitted, defaults to a constant function with trainable variable value.\n jitter: a small term added to the diagonal of various kernel matrices for\n numerical stability.\n name: name to give to this layer and the scope of ops and variables it\n contains.\n \"\"\"\n super(VariationalGaussianProcess, self).__init__(\n lambda x: VariationalGaussianProcess.new( # pylint: disable=g-long-lambda\n x,\n kernel_provider=self._kernel_provider,\n event_shape=self._event_shape,\n inducing_index_points=self._inducing_index_points,\n variational_inducing_observations_loc=(\n self._variational_inducing_observations_loc),\n variational_inducing_observations_scale=(\n self._variational_inducing_observations_scale),\n mean_fn=self._mean_fn,\n observation_noise_variance=tf.nn.softplus(\n self._unconstrained_observation_noise_variance),\n jitter=self._jitter))\n\n tmp_kernel = kernel_provider.kernel\n self._dtype = tmp_kernel.dtype.as_numpy_dtype\n self._feature_ndims = tmp_kernel.feature_ndims\n self._num_inducing_points = num_inducing_points\n self._event_shape = tf.TensorShape(event_shape)\n self._mean_fn = mean_fn\n self._jitter = jitter\n self._inducing_index_points_initializer = inducing_index_points_initializer\n self._kernel_provider = kernel_provider\n\n def build(self, input_shape):\n input_feature_shape = input_shape[-self._feature_ndims:]\n\n inducing_index_points_shape = (\n self._event_shape.as_list() +\n [self._num_inducing_points] +\n input_feature_shape.as_list())\n\n if self._mean_fn is None:\n self.mean = self.add_variable(\n initializer=tf.compat.v1.initializers.constant([0.]),\n dtype=self._dtype,\n name='mean')\n self._mean_fn = lambda x: self.mean\n\n self._unconstrained_observation_noise_variance = self.add_variable(\n initializer=tf.compat.v1.initializers.constant(-10.),\n dtype=self._dtype,\n name='observation_noise_variance')\n\n self._inducing_index_points = self.add_variable(\n name='inducing_index_points',\n shape=inducing_index_points_shape,\n initializer=self._inducing_index_points_initializer,\n dtype=self._dtype)\n\n self._variational_inducing_observations_loc = self.add_variable(\n name='variational_inducing_observations_loc',\n shape=self._event_shape.as_list() + [self._num_inducing_points],\n initializer=tf.compat.v1.initializers.zeros(),\n dtype=self._dtype)\n\n eyes = (np.ones(self._event_shape.as_list() + [1, 1]) *\n np.eye(self._num_inducing_points, dtype=self._dtype))\n self._variational_inducing_observations_scale = self.add_variable(\n name='variational_inducing_observations_scale',\n shape=(self._event_shape.as_list() +\n [self._num_inducing_points, self._num_inducing_points]),\n initializer=tf.compat.v1.initializers.constant(1e-5 * eyes))\n\n @staticmethod\n def new(x,\n kernel_provider,\n event_shape,\n inducing_index_points,\n mean_fn,\n variational_inducing_observations_loc,\n variational_inducing_observations_scale,\n observation_noise_variance,\n jitter=1e-6,\n name=None):\n vgp = tfd.VariationalGaussianProcess(\n kernel=kernel_provider.kernel,\n index_points=x,\n inducing_index_points=inducing_index_points,\n variational_inducing_observations_loc=(\n variational_inducing_observations_loc),\n variational_inducing_observations_scale=(\n variational_inducing_observations_scale),\n mean_fn=mean_fn,\n observation_noise_variance=observation_noise_variance,\n jitter=jitter)\n ind = tfd.Independent(vgp, reinterpreted_batch_ndims=1)\n bij = tfb.Transpose(rightmost_transposed_ndims=2)\n d = tfd.TransformedDistribution(ind, bijector=bij)\n def 
_transposed_variational_loss(y, kl_weight=1.):\n loss = vgp.variational_loss(bij.forward(y), kl_weight=kl_weight)\n return loss\n d.variational_loss = _transposed_variational_loss\n return d\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for the Independent distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport importlib\n# Dependency imports\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import test_util as tfp_test_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top\n\n\ndef try_import(name): # pylint: disable=invalid-name\n module = None\n try:\n module = importlib.import_module(name)\n except ImportError as e:\n tf.compat.v1.logging.warning(\"Could not import %s: %s\" % (name, str(e)))\n return module\n\nstats = try_import(\"scipy.stats\")\n\ntfd = tfp.distributions\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ProductDistributionTest(tf.test.TestCase):\n\n def setUp(self):\n self._rng = np.random.RandomState(42)\n\n def testSampleAndLogProbUnivariate(self):\n loc = np.float32([-1., 1])\n scale = np.float32([0.1, 0.5])\n ind = tfd.Independent(\n distribution=tfd.Normal(loc=loc, scale=scale),\n reinterpreted_batch_ndims=1)\n\n x = ind.sample([4, 5], seed=tfp_test_util.test_seed(hardcoded_seed=42))\n log_prob_x = ind.log_prob(x)\n x_, actual_log_prob_x = self.evaluate([x, log_prob_x])\n\n self.assertEqual([], ind.batch_shape)\n self.assertEqual([2], ind.event_shape)\n self.assertEqual([4, 5, 2], x.shape)\n self.assertEqual([4, 5], log_prob_x.shape)\n\n expected_log_prob_x = stats.norm(loc, scale).logpdf(x_).sum(-1)\n self.assertAllClose(\n expected_log_prob_x, actual_log_prob_x, rtol=1e-5, atol=0.)\n\n def testSampleAndLogProbMultivariate(self):\n loc = np.float32([[-1., 1], [1, -1]])\n scale = np.float32([1., 0.5])\n ind = tfd.Independent(\n distribution=tfd.MultivariateNormalDiag(\n loc=loc, scale_identity_multiplier=scale),\n reinterpreted_batch_ndims=1)\n\n x = ind.sample([4, 5], seed=tfp_test_util.test_seed())\n log_prob_x = ind.log_prob(x)\n x_, actual_log_prob_x = self.evaluate([x, log_prob_x])\n\n self.assertEqual([], ind.batch_shape)\n self.assertEqual([2, 2], ind.event_shape)\n self.assertEqual([4, 5, 2, 2], x.shape)\n self.assertEqual([4, 5], log_prob_x.shape)\n\n expected_log_prob_x = stats.norm(loc,\n scale[:, None]).logpdf(x_).sum(-1).sum(-1)\n self.assertAllClose(\n expected_log_prob_x, actual_log_prob_x, rtol=1e-6, atol=0.)\n\n def testCdfMultivariate(self):\n ind = tfd.Independent(\n distribution=tfd.Normal(loc=tf.zeros([3]), scale=1.),\n reinterpreted_batch_ndims=1)\n\n cdfs = ind.cdf([[-50., 0., 0.], [0., 0., 0.], [50., 0., 0.], [50., 0., 50.],\n [50., 50., 50.]])\n log_cdfs = ind.log_cdf([[0., 0., 0.], [50., 0., 0.], [50., 0., 50.],\n [50., 50., 50.]])\n cdfs_, log_cdfs_ = self.evaluate([cdfs, log_cdfs])\n self.assertAllClose([0, .5**3, .5**2, .5, 1.], cdfs_)\n 
self.assertAllClose([np.log(.5) * 3, np.log(.5) * 2, np.log(.5), 0.],\n log_cdfs_)\n\n def testSampleConsistentStats(self):\n loc = np.float32([[-1., 1], [1, -1]])\n scale = np.float32([1., 0.5])\n n_samp = 1e4\n ind = tfd.Independent(\n distribution=tfd.MultivariateNormalDiag(\n loc=loc, scale_identity_multiplier=scale),\n reinterpreted_batch_ndims=1)\n\n x = ind.sample(int(n_samp), seed=tfp_test_util.test_seed(hardcoded_seed=42))\n sample_mean = tf.reduce_mean(input_tensor=x, axis=0)\n sample_var = tf.reduce_mean(\n input_tensor=tf.math.squared_difference(x, sample_mean), axis=0)\n sample_std = tf.sqrt(sample_var)\n sample_entropy = -tf.reduce_mean(input_tensor=ind.log_prob(x), axis=0)\n\n [\n sample_mean_,\n sample_var_,\n sample_std_,\n sample_entropy_,\n actual_mean_,\n actual_var_,\n actual_std_,\n actual_entropy_,\n actual_mode_,\n ] = self.evaluate([\n sample_mean,\n sample_var,\n sample_std,\n sample_entropy,\n ind.mean(),\n ind.variance(),\n ind.stddev(),\n ind.entropy(),\n ind.mode(),\n ])\n\n self.assertAllClose(sample_mean_, actual_mean_, rtol=0.02, atol=0.)\n self.assertAllClose(sample_var_, actual_var_, rtol=0.04, atol=0.)\n self.assertAllClose(sample_std_, actual_std_, rtol=0.02, atol=0.)\n self.assertAllClose(sample_entropy_, actual_entropy_, rtol=0.01, atol=0.)\n self.assertAllClose(loc, actual_mode_, rtol=1e-6, atol=0.)\n\n def test_event_ndims_is_static_when_possible(self):\n ind = tfd.Independent(\n distribution=tfd.Normal(\n loc=tf.compat.v1.placeholder_with_default(input=[2.], shape=None),\n scale=tf.compat.v1.placeholder_with_default(input=1., shape=None)),\n reinterpreted_batch_ndims=1)\n # Even though `event_shape` is not static, event_ndims must equal\n # `reinterpreted_batch_ndims + distribution.event_shape.ndims`.\n self.assertEqual(ind.event_shape.ndims, 1)\n\n def testKLRaises(self):\n ind1 = tfd.Independent(\n distribution=tfd.Normal(\n loc=np.float32([-1., 1]), scale=np.float32([0.1, 0.5])),\n reinterpreted_batch_ndims=1)\n ind2 = tfd.Independent(\n distribution=tfd.Normal(\n loc=np.float32(-1), scale=np.float32(0.5)),\n reinterpreted_batch_ndims=0)\n\n with self.assertRaisesRegexp(\n ValueError, \"Event shapes do not match\"):\n tfd.kl_divergence(ind1, ind2)\n\n ind1 = tfd.Independent(\n distribution=tfd.Normal(\n loc=np.float32([-1., 1]), scale=np.float32([0.1, 0.5])),\n reinterpreted_batch_ndims=1)\n ind2 = tfd.Independent(\n distribution=tfd.MultivariateNormalDiag(\n loc=np.float32([-1., 1]), scale_diag=np.float32([0.1, 0.5])),\n reinterpreted_batch_ndims=0)\n\n with self.assertRaisesRegexp(\n NotImplementedError, \"different event shapes\"):\n tfd.kl_divergence(ind1, ind2)\n\n def testKLScalarToMultivariate(self):\n normal1 = tfd.Normal(\n loc=np.float32([-1., 1]), scale=np.float32([0.1, 0.5]))\n ind1 = tfd.Independent(distribution=normal1, reinterpreted_batch_ndims=1)\n\n normal2 = tfd.Normal(\n loc=np.float32([-3., 3]), scale=np.float32([0.3, 0.3]))\n ind2 = tfd.Independent(distribution=normal2, reinterpreted_batch_ndims=1)\n\n normal_kl = tfd.kl_divergence(normal1, normal2)\n ind_kl = tfd.kl_divergence(ind1, ind2)\n self.assertAllClose(\n self.evaluate(tf.reduce_sum(input_tensor=normal_kl, axis=-1)),\n self.evaluate(ind_kl))\n\n def testKLIdentity(self):\n normal1 = tfd.Normal(\n loc=np.float32([-1., 1]), scale=np.float32([0.1, 0.5]))\n # This is functionally just a wrapper around normal1,\n # and doesn't change any outputs.\n ind1 = tfd.Independent(distribution=normal1, reinterpreted_batch_ndims=0)\n\n normal2 = tfd.Normal(\n loc=np.float32([-3., 
3]), scale=np.float32([0.3, 0.3]))\n # This is functionally just a wrapper around normal2,\n # and doesn't change any outputs.\n ind2 = tfd.Independent(distribution=normal2, reinterpreted_batch_ndims=0)\n\n normal_kl = tfd.kl_divergence(normal1, normal2)\n ind_kl = tfd.kl_divergence(ind1, ind2)\n self.assertAllClose(\n self.evaluate(normal_kl), self.evaluate(ind_kl))\n\n def testKLMultivariateToMultivariate(self):\n # (1, 1, 2) batch of MVNDiag\n mvn1 = tfd.MultivariateNormalDiag(\n loc=np.float32([[[[-1., 1, 3.], [2., 4., 3.]]]]),\n scale_diag=np.float32([[[0.2, 0.1, 5.], [2., 3., 4.]]]))\n ind1 = tfd.Independent(distribution=mvn1, reinterpreted_batch_ndims=2)\n\n # (1, 1, 2) batch of MVNDiag\n mvn2 = tfd.MultivariateNormalDiag(\n loc=np.float32([[[[-2., 3, 2.], [1., 3., 2.]]]]),\n scale_diag=np.float32([[[0.1, 0.5, 3.], [1., 2., 1.]]]))\n\n ind2 = tfd.Independent(distribution=mvn2, reinterpreted_batch_ndims=2)\n\n mvn_kl = tfd.kl_divergence(mvn1, mvn2)\n ind_kl = tfd.kl_divergence(ind1, ind2)\n self.assertAllClose(\n self.evaluate(tf.reduce_sum(input_tensor=mvn_kl, axis=[-1, -2])),\n self.evaluate(ind_kl))\n\n def _testMnistLike(self, static_shape):\n sample_shape = [4, 5]\n batch_shape = [10]\n image_shape = [28, 28, 1]\n logits = 3 * self._rng.random_sample(\n batch_shape + image_shape).astype(np.float32) - 1\n\n def expected_log_prob(x, logits):\n return (x * logits - np.log1p(np.exp(logits))).sum(-1).sum(-1).sum(-1)\n\n logits_ph = tf.compat.v1.placeholder_with_default(\n input=logits, shape=logits.shape if static_shape else None)\n ind = tfd.Independent(\n distribution=tfd.Bernoulli(logits=logits_ph))\n x = ind.sample(sample_shape, seed=tfp_test_util.test_seed())\n log_prob_x = ind.log_prob(x)\n [\n x_,\n actual_log_prob_x,\n ind_batch_shape,\n ind_event_shape,\n x_shape,\n log_prob_x_shape,\n ] = self.evaluate([\n x,\n log_prob_x,\n ind.batch_shape_tensor(),\n ind.event_shape_tensor(),\n tf.shape(input=x),\n tf.shape(input=log_prob_x),\n ])\n\n if static_shape:\n ind_batch_shape = ind.batch_shape\n ind_event_shape = ind.event_shape\n x_shape = x.shape\n log_prob_x_shape = log_prob_x.shape\n\n self.assertAllEqual(batch_shape, ind_batch_shape)\n self.assertAllEqual(image_shape, ind_event_shape)\n self.assertAllEqual(sample_shape + batch_shape + image_shape, x_shape)\n self.assertAllEqual(sample_shape + batch_shape, log_prob_x_shape)\n self.assertAllClose(\n expected_log_prob(x_, logits), actual_log_prob_x, rtol=1e-6, atol=0.)\n\n def testMnistLikeStaticShape(self):\n self._testMnistLike(static_shape=True)\n\n def testMnistLikeDynamicShape(self):\n self._testMnistLike(static_shape=False)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.where",
"tensorflow.math.lgamma",
"tensorflow.debugging.assert_same_float_dtype",
"tensorflow.math.igammac",
"tensorflow.broadcast_static_shape",
"tensorflow.square",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.compat.v1.name_scope",
"tensorflow.TensorShape",
"tensorflow.shape",
"tensorflow.identity",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.constant",
"tensorflow.random.gamma",
"tensorflow.ones",
"tensorflow.math.log",
"tensorflow.math.digamma",
"tensorflow.compat.v1.assert_positive",
"tensorflow.nn.softplus"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.reduce_sum",
"tensorflow.compat.v1.initializers.zeros",
"tensorflow.cast",
"tensorflow.compat.v1.initializers.constant",
"numpy.eye",
"tensorflow.compat.v1.name_scope",
"tensorflow.TensorShape",
"tensorflow.python.keras.utils.tf_utils.register_symbolic_tensor_type",
"tensorflow.shape",
"tensorflow.reduce_prod",
"tensorflow.get_static_value",
"tensorflow.split",
"tensorflow.size",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.eye",
"tensorflow.ones",
"numpy.prod",
"tensorflow.nn.softplus"
],
[
"numpy.log",
"tensorflow.reduce_mean",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.zeros",
"tensorflow.math.squared_difference",
"tensorflow.test.main",
"numpy.float32",
"numpy.exp",
"tensorflow.sqrt",
"numpy.random.RandomState",
"tensorflow.compat.v1.placeholder_with_default"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
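Aside on the TensorFlow Probability sources captured in this row: every `params_size` helper reduces to simple integer arithmetic over the event size. Below is a minimal plain-Python sketch of that bookkeeping; the function names are illustrative stand-ins, not the library API.

```python
# Plain-Python restatement of the params_size arithmetic from the
# tfp.layers code above (illustrative stand-ins, not the library API).

def mvn_tril_params_size(event_size):
    # loc vector plus the lower-triangular scale entries
    return event_size + event_size * (event_size + 1) // 2

def independent_normal_params_size(event_size):
    # one loc and one (softplus-transformed) scale per event dimension
    return 2 * event_size

def mixture_same_family_params_size(num_components, component_params_size):
    # mixture logits plus the stacked component parameters
    return num_components + num_components * component_params_size

assert mvn_tril_params_size(3) == 9                      # 3 + 3 * 4 // 2
assert independent_normal_params_size(4) == 8
# 5-component mixture of 1-D independent normals: 5 + 5 * 2 = 15 parameters,
# which is what tfpl.MixtureNormal.params_size(5, [1]) yields in the
# MixtureNormal docstring example above.
assert mixture_same_family_params_size(5, independent_normal_params_size(1)) == 15
```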
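With its default arguments, `_make_kl_divergence_fn` in the same sources estimates `KL[a, b]` by Monte Carlo: it draws test points from `a` via `test_points_fn` and averages `log p_a(z) - log p_b(z)`. Here is a small NumPy/SciPy sketch of that estimator, checked against the closed-form KL between two univariate normals; it is an assumed stand-in, not the TFP code path.

```python
# Stand-in for the Monte Carlo branch of _make_kl_divergence_fn above:
# KL[a, b] is estimated as the average of log p_a(z) - log p_b(z), with the
# test points z drawn from a. NumPy/SciPy only, not the TFP code path.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
mu_a, sig_a = 0.0, 1.0
mu_b, sig_b = 1.0, 2.0

z = rng.normal(mu_a, sig_a, size=200_000)            # test points from a
mc_kl = np.mean(stats.norm.logpdf(z, mu_a, sig_a)
                - stats.norm.logpdf(z, mu_b, sig_b))

# Closed-form KL between two univariate normals, for comparison.
exact_kl = (np.log(sig_b / sig_a)
            + (sig_a ** 2 + (mu_a - mu_b) ** 2) / (2 * sig_b ** 2)
            - 0.5)

print(mc_kl, exact_kl)  # agree to roughly two decimals at this sample size
```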
brynhayder/metapop | [
"2a5f25a904cba7133c398c9ce7fff6ad7a5d8705"
] | [
"src/plot.py"
] | [
"from argparse import ArgumentParser\nimport os\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\nfilenames = \"susceptible.csv exposed.csv infected.csv recovered.csv\".split()\n\n\ndef title(ax, region):\n return ax.set_title(region, x=0.95, y=0.9, ha=\"right\", va=\"top\")\n\n\ndef legend(fig, ax):\n lins, labs = ax.get_legend_handles_labels()\n return fig.legend(\n lins, labs, ncol=len(labs), bbox_to_anchor=(0.5, 0.05), loc=\"center\"\n )\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\"Plot Results\")\n parser.add_argument(\n \"folder\",\n type=str,\n help=(\n \"Path to csv file to plot.\"\n \" Must contain files susceptible.csv, exposed.csv,\"\n \" infected.csv and recovered.csv.\"\n \" These files should be of csv type, comma delimited\"\n \" and with the same number of columns (the regions).\"\n \" The first row will be read as region names.\"\n \" We will assume that there is no index column.\"\n ),\n )\n parser.add_argument(\n \"--rt\",\n type=str,\n help=(\n \"Path to rt csv used for simulation.\"\n \" If given we will plot the R_t timeseries.\"\n ),\n default=None,\n )\n args = parser.parse_args()\n\n outputs = pd.concat(\n {\n k.replace(\".csv\", \"\"): pd.read_csv(\n os.path.join(args.folder, k), header=None\n )\n for k in filenames\n },\n axis=1,\n ).swaplevel(axis=1)\n\n regions = outputs.columns.levels[0]\n\n if args.rt is not None:\n rt = pd.read_csv(os.path.join(args.rt), header=None)\n npop = outputs.groupby(level=0, axis=1).sum()\n rts = rt * outputs.swaplevel(axis=1)[\"susceptible\"] / npop\n xaxis = outputs.index\n fig, axarr = plt.subplots(len(regions), 1, sharex=True, squeeze=False)\n for ax, region in zip(axarr.flat, regions):\n ax.plot(xaxis, rts[region], label=\"R_t\", zorder=100)\n ax.plot(xaxis[:-1], rt[region], label=\"R_0\", alpha=0.5)\n ax.axhline(1, ls=\"--\", alpha=0.5, label=\"R_t=1\", color=\"k\")\n ax.set_ylabel(\"Reproduction\")\n ax.set_xlabel(\"Days\")\n ax.grid(alpha=0.25)\n title(ax, region)\n legend(fig, ax)\n plt.tight_layout()\n plt.subplots_adjust(hspace=0.1)\n\n fig, axarr = plt.subplots(len(regions), 1, sharex=True, sharey=False, squeeze=False)\n for ax, region in zip(axarr.flat, regions):\n title(ax, region)\n outputs[region]['infected'].plot(ax=ax, legend=False)\n ax.set_ylabel(\"Population\")\n ax.grid(alpha=0.2)\n ax.set_xlabel(\"Timesteps\")\n legend(fig, ax)\n plt.subplots_adjust(hspace=0.05)\n\n plt.show()\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots_adjust"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
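The metapop `src/plot.py` captured in this row expects a folder of four aligned, comma-delimited CSVs (susceptible, exposed, infected, recovered) with one column per region and no index. A minimal sketch for generating inputs it can plot follows; the folder name and values are hypothetical, and the CSVs are written without a header row because the script reads them with `header=None`.

```python
# Hypothetical helper: write the four CSVs that src/plot.py above expects
# (comma-delimited, one column per region, no index). The script reads them
# with header=None, so no header row is written and the "regions" become
# integer column labels (0, 1, ...). Folder name and values are made up.
import os
import numpy as np

out_dir = "example_run"
os.makedirs(out_dir, exist_ok=True)

rng = np.random.default_rng(0)
steps, n_regions = 100, 2
for name in ("susceptible", "exposed", "infected", "recovered"):
    data = rng.integers(0, 1000, size=(steps, n_regions))
    np.savetxt(os.path.join(out_dir, f"{name}.csv"), data, fmt="%d", delimiter=",")

# Then, e.g.:  python src/plot.py example_run
```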
dequadras/pandas | [
"8a7fbbeb8e3a88f8e355093eb1b68f361e65b6aa",
"8a7fbbeb8e3a88f8e355093eb1b68f361e65b6aa",
"8a7fbbeb8e3a88f8e355093eb1b68f361e65b6aa"
] | [
"pandas/tests/io/generate_legacy_storage_files.py",
"pandas/tests/indexing/test_floats.py",
"pandas/io/json/_normalize.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"\nself-contained to write legacy storage pickle files\n\nTo use this script. Create an environment where you want\ngenerate pickles, say its for 0.20.3, with your pandas clone\nin ~/pandas\n\n. activate pandas_0.20.3\ncd ~/\n\n$ python pandas/pandas/tests/io/generate_legacy_storage_files.py \\\n pandas/pandas/tests/io/data/legacy_pickle/0.20.3/ pickle\n\nThis script generates a storage file for the current arch, system,\nand python version\n pandas version: 0.20.3\n output dir : pandas/pandas/tests/io/data/legacy_pickle/0.20.3/\n storage format: pickle\ncreated pickle file: 0.20.3_x86_64_darwin_3.5.2.pickle\n\nThe idea here is you are using the *current* version of the\ngenerate_legacy_storage_files with an *older* version of pandas to\ngenerate a pickle file. We will then check this file into a current\nbranch, and test using test_pickle.py. This will load the *older*\npickles and test versus the current data that is generated\n(with master). These are then compared.\n\nIf we have cases where we changed the signature (e.g. we renamed\noffset -> freq in Timestamp). Then we have to conditionally execute\nin the generate_legacy_storage_files.py to make it\nrun under the older AND the newer version.\n\n\"\"\"\n\nfrom datetime import timedelta\nfrom distutils.version import LooseVersion\nimport os\nimport pickle\nimport platform as pl\nimport sys\n\nimport numpy as np\n\nimport pandas\nfrom pandas import (\n Categorical,\n DataFrame,\n Index,\n MultiIndex,\n NaT,\n Period,\n RangeIndex,\n Series,\n Timestamp,\n bdate_range,\n date_range,\n period_range,\n timedelta_range,\n)\n\nfrom pandas.tseries.offsets import (\n FY5253,\n BusinessDay,\n BusinessHour,\n CustomBusinessDay,\n DateOffset,\n Day,\n Easter,\n Hour,\n LastWeekOfMonth,\n Minute,\n MonthBegin,\n MonthEnd,\n QuarterBegin,\n QuarterEnd,\n SemiMonthBegin,\n SemiMonthEnd,\n Week,\n WeekOfMonth,\n YearBegin,\n YearEnd,\n)\n\ntry:\n # TODO: remove try/except when 0.24.0 is the legacy version.\n from pandas.arrays import SparseArray\nexcept ImportError:\n from pandas.core.sparse.api import SparseArray\n\n\n_loose_version = LooseVersion(pandas.__version__)\n\n\ndef _create_sp_series():\n nan = np.nan\n\n # nan-based\n arr = np.arange(15, dtype=np.float64)\n arr[7:12] = nan\n arr[-1:] = nan\n\n bseries = Series(SparseArray(arr, kind=\"block\"))\n bseries.name = \"bseries\"\n return bseries\n\n\ndef _create_sp_tsseries():\n nan = np.nan\n\n # nan-based\n arr = np.arange(15, dtype=np.float64)\n arr[7:12] = nan\n arr[-1:] = nan\n\n date_index = bdate_range(\"1/1/2011\", periods=len(arr))\n bseries = Series(SparseArray(arr, kind=\"block\"), index=date_index)\n bseries.name = \"btsseries\"\n return bseries\n\n\ndef _create_sp_frame():\n nan = np.nan\n\n data = {\n \"A\": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],\n \"B\": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],\n \"C\": np.arange(10).astype(np.int64),\n \"D\": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],\n }\n\n dates = bdate_range(\"1/1/2011\", periods=10)\n return DataFrame(data, index=dates).apply(SparseArray)\n\n\ndef create_data():\n \"\"\" create the pickle data \"\"\"\n data = {\n \"A\": [0.0, 1.0, 2.0, 3.0, np.nan],\n \"B\": [0, 1, 0, 1, 0],\n \"C\": [\"foo1\", \"foo2\", \"foo3\", \"foo4\", \"foo5\"],\n \"D\": date_range(\"1/1/2009\", periods=5),\n \"E\": [0.0, 1, Timestamp(\"20100101\"), \"foo\", 2.0],\n }\n\n scalars = dict(timestamp=Timestamp(\"20130101\"), period=Period(\"2012\", \"M\"))\n\n index = dict(\n int=Index(np.arange(10)),\n 
date=date_range(\"20130101\", periods=10),\n period=period_range(\"2013-01-01\", freq=\"M\", periods=10),\n float=Index(np.arange(10, dtype=np.float64)),\n uint=Index(np.arange(10, dtype=np.uint64)),\n timedelta=timedelta_range(\"00:00:00\", freq=\"30T\", periods=10),\n )\n\n index[\"range\"] = RangeIndex(10)\n\n if _loose_version >= LooseVersion(\"0.21\"):\n from pandas import interval_range\n\n index[\"interval\"] = interval_range(0, periods=10)\n\n mi = dict(\n reg2=MultiIndex.from_tuples(\n tuple(\n zip(\n *[\n [\"bar\", \"bar\", \"baz\", \"baz\", \"foo\", \"foo\", \"qux\", \"qux\"],\n [\"one\", \"two\", \"one\", \"two\", \"one\", \"two\", \"one\", \"two\"],\n ]\n )\n ),\n names=[\"first\", \"second\"],\n )\n )\n\n series = dict(\n float=Series(data[\"A\"]),\n int=Series(data[\"B\"]),\n mixed=Series(data[\"E\"]),\n ts=Series(\n np.arange(10).astype(np.int64), index=date_range(\"20130101\", periods=10)\n ),\n mi=Series(\n np.arange(5).astype(np.float64),\n index=MultiIndex.from_tuples(\n tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=[\"one\", \"two\"]\n ),\n ),\n dup=Series(np.arange(5).astype(np.float64), index=[\"A\", \"B\", \"C\", \"D\", \"A\"]),\n cat=Series(Categorical([\"foo\", \"bar\", \"baz\"])),\n dt=Series(date_range(\"20130101\", periods=5)),\n dt_tz=Series(date_range(\"20130101\", periods=5, tz=\"US/Eastern\")),\n period=Series([Period(\"2000Q1\")] * 5),\n )\n\n mixed_dup_df = DataFrame(data)\n mixed_dup_df.columns = list(\"ABCDA\")\n frame = dict(\n float=DataFrame({\"A\": series[\"float\"], \"B\": series[\"float\"] + 1}),\n int=DataFrame({\"A\": series[\"int\"], \"B\": series[\"int\"] + 1}),\n mixed=DataFrame({k: data[k] for k in [\"A\", \"B\", \"C\", \"D\"]}),\n mi=DataFrame(\n {\"A\": np.arange(5).astype(np.float64), \"B\": np.arange(5).astype(np.int64)},\n index=MultiIndex.from_tuples(\n tuple(\n zip(\n *[\n [\"bar\", \"bar\", \"baz\", \"baz\", \"baz\"],\n [\"one\", \"two\", \"one\", \"two\", \"three\"],\n ]\n )\n ),\n names=[\"first\", \"second\"],\n ),\n ),\n dup=DataFrame(\n np.arange(15).reshape(5, 3).astype(np.float64), columns=[\"A\", \"B\", \"A\"]\n ),\n cat_onecol=DataFrame({\"A\": Categorical([\"foo\", \"bar\"])}),\n cat_and_float=DataFrame(\n {\n \"A\": Categorical([\"foo\", \"bar\", \"baz\"]),\n \"B\": np.arange(3).astype(np.int64),\n }\n ),\n mixed_dup=mixed_dup_df,\n dt_mixed_tzs=DataFrame(\n {\n \"A\": Timestamp(\"20130102\", tz=\"US/Eastern\"),\n \"B\": Timestamp(\"20130603\", tz=\"CET\"),\n },\n index=range(5),\n ),\n dt_mixed2_tzs=DataFrame(\n {\n \"A\": Timestamp(\"20130102\", tz=\"US/Eastern\"),\n \"B\": Timestamp(\"20130603\", tz=\"CET\"),\n \"C\": Timestamp(\"20130603\", tz=\"UTC\"),\n },\n index=range(5),\n ),\n )\n\n cat = dict(\n int8=Categorical(list(\"abcdefg\")),\n int16=Categorical(np.arange(1000)),\n int32=Categorical(np.arange(10000)),\n )\n\n timestamp = dict(\n normal=Timestamp(\"2011-01-01\"),\n nat=NaT,\n tz=Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n )\n\n timestamp[\"freq\"] = Timestamp(\"2011-01-01\", freq=\"D\")\n timestamp[\"both\"] = Timestamp(\"2011-01-01\", tz=\"Asia/Tokyo\", freq=\"M\")\n\n off = {\n \"DateOffset\": DateOffset(years=1),\n \"DateOffset_h_ns\": DateOffset(hour=6, nanoseconds=5824),\n \"BusinessDay\": BusinessDay(offset=timedelta(seconds=9)),\n \"BusinessHour\": BusinessHour(normalize=True, n=6, end=\"15:14\"),\n \"CustomBusinessDay\": CustomBusinessDay(weekmask=\"Mon Fri\"),\n \"SemiMonthBegin\": SemiMonthBegin(day_of_month=9),\n \"SemiMonthEnd\": SemiMonthEnd(day_of_month=24),\n \"MonthBegin\": 
MonthBegin(1),\n \"MonthEnd\": MonthEnd(1),\n \"QuarterBegin\": QuarterBegin(1),\n \"QuarterEnd\": QuarterEnd(1),\n \"Day\": Day(1),\n \"YearBegin\": YearBegin(1),\n \"YearEnd\": YearEnd(1),\n \"Week\": Week(1),\n \"Week_Tues\": Week(2, normalize=False, weekday=1),\n \"WeekOfMonth\": WeekOfMonth(week=3, weekday=4),\n \"LastWeekOfMonth\": LastWeekOfMonth(n=1, weekday=3),\n \"FY5253\": FY5253(n=2, weekday=6, startingMonth=7, variation=\"last\"),\n \"Easter\": Easter(),\n \"Hour\": Hour(1),\n \"Minute\": Minute(1),\n }\n\n return dict(\n series=series,\n frame=frame,\n index=index,\n scalars=scalars,\n mi=mi,\n sp_series=dict(float=_create_sp_series(), ts=_create_sp_tsseries()),\n sp_frame=dict(float=_create_sp_frame()),\n cat=cat,\n timestamp=timestamp,\n offsets=off,\n )\n\n\ndef create_pickle_data():\n data = create_data()\n\n return data\n\n\ndef platform_name():\n return \"_\".join(\n [\n str(pandas.__version__),\n str(pl.machine()),\n str(pl.system().lower()),\n str(pl.python_version()),\n ]\n )\n\n\ndef write_legacy_pickles(output_dir):\n\n version = pandas.__version__\n\n print(\n \"This script generates a storage file for the current arch, system, \"\n \"and python version\"\n )\n print(\" pandas version: {0}\".format(version))\n print(\" output dir : {0}\".format(output_dir))\n print(\" storage format: pickle\")\n\n pth = \"{0}.pickle\".format(platform_name())\n\n fh = open(os.path.join(output_dir, pth), \"wb\")\n pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL)\n fh.close()\n\n print(\"created pickle file: {pth}\".format(pth=pth))\n\n\ndef write_legacy_file():\n # force our cwd to be the first searched\n sys.path.insert(0, \".\")\n\n if not (3 <= len(sys.argv) <= 4):\n exit(\n \"Specify output directory and storage type: generate_legacy_\"\n \"storage_files.py <output_dir> <storage_type> \"\n )\n\n output_dir = str(sys.argv[1])\n storage_type = str(sys.argv[2])\n\n if storage_type == \"pickle\":\n write_legacy_pickles(output_dir=output_dir)\n else:\n exit(\"storage_type must be one of {'pickle'}\")\n\n\nif __name__ == \"__main__\":\n write_legacy_file()\n",
"import numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series\nimport pandas._testing as tm\n\n\nclass TestFloatIndexers:\n def check(self, result, original, indexer, getitem):\n \"\"\"\n comparator for results\n we need to take care if we are indexing on a\n Series or a frame\n \"\"\"\n if isinstance(original, Series):\n expected = original.iloc[indexer]\n else:\n if getitem:\n expected = original.iloc[:, indexer]\n else:\n expected = original.iloc[indexer]\n\n tm.assert_almost_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"index_func\",\n [\n tm.makeStringIndex,\n tm.makeUnicodeIndex,\n tm.makeCategoricalIndex,\n tm.makeDateIndex,\n tm.makeTimedeltaIndex,\n tm.makePeriodIndex,\n tm.makeIntIndex,\n tm.makeRangeIndex,\n ],\n )\n def test_scalar_error(self, index_func):\n\n # GH 4892\n # float_indexers should raise exceptions\n # on appropriate Index types & accessors\n # this duplicates the code below\n # but is specifically testing for the error\n # message\n\n i = index_func(5)\n\n s = Series(np.arange(len(i)), index=i)\n\n msg = \"Cannot index by location index\"\n with pytest.raises(TypeError, match=msg):\n s.iloc[3.0]\n\n msg = (\n \"cannot do positional indexing on {klass} with these \"\n r\"indexers \\[3\\.0\\] of type float\".format(klass=type(i).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s.iloc[3.0] = 0\n\n @pytest.mark.parametrize(\n \"index_func\",\n [\n tm.makeStringIndex,\n tm.makeUnicodeIndex,\n tm.makeCategoricalIndex,\n tm.makeDateIndex,\n tm.makeTimedeltaIndex,\n tm.makePeriodIndex,\n ],\n )\n def test_scalar_non_numeric(self, index_func):\n\n # GH 4892\n # float_indexers should raise exceptions\n # on appropriate Index types & accessors\n\n i = index_func(5)\n\n for s in [\n Series(np.arange(len(i)), index=i),\n DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i),\n ]:\n\n # getting\n for idxr, getitem in [(lambda x: x.iloc, False), (lambda x: x, True)]:\n\n # gettitem on a DataFrame is a KeyError as it is indexing\n # via labels on the columns\n if getitem and isinstance(s, DataFrame):\n error = KeyError\n msg = r\"^3(\\.0)?$\"\n else:\n error = TypeError\n msg = (\n r\"cannot do (label|positional) indexing \"\n r\"on {klass} with these indexers \\[3\\.0\\] of \"\n r\"type float|\"\n \"Cannot index by location index with a \"\n \"non-integer key\".format(klass=type(i).__name__)\n )\n with pytest.raises(error, match=msg):\n idxr(s)[3.0]\n\n # label based can be a TypeError or KeyError\n if s.index.inferred_type in {\n \"categorical\",\n \"string\",\n \"unicode\",\n \"mixed\",\n }:\n error = KeyError\n msg = r\"^3\\.0$\"\n else:\n error = TypeError\n msg = (\n r\"cannot do (label|positional) indexing \"\n r\"on {klass} with these indexers \\[3\\.0\\] of \"\n r\"type float\".format(klass=type(i).__name__)\n )\n with pytest.raises(error, match=msg):\n s.loc[3.0]\n\n # contains\n assert 3.0 not in s\n\n # setting with a float fails with iloc\n msg = (\n r\"cannot do (label|positional) indexing \"\n r\"on {klass} with these indexers \\[3\\.0\\] of \"\n r\"type float\".format(klass=type(i).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s.iloc[3.0] = 0\n\n # setting with an indexer\n if s.index.inferred_type in [\"categorical\"]:\n # Value or Type Error\n pass\n elif s.index.inferred_type in [\"datetime64\", \"timedelta64\", \"period\"]:\n\n # these should prob work\n # and are inconsistent between series/dataframe ATM\n # for idxr in [lambda x: x]:\n # s2 = s.copy()\n 
#\n # with pytest.raises(TypeError):\n # idxr(s2)[3.0] = 0\n pass\n\n else:\n\n s2 = s.copy()\n s2.loc[3.0] = 10\n assert s2.index.is_object()\n\n for idxr in [lambda x: x]:\n s2 = s.copy()\n idxr(s2)[3.0] = 0\n assert s2.index.is_object()\n\n # fallsback to position selection, series only\n s = Series(np.arange(len(i)), index=i)\n s[3]\n msg = (\n r\"cannot do (label|positional) indexing \"\n r\"on {klass} with these indexers \\[3\\.0\\] of \"\n r\"type float\".format(klass=type(i).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[3.0]\n\n def test_scalar_with_mixed(self):\n\n s2 = Series([1, 2, 3], index=[\"a\", \"b\", \"c\"])\n s3 = Series([1, 2, 3], index=[\"a\", \"b\", 1.5])\n\n # lookup in a pure stringstr\n # with an invalid indexer\n for idxr in [lambda x: x, lambda x: x.iloc]:\n\n msg = (\n r\"cannot do label indexing \"\n r\"on {klass} with these indexers \\[1\\.0\\] of \"\n r\"type float|\"\n \"Cannot index by location index with a non-integer key\".format(\n klass=Index.__name__\n )\n )\n with pytest.raises(TypeError, match=msg):\n idxr(s2)[1.0]\n\n with pytest.raises(KeyError, match=r\"^1\\.0$\"):\n s2.loc[1.0]\n\n result = s2.loc[\"b\"]\n expected = 2\n assert result == expected\n\n # mixed index so we have label\n # indexing\n for idxr in [lambda x: x]:\n\n msg = (\n r\"cannot do label indexing \"\n r\"on {klass} with these indexers \\[1\\.0\\] of \"\n r\"type float\".format(klass=Index.__name__)\n )\n with pytest.raises(TypeError, match=msg):\n idxr(s3)[1.0]\n\n result = idxr(s3)[1]\n expected = 2\n assert result == expected\n\n msg = \"Cannot index by location index with a non-integer key\"\n with pytest.raises(TypeError, match=msg):\n s3.iloc[1.0]\n with pytest.raises(KeyError, match=r\"^1\\.0$\"):\n s3.loc[1.0]\n\n result = s3.loc[1.5]\n expected = 3\n assert result == expected\n\n @pytest.mark.parametrize(\n \"index_func\", [tm.makeIntIndex, tm.makeRangeIndex],\n )\n @pytest.mark.parametrize(\"klass\", [Series, DataFrame])\n def test_scalar_integer(self, index_func, klass):\n\n # test how scalar float indexers work on int indexes\n\n # integer index\n i = index_func(5)\n\n if klass is Series:\n obj = Series(np.arange(len(i)))\n else:\n obj = DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i)\n\n # coerce to equal int\n for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:\n\n result = idxr(obj)[3.0]\n self.check(result, obj, 3, getitem)\n\n # coerce to equal int\n for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:\n\n if isinstance(obj, Series):\n\n def compare(x, y):\n assert x == y\n\n expected = 100\n else:\n compare = tm.assert_series_equal\n if getitem:\n expected = Series(100, index=range(len(obj)), name=3)\n else:\n expected = Series(100.0, index=range(len(obj)), name=3)\n\n s2 = obj.copy()\n idxr(s2)[3.0] = 100\n\n result = idxr(s2)[3.0]\n compare(result, expected)\n\n result = idxr(s2)[3]\n compare(result, expected)\n\n # contains\n # coerce to equal int\n assert 3.0 in obj\n\n def test_scalar_float(self):\n\n # scalar float indexers work on a float index\n index = Index(np.arange(5.0))\n for s in [\n Series(np.arange(len(index)), index=index),\n DataFrame(\n np.random.randn(len(index), len(index)), index=index, columns=index\n ),\n ]:\n\n # assert all operations except for iloc are ok\n indexer = index[3]\n for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:\n\n # getting\n result = idxr(s)[indexer]\n self.check(result, s, 3, getitem)\n\n # setting\n s2 = s.copy()\n\n result = 
idxr(s2)[indexer]\n self.check(result, s, 3, getitem)\n\n # random integer is a KeyError\n with pytest.raises(KeyError, match=r\"^3\\.5$\"):\n idxr(s)[3.5]\n\n # contains\n assert 3.0 in s\n\n # iloc succeeds with an integer\n expected = s.iloc[3]\n s2 = s.copy()\n\n s2.iloc[3] = expected\n result = s2.iloc[3]\n self.check(result, s, 3, False)\n\n # iloc raises with a float\n msg = \"Cannot index by location index with a non-integer key\"\n with pytest.raises(TypeError, match=msg):\n s.iloc[3.0]\n\n msg = (\n r\"cannot do positional indexing \"\n r\"on {klass} with these indexers \\[3\\.0\\] of \"\n r\"type float\".format(klass=Float64Index.__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s2.iloc[3.0] = 0\n\n @pytest.mark.parametrize(\n \"index_func\",\n [\n tm.makeStringIndex,\n tm.makeUnicodeIndex,\n tm.makeDateIndex,\n tm.makeTimedeltaIndex,\n tm.makePeriodIndex,\n ],\n )\n def test_slice_non_numeric(self, index_func):\n\n # GH 4892\n # float_indexers should raise exceptions\n # on appropriate Index types & accessors\n\n index = index_func(5)\n for s in [\n Series(range(5), index=index),\n DataFrame(np.random.randn(5, 2), index=index),\n ]:\n\n # getitem\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:\n\n msg = (\n \"cannot do positional indexing \"\n r\"on {klass} with these indexers \\[(3|4)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s.iloc[l]\n\n for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:\n\n msg = (\n \"cannot do (slice|positional) indexing \"\n r\"on {klass} with these indexers \"\n r\"\\[(3|4)(\\.0)?\\] \"\n r\"of type (float|int)\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n idxr(s)[l]\n\n # setitem\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:\n\n msg = (\n \"cannot do positional indexing \"\n r\"on {klass} with these indexers \\[(3|4)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s.iloc[l] = 0\n\n for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:\n msg = (\n \"cannot do (slice|positional) indexing \"\n r\"on {klass} with these indexers \"\n r\"\\[(3|4)(\\.0)?\\] \"\n r\"of type (float|int)\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n idxr(s)[l] = 0\n\n def test_slice_integer(self):\n\n # same as above, but for Integer based indexes\n # these coerce to a like integer\n # oob indicates if we are out of bounds\n # of positional indexing\n for index, oob in [\n (Int64Index(range(5)), False),\n (RangeIndex(5), False),\n (Int64Index(range(5)) + 10, True),\n ]:\n\n # s is an in-range index\n s = Series(range(5), index=index)\n\n # getitem\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:\n\n for idxr in [lambda x: x.loc]:\n\n result = idxr(s)[l]\n\n # these are all label indexing\n # except getitem which is positional\n # empty\n if oob:\n indexer = slice(0, 0)\n else:\n indexer = slice(3, 5)\n self.check(result, s, indexer, False)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[(3|4)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l]\n\n # getitem out-of-bounds\n for l in [slice(-6, 6), slice(-6.0, 6.0)]:\n\n for idxr in [lambda x: x.loc]:\n result = idxr(s)[l]\n\n # these are all label indexing\n # except getitem which is positional\n # empty\n if oob:\n 
indexer = slice(0, 0)\n else:\n indexer = slice(-6, 6)\n self.check(result, s, indexer, False)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[-6\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[slice(-6.0, 6.0)]\n\n # getitem odd floats\n for l, res1 in [\n (slice(2.5, 4), slice(3, 5)),\n (slice(2, 3.5), slice(2, 4)),\n (slice(2.5, 3.5), slice(3, 4)),\n ]:\n\n for idxr in [lambda x: x.loc]:\n\n result = idxr(s)[l]\n if oob:\n res = slice(0, 0)\n else:\n res = res1\n\n self.check(result, s, res, False)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[(2|3)\\.5\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l]\n\n # setitem\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:\n\n for idxr in [lambda x: x.loc]:\n sc = s.copy()\n idxr(sc)[l] = 0\n result = idxr(sc)[l].values.ravel()\n assert (result == 0).all()\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[(3|4)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l] = 0\n\n def test_integer_positional_indexing(self):\n \"\"\" make sure that we are raising on positional indexing\n w.r.t. an integer index\n \"\"\"\n s = Series(range(2, 6), index=range(2, 6))\n\n result = s[2:4]\n expected = s.iloc[2:4]\n tm.assert_series_equal(result, expected)\n\n for idxr in [lambda x: x, lambda x: x.iloc]:\n\n for l in [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]:\n\n klass = RangeIndex\n msg = (\n \"cannot do (slice|positional) indexing \"\n r\"on {klass} with these indexers \\[(2|4)\\.0\\] of \"\n \"type float\".format(klass=klass.__name__)\n )\n with pytest.raises(TypeError, match=msg):\n idxr(s)[l]\n\n @pytest.mark.parametrize(\n \"index_func\", [tm.makeIntIndex, tm.makeRangeIndex],\n )\n def test_slice_integer_frame_getitem(self, index_func):\n\n # similar to above, but on the getitem dim (of a DataFrame)\n index = index_func(5)\n\n s = DataFrame(np.random.randn(5, 2), index=index)\n\n def f(idxr):\n\n # getitem\n for l in [slice(0.0, 1), slice(0, 1.0), slice(0.0, 1.0)]:\n\n result = idxr(s)[l]\n indexer = slice(0, 2)\n self.check(result, s, indexer, False)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[(0|1)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l]\n\n # getitem out-of-bounds\n for l in [slice(-10, 10), slice(-10.0, 10.0)]:\n\n result = idxr(s)[l]\n self.check(result, s, slice(-10, 10), True)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[-10\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[slice(-10.0, 10.0)]\n\n # getitem odd floats\n for l, res in [\n (slice(0.5, 1), slice(1, 2)),\n (slice(0, 0.5), slice(0, 1)),\n (slice(0.5, 1.5), slice(1, 2)),\n ]:\n\n result = idxr(s)[l]\n self.check(result, s, res, False)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[0\\.5\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l]\n\n # setitem\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 
4.0)]:\n\n sc = s.copy()\n idxr(sc)[l] = 0\n result = idxr(sc)[l].values.ravel()\n assert (result == 0).all()\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[(3|4)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l] = 0\n\n f(lambda x: x.loc)\n\n def test_slice_float(self):\n\n # same as above, but for floats\n index = Index(np.arange(5.0)) + 0.1\n for s in [\n Series(range(5), index=index),\n DataFrame(np.random.randn(5, 2), index=index),\n ]:\n\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:\n\n expected = s.iloc[3:4]\n for idxr in [lambda x: x.loc, lambda x: x]:\n\n # getitem\n result = idxr(s)[l]\n if isinstance(s, Series):\n tm.assert_series_equal(result, expected)\n else:\n tm.assert_frame_equal(result, expected)\n # setitem\n s2 = s.copy()\n idxr(s2)[l] = 0\n result = idxr(s2)[l].values.ravel()\n assert (result == 0).all()\n\n def test_floating_index_doc_example(self):\n\n index = Index([1.5, 2, 3, 4.5, 5])\n s = Series(range(5), index=index)\n assert s[3] == 2\n assert s.loc[3] == 2\n assert s.loc[3] == 2\n assert s.iloc[3] == 3\n\n def test_floating_misc(self):\n\n # related 236\n # scalar/slicing of a float index\n s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64)\n\n # label based slicing\n result1 = s[1.0:3.0]\n result2 = s.loc[1.0:3.0]\n result3 = s.loc[1.0:3.0]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n\n # exact indexing when found\n result1 = s[5.0]\n result2 = s.loc[5.0]\n result3 = s.loc[5.0]\n assert result1 == result2\n assert result1 == result3\n\n result1 = s[5]\n result2 = s.loc[5]\n result3 = s.loc[5]\n assert result1 == result2\n assert result1 == result3\n\n assert s[5.0] == s[5]\n\n # value not found (and no fallbacking at all)\n\n # scalar integers\n with pytest.raises(KeyError, match=r\"^4$\"):\n s.loc[4]\n with pytest.raises(KeyError, match=r\"^4$\"):\n s.loc[4]\n with pytest.raises(KeyError, match=r\"^4$\"):\n s[4]\n\n # fancy floats/integers create the correct entry (as nan)\n # fancy tests\n expected = Series([2, 0], index=Float64Index([5.0, 0.0]))\n for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float\n tm.assert_series_equal(s[fancy_idx], expected)\n tm.assert_series_equal(s.loc[fancy_idx], expected)\n tm.assert_series_equal(s.loc[fancy_idx], expected)\n\n expected = Series([2, 0], index=Index([5, 0], dtype=\"int64\"))\n for fancy_idx in [[5, 0], np.array([5, 0])]: # int\n tm.assert_series_equal(s[fancy_idx], expected)\n tm.assert_series_equal(s.loc[fancy_idx], expected)\n tm.assert_series_equal(s.loc[fancy_idx], expected)\n\n # all should return the same as we are slicing 'the same'\n result1 = s.loc[2:5]\n result2 = s.loc[2.0:5.0]\n result3 = s.loc[2.0:5]\n result4 = s.loc[2.1:5]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n tm.assert_series_equal(result1, result4)\n\n # previously this did fallback indexing\n result1 = s[2:5]\n result2 = s[2.0:5.0]\n result3 = s[2.0:5]\n result4 = s[2.1:5]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n tm.assert_series_equal(result1, result4)\n\n result1 = s.loc[2:5]\n result2 = s.loc[2.0:5.0]\n result3 = s.loc[2.0:5]\n result4 = s.loc[2.1:5]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n tm.assert_series_equal(result1, result4)\n\n # combined test\n result1 = s.loc[2:5]\n result2 = 
s.loc[2:5]\n result3 = s[2:5]\n\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n\n # list selection\n result1 = s[[0.0, 5, 10]]\n result2 = s.loc[[0.0, 5, 10]]\n result3 = s.loc[[0.0, 5, 10]]\n result4 = s.iloc[[0, 2, 4]]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n tm.assert_series_equal(result1, result4)\n\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s[[1.6, 5, 10]]\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s.loc[[1.6, 5, 10]]\n\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s[[0, 1, 2]]\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s.loc[[0, 1, 2]]\n\n result1 = s.loc[[2.5, 5]]\n result2 = s.loc[[2.5, 5]]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, Series([1, 2], index=[2.5, 5.0]))\n\n result1 = s[[2.5]]\n result2 = s.loc[[2.5]]\n result3 = s.loc[[2.5]]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n tm.assert_series_equal(result1, Series([1], index=[2.5]))\n\n def test_floating_tuples(self):\n # see gh-13509\n s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.1, 0.2], name=\"foo\")\n\n result = s[0.0]\n assert result == (1, 1)\n\n expected = Series([(1, 1), (2, 2)], index=[0.0, 0.0], name=\"foo\")\n s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.0, 0.2], name=\"foo\")\n\n result = s[0.0]\n tm.assert_series_equal(result, expected)\n\n def test_float64index_slicing_bug(self):\n # GH 5557, related to slicing a float index\n ser = {\n 256: 2321.0,\n 1: 78.0,\n 2: 2716.0,\n 3: 0.0,\n 4: 369.0,\n 5: 0.0,\n 6: 269.0,\n 7: 0.0,\n 8: 0.0,\n 9: 0.0,\n 10: 3536.0,\n 11: 0.0,\n 12: 24.0,\n 13: 0.0,\n 14: 931.0,\n 15: 0.0,\n 16: 101.0,\n 17: 78.0,\n 18: 9643.0,\n 19: 0.0,\n 20: 0.0,\n 21: 0.0,\n 22: 63761.0,\n 23: 0.0,\n 24: 446.0,\n 25: 0.0,\n 26: 34773.0,\n 27: 0.0,\n 28: 729.0,\n 29: 78.0,\n 30: 0.0,\n 31: 0.0,\n 32: 3374.0,\n 33: 0.0,\n 34: 1391.0,\n 35: 0.0,\n 36: 361.0,\n 37: 0.0,\n 38: 61808.0,\n 39: 0.0,\n 40: 0.0,\n 41: 0.0,\n 42: 6677.0,\n 43: 0.0,\n 44: 802.0,\n 45: 0.0,\n 46: 2691.0,\n 47: 0.0,\n 48: 3582.0,\n 49: 0.0,\n 50: 734.0,\n 51: 0.0,\n 52: 627.0,\n 53: 70.0,\n 54: 2584.0,\n 55: 0.0,\n 56: 324.0,\n 57: 0.0,\n 58: 605.0,\n 59: 0.0,\n 60: 0.0,\n 61: 0.0,\n 62: 3989.0,\n 63: 10.0,\n 64: 42.0,\n 65: 0.0,\n 66: 904.0,\n 67: 0.0,\n 68: 88.0,\n 69: 70.0,\n 70: 8172.0,\n 71: 0.0,\n 72: 0.0,\n 73: 0.0,\n 74: 64902.0,\n 75: 0.0,\n 76: 347.0,\n 77: 0.0,\n 78: 36605.0,\n 79: 0.0,\n 80: 379.0,\n 81: 70.0,\n 82: 0.0,\n 83: 0.0,\n 84: 3001.0,\n 85: 0.0,\n 86: 1630.0,\n 87: 7.0,\n 88: 364.0,\n 89: 0.0,\n 90: 67404.0,\n 91: 9.0,\n 92: 0.0,\n 93: 0.0,\n 94: 7685.0,\n 95: 0.0,\n 96: 1017.0,\n 97: 0.0,\n 98: 2831.0,\n 99: 0.0,\n 100: 2963.0,\n 101: 0.0,\n 102: 854.0,\n 103: 0.0,\n 104: 0.0,\n 105: 0.0,\n 106: 0.0,\n 107: 0.0,\n 108: 0.0,\n 109: 0.0,\n 110: 0.0,\n 111: 0.0,\n 112: 0.0,\n 113: 0.0,\n 114: 0.0,\n 115: 0.0,\n 116: 0.0,\n 117: 0.0,\n 118: 0.0,\n 119: 0.0,\n 120: 0.0,\n 121: 0.0,\n 122: 0.0,\n 123: 0.0,\n 124: 0.0,\n 125: 0.0,\n 126: 67744.0,\n 127: 22.0,\n 128: 264.0,\n 129: 0.0,\n 260: 197.0,\n 268: 0.0,\n 265: 0.0,\n 269: 0.0,\n 261: 0.0,\n 266: 1198.0,\n 267: 0.0,\n 262: 2629.0,\n 258: 775.0,\n 257: 0.0,\n 263: 0.0,\n 259: 0.0,\n 264: 163.0,\n 250: 10326.0,\n 251: 0.0,\n 252: 1228.0,\n 253: 0.0,\n 254: 2769.0,\n 255: 0.0,\n }\n\n # smoke test for the repr\n s = Series(ser)\n result = s.value_counts()\n str(result)\n",
"# ---------------------------------------------------------------------\n# JSON normalization routines\n\nfrom collections import defaultdict\nimport copy\nfrom typing import Any, DefaultDict, Dict, Iterable, List, Optional, Union\n\nimport numpy as np\n\nfrom pandas._libs.writers import convert_json_to_lines\nfrom pandas.util._decorators import deprecate\n\nimport pandas as pd\nfrom pandas import DataFrame\n\n\ndef convert_to_line_delimits(s):\n \"\"\"\n Helper function that converts JSON lists to line delimited JSON.\n \"\"\"\n # Determine we have a JSON list to turn to lines otherwise just return the\n # json object, only lists can\n if not s[0] == \"[\" and s[-1] == \"]\":\n return s\n s = s[1:-1]\n\n return convert_json_to_lines(s)\n\n\ndef nested_to_record(\n ds,\n prefix: str = \"\",\n sep: str = \".\",\n level: int = 0,\n max_level: Optional[int] = None,\n):\n \"\"\"\n A simplified json_normalize\n\n Converts a nested dict into a flat dict (\"record\"), unlike json_normalize,\n it does not attempt to extract a subset of the data.\n\n Parameters\n ----------\n ds : dict or list of dicts\n prefix: the prefix, optional, default: \"\"\n sep : str, default '.'\n Nested records will generate names separated by sep,\n e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar\n level: int, optional, default: 0\n The number of levels in the json string.\n\n max_level: int, optional, default: None\n The max depth to normalize.\n\n .. versionadded:: 0.25.0\n\n Returns\n -------\n d - dict or list of dicts, matching `ds`\n\n Examples\n --------\n\n IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),\n nested=dict(e=dict(c=1,d=2),d=2)))\n Out[52]:\n {'dict1.c': 1,\n 'dict1.d': 2,\n 'flat1': 1,\n 'nested.d': 2,\n 'nested.e.c': 1,\n 'nested.e.d': 2}\n \"\"\"\n singleton = False\n if isinstance(ds, dict):\n ds = [ds]\n singleton = True\n new_ds = []\n for d in ds:\n new_d = copy.deepcopy(d)\n for k, v in d.items():\n # each key gets renamed with prefix\n if not isinstance(k, str):\n k = str(k)\n if level == 0:\n newkey = k\n else:\n newkey = prefix + sep + k\n\n # flatten if type is dict and\n # current dict level < maximum level provided and\n # only dicts gets recurse-flattened\n # only at level>1 do we rename the rest of the keys\n if not isinstance(v, dict) or (\n max_level is not None and level >= max_level\n ):\n if level != 0: # so we skip copying for top level, common case\n v = new_d.pop(k)\n new_d[newkey] = v\n continue\n else:\n v = new_d.pop(k)\n new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level))\n new_ds.append(new_d)\n\n if singleton:\n return new_ds[0]\n return new_ds\n\n\ndef _json_normalize(\n data: Union[Dict, List[Dict]],\n record_path: Optional[Union[str, List]] = None,\n meta: Optional[Union[str, List[Union[str, List[str]]]]] = None,\n meta_prefix: Optional[str] = None,\n record_prefix: Optional[str] = None,\n errors: str = \"raise\",\n sep: str = \".\",\n max_level: Optional[int] = None,\n) -> \"DataFrame\":\n \"\"\"\n Normalize semi-structured JSON data into a flat table.\n\n Parameters\n ----------\n data : dict or list of dicts\n Unserialized JSON objects.\n record_path : str or list of str, default None\n Path in each object to list of records. If not passed, data will be\n assumed to be an array of records.\n meta : list of paths (str or list of str), default None\n Fields to use as metadata for each record in resulting table.\n meta_prefix : str, default None\n If True, prefix records with dotted (?) path, e.g. 
foo.bar.field if\n meta is ['foo', 'bar'].\n record_prefix : str, default None\n If True, prefix records with dotted (?) path, e.g. foo.bar.field if\n path to records is ['foo', 'bar'].\n errors : {'raise', 'ignore'}, default 'raise'\n Configures error handling.\n\n * 'ignore' : will ignore KeyError if keys listed in meta are not\n always present.\n * 'raise' : will raise KeyError if keys listed in meta are not\n always present.\n sep : str, default '.'\n Nested records will generate names separated by sep.\n e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar.\n max_level : int, default None\n Max number of levels(depth of dict) to normalize.\n if None, normalizes all levels.\n\n .. versionadded:: 0.25.0\n\n Returns\n -------\n frame : DataFrame\n Normalize semi-structured JSON data into a flat table.\n\n Examples\n --------\n\n >>> from pandas.io.json import json_normalize\n >>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},\n ... {'name': {'given': 'Mose', 'family': 'Regner'}},\n ... {'id': 2, 'name': 'Faye Raker'}]\n >>> json_normalize(data)\n id name name.family name.first name.given name.last\n 0 1.0 NaN NaN Coleen NaN Volk\n 1 NaN NaN Regner NaN Mose NaN\n 2 2.0 Faye Raker NaN NaN NaN NaN\n\n >>> data = [{'id': 1,\n ... 'name': \"Cole Volk\",\n ... 'fitness': {'height': 130, 'weight': 60}},\n ... {'name': \"Mose Reg\",\n ... 'fitness': {'height': 130, 'weight': 60}},\n ... {'id': 2, 'name': 'Faye Raker',\n ... 'fitness': {'height': 130, 'weight': 60}}]\n >>> json_normalize(data, max_level=0)\n fitness id name\n 0 {'height': 130, 'weight': 60} 1.0 Cole Volk\n 1 {'height': 130, 'weight': 60} NaN Mose Reg\n 2 {'height': 130, 'weight': 60} 2.0 Faye Raker\n\n Normalizes nested data up to level 1.\n\n >>> data = [{'id': 1,\n ... 'name': \"Cole Volk\",\n ... 'fitness': {'height': 130, 'weight': 60}},\n ... {'name': \"Mose Reg\",\n ... 'fitness': {'height': 130, 'weight': 60}},\n ... {'id': 2, 'name': 'Faye Raker',\n ... 'fitness': {'height': 130, 'weight': 60}}]\n >>> json_normalize(data, max_level=1)\n fitness.height fitness.weight id name\n 0 130 60 1.0 Cole Volk\n 1 130 60 NaN Mose Reg\n 2 130 60 2.0 Faye Raker\n\n >>> data = [{'state': 'Florida',\n ... 'shortname': 'FL',\n ... 'info': {'governor': 'Rick Scott'},\n ... 'counties': [{'name': 'Dade', 'population': 12345},\n ... {'name': 'Broward', 'population': 40000},\n ... {'name': 'Palm Beach', 'population': 60000}]},\n ... {'state': 'Ohio',\n ... 'shortname': 'OH',\n ... 'info': {'governor': 'John Kasich'},\n ... 'counties': [{'name': 'Summit', 'population': 1234},\n ... {'name': 'Cuyahoga', 'population': 1337}]}]\n >>> result = json_normalize(data, 'counties', ['state', 'shortname',\n ... 
['info', 'governor']])\n >>> result\n name population state shortname info.governor\n 0 Dade 12345 Florida FL Rick Scott\n 1 Broward 40000 Florida FL Rick Scott\n 2 Palm Beach 60000 Florida FL Rick Scott\n 3 Summit 1234 Ohio OH John Kasich\n 4 Cuyahoga 1337 Ohio OH John Kasich\n\n >>> data = {'A': [1, 2]}\n >>> json_normalize(data, 'A', record_prefix='Prefix.')\n Prefix.0\n 0 1\n 1 2\n\n Returns normalized data with columns prefixed with the given string.\n \"\"\"\n\n def _pull_field(js: Dict[str, Any], spec: Union[List, str]) -> Iterable:\n result = js # type: ignore\n if isinstance(spec, list):\n for field in spec:\n result = result[field]\n else:\n result = result[spec]\n\n if not isinstance(result, Iterable):\n if pd.isnull(result):\n result = [] # type: ignore\n else:\n raise TypeError(\n f\"{js} has non iterable value {result} for path {spec}. \"\n \"Must be iterable or null.\"\n )\n\n return result\n\n if isinstance(data, list) and not data:\n return DataFrame()\n\n # A bit of a hackjob\n if isinstance(data, dict):\n data = [data]\n\n if record_path is None:\n if any([isinstance(x, dict) for x in y.values()] for y in data):\n # naive normalization, this is idempotent for flat records\n # and potentially will inflate the data considerably for\n # deeply nested structures:\n # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}\n #\n # TODO: handle record value which are lists, at least error\n # reasonably\n data = nested_to_record(data, sep=sep, max_level=max_level)\n return DataFrame(data)\n elif not isinstance(record_path, list):\n record_path = [record_path]\n\n if meta is None:\n meta = []\n elif not isinstance(meta, list):\n meta = [meta]\n\n _meta = [m if isinstance(m, list) else [m] for m in meta]\n\n # Disastrously inefficient for now\n records: List = []\n lengths = []\n\n meta_vals: DefaultDict = defaultdict(list)\n meta_keys = [sep.join(val) for val in _meta]\n\n def _recursive_extract(data, path, seen_meta, level=0):\n if isinstance(data, dict):\n data = [data]\n if len(path) > 1:\n for obj in data:\n for val, key in zip(_meta, meta_keys):\n if level + 1 == len(val):\n seen_meta[key] = _pull_field(obj, val[-1])\n\n _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)\n else:\n for obj in data:\n recs = _pull_field(obj, path[0])\n recs = [\n nested_to_record(r, sep=sep, max_level=max_level)\n if isinstance(r, dict)\n else r\n for r in recs\n ]\n\n # For repeating the metadata later\n lengths.append(len(recs))\n for val, key in zip(_meta, meta_keys):\n if level + 1 > len(val):\n meta_val = seen_meta[key]\n else:\n try:\n meta_val = _pull_field(obj, val[level:])\n except KeyError as e:\n if errors == \"ignore\":\n meta_val = np.nan\n else:\n raise KeyError(\n \"Try running with errors='ignore' as key \"\n f\"{e} is not always present\"\n )\n meta_vals[key].append(meta_val)\n records.extend(recs)\n\n _recursive_extract(data, record_path, {}, level=0)\n\n result = DataFrame(records)\n\n if record_prefix is not None:\n result = result.rename(columns=lambda x: f\"{record_prefix}{x}\")\n\n # Data types, a problem\n for k, v in meta_vals.items():\n if meta_prefix is not None:\n k = meta_prefix + k\n\n if k in result:\n raise ValueError(\n f\"Conflicting metadata name {k}, need distinguishing prefix \"\n )\n result[k] = np.array(v, dtype=object).repeat(lengths)\n return result\n\n\njson_normalize = deprecate(\n \"pandas.io.json.json_normalize\", _json_normalize, \"1.0.0\", \"pandas.json_normalize\"\n)\n"
] | [
[
"pandas.tseries.offsets.Hour",
"pandas.tseries.offsets.Day",
"pandas.Series",
"pandas.tseries.offsets.QuarterBegin",
"pandas.RangeIndex",
"pandas.tseries.offsets.MonthBegin",
"pandas.DataFrame",
"pandas.tseries.offsets.BusinessHour",
"pandas.tseries.offsets.FY5253",
"pandas.Timestamp",
"numpy.arange",
"pandas.tseries.offsets.DateOffset",
"pandas.tseries.offsets.Easter",
"pandas.core.sparse.api.SparseArray",
"pandas.tseries.offsets.YearBegin",
"pandas.tseries.offsets.SemiMonthBegin",
"pandas.interval_range",
"pandas.tseries.offsets.SemiMonthEnd",
"pandas.bdate_range",
"pandas.Categorical",
"pandas.tseries.offsets.LastWeekOfMonth",
"pandas.date_range",
"pandas.tseries.offsets.YearEnd",
"pandas.timedelta_range",
"pandas.tseries.offsets.CustomBusinessDay",
"pandas.period_range",
"pandas.tseries.offsets.WeekOfMonth",
"pandas.tseries.offsets.Week",
"pandas.Period",
"pandas.tseries.offsets.Minute",
"pandas.tseries.offsets.QuarterEnd",
"pandas.tseries.offsets.MonthEnd"
],
[
"pandas._testing.assert_almost_equal",
"pandas.Series",
"pandas.RangeIndex",
"numpy.arange",
"pandas.Index",
"pandas.Float64Index",
"pandas._testing.assert_frame_equal",
"numpy.random.randn",
"pandas._testing.assert_series_equal",
"numpy.array"
],
[
"pandas._libs.writers.convert_json_to_lines",
"pandas.isnull",
"pandas.DataFrame",
"pandas.util._decorators.deprecate",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
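The record above ends with pandas' _normalize.py, whose _json_normalize routine flattens nested JSON via record_path (the nested list to explode into rows) and meta (per-record fields repeated onto each row). As a quick orientation, a minimal usage sketch against the public entry point follows; it assumes pandas >= 1.0 (where json_normalize is exported at top level) and reuses the counties/governor example from the record's own docstring, so it is an illustration rather than part of the captured source.

import pandas as pd

data = [
    {"state": "Florida", "shortname": "FL",
     "info": {"governor": "Rick Scott"},
     "counties": [{"name": "Dade", "population": 12345},
                  {"name": "Broward", "population": 40000}]},
    {"state": "Ohio", "shortname": "OH",
     "info": {"governor": "John Kasich"},
     "counties": [{"name": "Summit", "population": 1234}]},
]

# record_path picks the nested list that becomes one row per element;
# meta repeats per-record metadata (including the nested ['info', 'governor'] path)
# onto every exploded row, which is what _recursive_extract does in the source above.
flat = pd.json_normalize(data, record_path="counties",
                         meta=["state", "shortname", ["info", "governor"]])
print(flat.columns.tolist())
# ['name', 'population', 'state', 'shortname', 'info.governor']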
egryaznov/nlpmovies | [
"4f2c5ffbcff12f279dc2622471e1b19175607f67"
] | [
"Assignment 2/Second Part/russian_classifier.py"
] | [
"# IMDB Movie Review Sentiment Classification\n# Second Assignment Solution\n# NLP Course, Innopolis University, Spring 2017\n# Author: Evgeny Gryaznov\n\nimport numpy\nimport ru_otzyv as ru\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\n\n\ndef compile_model(topn=20000, max_review_length=300, embedding_vector_length=300, dropout_value=0.3):\n \"\"\" Builds, compiles and trains the LSTM model for russian moive review\n classification problem.\n Keyword arguments:\n params -- a dictionary of parameters for the model. Currently the\n following entries are suported:\n 'top_words' -- the maximal length of a vocabulary\n 'max_review_length' -- the maximal length of a review\n 'embedding_vector_length' -- the length of the input vectors after\n applying `embedding` techique.\n 'dropout_value' -- the percentage of units that will be dropped.\n Returns:\n A tuple: [model, history], where `model` is created model and `history`\n its history of epoches.\"\"\"\n# Fix random seed for reproducibility...\n numpy.random.seed(7)\n# Compiling the model...\n model = Sequential()\n model.add(Embedding(topn, embedding_vector_length, input_length=max_review_length))\n model.add(LSTM(100, dropout_W=dropout_value, dropout_U=dropout_value))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\ndef train_model(model, x_train, y_train, nb_epochs=5):\n history = model.fit(x_train, y_train, validation_split=0.33, epochs=nb_epochs, batch_size=64)\n return history\n\n\n# Final evaluation of the model\ndef evaluate(model, x_test, y_test):\n \"\"\" Evaluates model on the given test data and returns specified metrics.\n Keyword arguments:\n model -- trained LSTM model.\n x_test -- padded and cooked test review data.\n y_test -- padded and cooked test rating data.\n Returns:\n A tuple of scores.\n \"\"\"\n scores = model.evaluate(x_test, y_test, verbose=0)\n return scores\n\n\ndef predict(model, review_filename, vocab):\n \"\"\" Predicts the rating of the given review.\n Keyword arguments:\n model -- trained LSTM model that will do the prediction.\n rivew_filename -- a name of the file where the text of the review\n is stored.\n vocab -- a compiled vocabulary of Russian tokens extracted from the\n dataset.\n Returns:\n The predicted rating of the review.\n \"\"\"\n review = ''\n with open('sample-reviews/' + review_filename, 'r') as f:\n review = f.read()\n x = sequence.pad_sequences([ru.digitize(review, vocab)], maxlen=300)\n predicted_rating = model.predict(x)\n return predicted_rating\n\n\ndef build_and_evaluate(topn=20000, max_review_length=300):\n \"\"\" Run this function to compile, train, evaluate and assess our LSTM\n model in one shot!\n Returns:\n Completed LSTM that you can play with.\n \"\"\"\n# Load the dataset but only keep the top n words, discarding others\n print('Preparing the dataset...')\n x_train, y_train, x_test, y_test = ru.cook_data(topn=topn)\n print(' Padding sequences...')\n# truncate and pad input sequences so they can fit into LSTM layer\n x_test = sequence.pad_sequences(x_test, maxlen=max_review_length)\n x_train = sequence.pad_sequences(x_train, maxlen=max_review_length)\n# Compile and train our LSTM\n print('Dataset preparation complete!\\nCompiling the model...')\n my_lstm = compile_model(topn=topn, 
max_review_length=max_review_length)\n print('Mode compilation complete!\\nTraining the model...')\n history = train_model(my_lstm, x_train, y_train, nb_epochs=4)\n# Plot the history of training\n print('Model training complete!\\nEvaluating performance...')\n plot_loss(history)\n plot_accuracy(history)\n# Evaluate the accuracy of our model\n scores = evaluate(my_lstm, x_test, y_test)\n print(\"Final Test Data Accuracy: %.2f%%\" % (scores[1] * 100))\n return my_lstm\n\n\ndef plot_loss(history):\n \"\"\" Plots the values of a loss function through training time (epoches). \"\"\"\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Loss of the Model')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n\n\ndef plot_accuracy(history):\n \"\"\" Plots the accuracy of a model through training time (epoches). \"\"\"\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('Accuracy of the Model')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n\n\nmy_lstm = build_and_evaluate(topn=20000)\nprint('-' * 30)\nprint('Loading vocabulary...')\nvocab = ru.load('ru-vocab.json')\n# Play with the model a little...\nreview_filename = 'positive_review0.txt'\nprint('Starting prediction...')\npredicted_rating = predict(my_lstm, review_filename, vocab)\nprint('Predicted rating for this review is: ' + str(predicted_rating))\n\n\n# batch normalization -- ??\n# проверить распределение данных -- DONE\n# балансировка данных: дублирование сэмплов -- DONE, Acc + 2%\n# validation set -- ??\n# голосование алгоритмов -- не буду делать\n# TODO -- поменьше тренировку, побольше test: 70 на 30\n# seaborn -- OK\n# return subsequences true -- ??\n# TODO -- softmax -- categorical crossentropy\n# TODO -- RMSprop\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.random.seed",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
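The russian_classifier.py captured in the record above wires up an Embedding → LSTM → sigmoid network using Keras 1-style arguments (dropout_W / dropout_U). For readers on a newer stack, a minimal sketch of the same architecture with Keras 2+ argument names is given below; the sizes (top 20000 tokens, 300-dim embeddings, 300-token reviews, 100 LSTM units, 0.3 dropout) come from the record, while the tensorflow.keras import path and the rest of the snippet are illustrative assumptions, not the author's code.

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# Sketch only: dropout/recurrent_dropout replace the removed dropout_W/dropout_U.
model = Sequential([
    Embedding(input_dim=20000, output_dim=300, input_length=300),
    LSTM(100, dropout=0.3, recurrent_dropout=0.3),
    Dense(1, activation="sigmoid"),
])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()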
remtav/SpaceNet7_Multi-Temporal_Solutions | [
"ee535c61fc22bffa45331519239c6d1b044b1514",
"ee535c61fc22bffa45331519239c6d1b044b1514"
] | [
"1-lxastro0/code_local/utils/create_dataset.py",
"1-lxastro0/code_local/sample_creation.py"
] | [
"import collections\nimport logging\nimport os\nimport warnings\nfrom pathlib import Path\nfrom typing import List, Union\n\nimport h5py\nfrom torch.utils.data import Dataset\nimport numpy as np\n\nimport models.coordconv\nfrom utils.utils import get_key_def, ordereddict_eval, compare_config_yamls\nfrom utils.geoutils import get_key_recursive\n\n# These two import statements prevent exception when using eval(metadata) in SegmentationDataset()'s __init__()\nfrom rasterio.crs import CRS\nfrom affine import Affine\n\nlogging.getLogger(__name__)\n\n\ndef append_to_dataset(dataset, sample):\n \"\"\"\n Append a new sample to a provided dataset. The dataset has to be expanded before we can add value to it.\n :param dataset:\n :param sample: data to append\n :return: Index of the newly added sample.\n \"\"\"\n old_size = dataset.shape[0] # this function always appends samples on the first axis\n dataset.resize(old_size + 1, axis=0)\n dataset[old_size, ...] = sample\n return old_size\n\n\ndef create_files_and_datasets(samples_size: int, number_of_bands: int, meta_map, samples_folder: Union[str, Path], params):\n \"\"\"\n Function to create the hdfs files (trn, val and tst).\n :param samples_size: size of individual hdf5 samples to be created\n :param number_of_bands: number of bands in imagery\n :param meta_map:\n :param samples_folder: (str) Path to the output folder.\n :param params: (dict) Parameters found in the yaml config file.\n :return: (hdf5 datasets) trn, val ant tst datasets.\n \"\"\"\n real_num_bands = number_of_bands - MetaSegmentationDataset.get_meta_layer_count(meta_map)\n assert real_num_bands > 0, \"invalid number of bands when accounting for meta layers\"\n hdf5_files = []\n for subset in [\"trn\", \"val\", \"tst\"]:\n hdf5_file = h5py.File(os.path.join(samples_folder, f\"{subset}_samples.hdf5\"), \"w\")\n hdf5_file.create_dataset(\"sat_img\", (0, samples_size, samples_size, real_num_bands), np.uint16,\n maxshape=(None, samples_size, samples_size, real_num_bands))\n hdf5_file.create_dataset(\"map_img\", (0, samples_size, samples_size), np.int16,\n maxshape=(None, samples_size, samples_size))\n hdf5_file.create_dataset(\"meta_idx\", (0, 1), dtype=np.int16, maxshape=(None, 1))\n try:\n hdf5_file.create_dataset(\"metadata\", (0, 1), dtype=h5py.string_dtype(), maxshape=(None, 1))\n hdf5_file.create_dataset(\"sample_metadata\", (0, 1), dtype=h5py.string_dtype(), maxshape=(None, 1))\n hdf5_file.create_dataset(\"params\", (0, 1), dtype=h5py.string_dtype(), maxshape=(None, 1))\n append_to_dataset(hdf5_file[\"params\"], repr(params))\n except AttributeError:\n logging.exception(f'Update h5py to version 2.10 or higher')\n raise\n hdf5_files.append(hdf5_file)\n return hdf5_files\n\n\nclass SegmentationDataset(Dataset):\n \"\"\"Semantic segmentation dataset based on HDF5 parsing.\"\"\"\n\n def __init__(self, work_folder,\n dataset_type,\n num_bands,\n max_sample_count=None,\n dontcare=None,\n radiom_transform=None,\n geom_transform=None,\n totensor_transform=None,\n params=None,\n debug=False):\n # note: if 'max_sample_count' is None, then it will be read from the dataset at runtime\n self.work_folder = work_folder\n self.max_sample_count = max_sample_count\n self.dataset_type = dataset_type\n self.num_bands = num_bands\n self.metadata = []\n self.radiom_transform = radiom_transform\n self.geom_transform = geom_transform\n self.totensor_transform = totensor_transform\n self.debug = debug\n self.dontcare = dontcare\n self.hdf5_path = os.path.join(self.work_folder, self.dataset_type + 
\"_samples.hdf5\")\n with h5py.File(self.hdf5_path, \"r\") as hdf5_file:\n for i in range(hdf5_file[\"metadata\"].shape[0]):\n metadata = hdf5_file[\"metadata\"][i, ...]\n if isinstance(metadata, np.ndarray) and len(metadata) == 1:\n metadata = metadata[0]\n metadata = ordereddict_eval(metadata)\n self.metadata.append(metadata)\n if self.max_sample_count is None:\n self.max_sample_count = hdf5_file[\"sat_img\"].shape[0]\n\n # load yaml used to generate samples\n hdf5_params = hdf5_file['params'][0, 0]\n hdf5_params = ordereddict_eval(hdf5_params)\n\n if dataset_type == 'trn' and isinstance(hdf5_params, dict) and isinstance(metadata, dict):\n # check match between current yaml and sample yaml for crucial parameters\n try:\n compare_config_yamls(hdf5_params, params)\n except TypeError:\n logging.exception(\"Couldn't compare current yaml with hdf5 yaml\")\n\n def __len__(self):\n return self.max_sample_count\n\n def _remap_labels(self, map_img):\n # note: will do nothing if 'dontcare' is not set in constructor, or set to non-zero value # TODO: seems like a temporary patch... dontcare should never be == 0, right ?\n if self.dontcare is None or self.dontcare != 0:\n return map_img\n # for now, the current implementation only handles the original 'dontcare' value as zero\n # to keep the impl simple, we just reduce all indices by one so that 'dontcare' becomes -1\n assert map_img.dtype == np.int8 or map_img.dtype == np.int16 or map_img.dtype == np.int32\n map_img -= 1\n return map_img\n\n def __getitem__(self, index):\n with h5py.File(self.hdf5_path, \"r\") as hdf5_file:\n sat_img = np.float32(hdf5_file[\"sat_img\"][index, ...])\n assert self.num_bands <= sat_img.shape[-1]\n map_img = self._remap_labels(hdf5_file[\"map_img\"][index, ...])\n meta_idx = int(hdf5_file[\"meta_idx\"][index])\n metadata = self.metadata[meta_idx]\n sample_metadata = hdf5_file[\"sample_metadata\"][index, ...][0]\n sample_metadata = eval(sample_metadata.decode('UTF-8'))\n if isinstance(metadata, np.ndarray) and len(metadata) == 1:\n metadata = metadata[0]\n elif isinstance(metadata, bytes):\n metadata = metadata.decode('UTF-8')\n try:\n metadata = eval(metadata)\n metadata.update(sample_metadata)\n except TypeError:\n pass # FI\n # where bandwise array has no data values, set as np.nan\n # sat_img[sat_img == metadata['nodata']] = np.nan # TODO: problem with lack of dynamic range. See: https://rasterio.readthedocs.io/en/latest/topics/masks.html\n\n sample = {\"sat_img\": sat_img, \"map_img\": map_img, \"metadata\": metadata,\n \"hdf5_path\": self.hdf5_path}\n\n if self.radiom_transform: # radiometric transforms should always precede geometric ones\n sample = self.radiom_transform(sample)\n if self.geom_transform: # rotation, geometric scaling, flip and crop. Will also put channels first and convert to torch tensor from numpy.\n sample = self.geom_transform(sample)\n\n sample = self.totensor_transform(sample)\n\n if self.debug:\n # assert no new class values in map_img\n initial_class_ids = set(np.unique(map_img))\n if self.dontcare is not None:\n initial_class_ids.add(self.dontcare)\n final_class_ids = set(np.unique(sample['map_img'].numpy()))\n if not final_class_ids.issubset(initial_class_ids):\n logging.debug(f\"WARNING: Class ids for label before and after augmentations don't match. 
\"\n f\"Ignore if overwritting ignore_index in ToTensorTarget\")\n sample['index'] = index\n return sample\n\n\nclass MetaSegmentationDataset(SegmentationDataset):\n \"\"\"Semantic segmentation dataset interface that appends metadata under new tensor layers.\"\"\"\n\n metadata_handling_modes = [\"const_channel\", \"scaled_channel\"]\n\n def __init__(self, work_folder,\n dataset_type,\n num_bands,\n meta_map,\n max_sample_count=None,\n dontcare=None,\n radiom_transform=None,\n geom_transform=True,\n totensor_transform=True,\n debug=False):\n assert meta_map is None or isinstance(meta_map, dict), \"unexpected metadata mapping object type\"\n assert meta_map is None or all([isinstance(k, str) and v in self.metadata_handling_modes for k, v in meta_map.items()]), \\\n \"unexpected metadata key type or value handling mode\"\n super().__init__(work_folder=work_folder, dataset_type=dataset_type, num_bands=num_bands,\n max_sample_count=max_sample_count,\n dontcare=dontcare,\n radiom_transform=radiom_transform,\n geom_transform=geom_transform,\n totensor_transform=totensor_transform,\n debug=debug)\n assert all([isinstance(m, (dict, collections.OrderedDict)) for m in self.metadata]), \\\n \"cannot use provided metadata object type with meta-mapping dataset interface\"\n self.meta_map = meta_map\n\n @staticmethod\n def append_meta_layers(tensor, meta_map, metadata):\n if meta_map:\n assert isinstance(metadata, (dict, collections.OrderedDict)), \"unexpected metadata type\"\n for meta_key, mode in meta_map.items():\n meta_val = get_key_recursive(meta_key, metadata)\n if mode == \"const_channel\":\n assert np.isscalar(meta_val), \"constant channel-wise assignment requires scalar value\"\n layer = np.full(tensor.shape[0:2], meta_val, dtype=np.float32)\n tensor = np.insert(tensor, tensor.shape[2], layer, axis=2)\n elif mode == \"scaled_channel\":\n assert np.isscalar(meta_val), \"scaled channel-wise coords assignment requires scalar value\"\n layers = models.coordconv.get_coords_map(tensor.shape[0], tensor.shape[1]) * meta_val\n tensor = np.insert(tensor, tensor.shape[2], layers, axis=2)\n # else...\n return tensor\n\n @staticmethod\n def get_meta_layer_count(meta_map):\n meta_layers = 0\n if meta_map:\n for meta_key, mode in meta_map.items():\n if mode == \"const_channel\":\n meta_layers += 1\n elif mode == \"scaled_channel\":\n meta_layers += 2\n return meta_layers\n\n def __getitem__(self, index):\n # put metadata layer in util func for inf script?\n with h5py.File(self.hdf5_path, \"r\") as hdf5_file:\n sat_img = hdf5_file[\"sat_img\"][index, ...]\n assert self.num_bands <= sat_img.shape[-1]\n map_img = self._remap_labels(hdf5_file[\"map_img\"][index, ...])\n meta_idx = int(hdf5_file[\"meta_idx\"][index])\n metadata = self.metadata[meta_idx]\n sample_metadata = hdf5_file[\"sample_metadata\"][index, ...]\n if isinstance(metadata, np.ndarray) and len(metadata) == 1:\n metadata = metadata[0]\n sample_metadata = sample_metadata[0]\n if isinstance(metadata, str):\n metadata = eval(metadata)\n sample_metadata = eval(sample_metadata)\n metadata.update(sample_metadata)\n assert meta_idx != -1, f\"metadata unvailable in sample #{index}\"\n sat_img = self.append_meta_layers(sat_img, self.meta_map, self.metadata[meta_idx])\n sample = {\"sat_img\": sat_img, \"map_img\": map_img, \"metadata\": metadata}\n if self.radiom_transform: # radiometric transforms should always precede geometric ones\n sample = self.radiom_transform(sample) # TODO: test this for MetaSegmentationDataset\n sample[\"sat_img\"] = 
self.append_meta_layers(sat_img, self.meta_map, metadata) # Overwrite sat_img with sat_img with metalayers\n if self.geom_transform:\n sample = self.geom_transform(sample) # rotation, geometric scaling, flip and crop. Will also put channels first and convert to torch tensor from numpy.\n sample = self.totensor_transform(sample) # TODO: test this for MetaSegmentationDataset\n return sample",
"import argparse\nfrom datetime import datetime\nimport logging\nimport os\nimport shutil\n\nimport numpy as np\n\nnp.random.seed(1234) # Set random seed for reproducibility\nimport warnings\nimport rasterio\nimport time\nimport json\n\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom collections import OrderedDict, Counter\nfrom typing import List\n\nfrom utils.create_dataset import create_files_and_datasets\nfrom utils.utils import get_key_def, pad, pad_diff, read_csv, add_metadata_from_raster_to_sample, get_git_hash\nfrom utils.geoutils import vector_to_raster\n# clip_raster_with_gpkg\nfrom utils.readers import read_parameters, image_reader_as_array\nfrom utils.verifications import validate_num_classes, validate_raster, assert_crs_match, \\\n validate_features_from_gpkg\nfrom rasterio.features import is_valid_geom\nfrom rasterio.mask import mask\nfrom rasterio.windows import Window\nfrom rasterio.plot import reshape_as_image\n\nlogging.getLogger(__name__)\n\n\ndef validate_class_prop_dict(actual_classes_dict, config_dict):\n \"\"\"\n Populate dictionary containing class values found in vector data with values (thresholds) from sample/class_prop\n parameter in config file\n\n actual_classes_dict: dict\n Dictionary where each key is a class found in vector data. Value is not relevant (should be 0)\n\n config_dict:\n Dictionary with class ids (keys and thresholds (values) from config file\n\n \"\"\"\n # Validation of class proportion parameters (assert types).\n if not isinstance(config_dict, dict):\n warnings.warn(f\"Class_proportion parameter should be a dictionary. Got type {type(config_dict)}. \"\n f\"Ignore if parameter was omitted)\")\n return None\n\n for key, value in config_dict.items():\n try:\n assert isinstance(key, str)\n int(key)\n except (ValueError, AssertionError):\n f\"Class should be a string castable as an integer. Got {key} of type {type(key)}\"\n assert isinstance(value, int), f\"Class value should be an integer, got {value} of type {type(value)}\"\n\n # Populate actual classes dictionary with values from config\n for key, value in config_dict.items():\n if int(key) in actual_classes_dict.keys():\n actual_classes_dict[int(key)] = value\n else:\n warnings.warn(f\"Class {key} not found in provided vector data.\")\n\n return actual_classes_dict.copy()\n\n\ndef getFeatures(gdf):\n \"\"\"Function to parse features from GeoDataFrame in such a manner that rasterio wants them\"\"\"\n import json\n return [json.loads(gdf.to_json())['features'][0]['geometry']]\n\n\ndef clip_raster_with_gpkg(raster, gpkg, debug=False):\n \"\"\"Clips input raster to limits of vector data in gpkg. 
Adapted from: https://automating-gis-processes.github.io/CSC18/lessons/L6/clipping-raster.html\n raster: Rasterio file handle holding the (already opened) input raster\n gpkg: Path and name of reference GeoPackage\n debug: if True, output raster as given by this function is saved to disk\n \"\"\"\n from shapely.geometry import box # geopandas and shapely become a project dependency only during sample creation\n import geopandas as gpd\n import fiona\n # Get extent of gpkg data with fiona\n with fiona.open(gpkg, 'r') as src:\n gpkg_crs = src.crs\n assert gpkg_crs == raster.crs\n minx, miny, maxx, maxy = src.bounds # ouest, nord, est, sud\n\n # Create a bounding box with Shapely\n bbox = box(minx, miny, maxx, maxy)\n\n # Insert the bbox into a GeoDataFrame\n geo = gpd.GeoDataFrame({'geometry': bbox}, index=[0]) # , crs=gpkg_crs['init'])\n\n # Re-project into the same coordinate system as the raster data\n # geo = geo.to_crs(crs=raster.crs.data)\n\n # Get the geometry coordinates by using the function.\n coords = getFeatures(geo)\n\n # clip the raster with the polygon\n out_tif = Path(raster.name).parent / f\"{Path(raster.name).stem}_clipped{Path(raster.name).suffix}\"\n if os.path.isfile(out_tif):\n return out_tif\n else:\n try:\n out_img, out_transform = mask(dataset=raster, shapes=coords, crop=True)\n out_meta = raster.meta.copy()\n out_meta.update({\"driver\": \"GTiff\",\n \"height\": out_img.shape[1],\n \"width\": out_img.shape[2],\n \"transform\": out_transform})\n with rasterio.open(out_tif, \"w\", **out_meta) as dest:\n print(f\"writing clipped raster to {out_tif}\")\n dest.write(out_img)\n return out_tif\n except ValueError as e: # if gpkg's extent outside raster: \"ValueError: Input shapes do not overlap raster.\"\n # TODO: warning or exception? 
if warning, except must be set in images_to_samples\n warnings.warn(f\"e\\n {raster.name}\\n{gpkg}\")\n\n\ndef process_raster_img(rst_pth, gpkg_pth):\n with rasterio.open(rst_pth) as src:\n #rst_array = clip_raster_with_gpkg(src, gpkg_pth)\n rst_array = src.read()\n return rst_array, src\n\n\ndef reorder_bands(a: List[str], b: List[str]):\n read_band_order = []\n for band in a:\n if band in b:\n read_band_order.insert(a.index(band) + 1, b.index(band) + 1)\n # print(f'{a.index(band)},{band}, {b.index(band)}')\n return read_band_order\n\n\ndef gen_img_samples(rst_pth, tile_size, overlap, *band_order):\n if overlap > 25:\n logging.warning(\"high overlap >25%, note that automatic train/val split creates very similar samples in both sets\")\n dist_samples = round(tile_size * (1 - (overlap / 100)))\n\n with rasterio.open(rst_pth) as src:\n for row in range(0, src.height, dist_samples):\n for column in range(0, src.width, dist_samples):\n window = Window.from_slices(slice(row, row + tile_size),\n slice(column, column + tile_size))\n if band_order:\n window_array = reshape_as_image(src.read(band_order[0], window=window))\n else:\n window_array = reshape_as_image(src.read(window=window))\n\n if window_array.shape[0] < tile_size or window_array.shape[1] < tile_size:\n padding = pad_diff(window_array.shape[0], window_array.shape[1], tile_size, tile_size)\n window_array = pad(window_array, padding, fill=np.nan)\n\n yield window_array\n\n\ndef process_vector_label(rst_pth, gpkg_pth):\n if rst_pth is not None:\n with rasterio.open(rst_pth) as src:\n np_label = vector_to_raster(vector_file=gpkg_pth,\n input_image=src,\n out_shape=(src.height, src.width),\n attribute_name='properties/Quatreclasses',\n fill=0,\n target_ids=[1, '1', 2, '2', 3, '3', 4, '4'],\n merge_all=True,\n )\n return np_label\n\n\ndef gen_label_samples(np_label, overlap, tile_size):\n if overlap > 25:\n logging.warning(\"high overlap >25%, note that automatic train/val split creates very similar samples in both sets\")\n dist_samples = round(tile_size * (1 - (overlap / 100)))\n\n h, w = np_label.shape\n for row in range(0, h, dist_samples):\n for column in range(0, w, dist_samples):\n target = np_label[row:row + tile_size, column:column + tile_size]\n target_row = target.shape[0]\n target_col = target.shape[1]\n if target_row < tile_size or target_col < tile_size:\n padding = pad_diff(target_row, target_col, tile_size,\n tile_size) # array, actual height, actual width, desired size\n target = pad(target, padding, fill=-1)\n indices = (row, column)\n yield target, indices\n\n\ndef minimum_annotated_percent(target_background_percent, min_annotated_percent):\n if not min_annotated_percent:\n return True\n elif float(target_background_percent) <= 100 - min_annotated_percent:\n return True\n\n return False\n\n\ndef append_to_dataset(dataset, sample):\n \"\"\"\n Append a new sample to a provided dataset. The dataset has to be expanded before we can add value to it.\n :param dataset:\n :param sample: data to append\n :return: Index of the newly added sample.\n \"\"\"\n old_size = dataset.shape[0] # this function always appends samples on the first axis\n dataset.resize(old_size + 1, axis=0)\n dataset[old_size, ...] 
= sample\n return old_size\n\n\ndef class_proportion(target, sample_size: int, class_min_prop: dict):\n if not class_min_prop:\n return True\n sample_total = sample_size ** 2\n for key, value in class_min_prop.items():\n if key not in np.unique(target):\n target_prop_classwise = 0\n else:\n target_prop_classwise = (round((np.bincount(target.clip(min=0).flatten())[key] / sample_total) * 100, 1))\n if target_prop_classwise < value:\n return False\n return True\n\n\ndef add_to_datasets(dataset,\n samples_file,\n val_percent,\n val_sample_file,\n data,\n target,\n sample_metadata,\n metadata_idx,\n dict_classes):\n \"\"\" Add sample to Hdf5 (trn, val or tst) and computes pixel classes(%). \"\"\"\n val = False\n if dataset == 'trn':\n random_val = np.random.randint(1, 100)\n if random_val > val_percent:\n pass\n else:\n val = True\n samples_file = val_sample_file\n append_to_dataset(samples_file[\"sat_img\"], data)\n append_to_dataset(samples_file[\"map_img\"], target)\n append_to_dataset(samples_file[\"sample_metadata\"], repr(sample_metadata))\n append_to_dataset(samples_file[\"meta_idx\"], metadata_idx)\n\n # adds pixel count to pixel_classes dict for each class in the image\n for key, value in enumerate(np.bincount(target.clip(min=0).flatten())):\n cls_keys = dict_classes.keys()\n if key in cls_keys:\n dict_classes[key] += value\n elif key not in cls_keys and value > 0:\n raise ValueError(f\"A class value was written ({key}) that was not defined in the classes ({cls_keys}).\")\n\n return val\n\n\ndef sample_prep(src, data, target, indices, gpkg_classes, sample_size, sample_type, samples_count, samples_file,\n num_classes,\n val_percent,\n val_sample_file,\n min_annot_perc=None,\n class_prop=None,\n dontcare=-1\n ):\n added_samples = 0\n excl_samples = 0\n\n # print('gpkg_classes', gpkg_classes)\n pixel_classes = {key: 0 for key in gpkg_classes}\n background_val = 0\n pixel_classes[background_val] = 0\n class_prop = validate_class_prop_dict(pixel_classes, class_prop)\n pixel_classes[dontcare] = 0\n\n image_metadata = add_metadata_from_raster_to_sample(sat_img_arr=data,\n raster_handle=src,\n meta_map={},\n raster_info={})\n # Save label's per class pixel count to image metadata\n image_metadata['source_label_bincount'] = {class_num: count for class_num, count in\n enumerate(np.bincount(target.clip(min=0).flatten()))\n if count > 0} # TODO: add this to add_metadata_from[...] function?\n\n if sample_type == 'trn':\n idx_samples = samples_count['trn']\n append_to_dataset(val_sample_file[\"metadata\"], repr(image_metadata))\n elif sample_type == 'tst':\n idx_samples = samples_count['tst']\n else:\n raise ValueError(f\"Sample type must be trn or tst. Provided type is {sample_type}\")\n\n idx_samples_v = samples_count['val']\n # Adds raster metadata to the dataset. 
All samples created by tiling below will point to that metadata by index\n metadata_idx = append_to_dataset(samples_file[\"metadata\"], repr(image_metadata))\n u, count = np.unique(target, return_counts=True)\n # print('class:', u, 'count:', count)\n target_background_percent = round(count[0] / np.sum(count) * 100 if 0 in u else 0, 1)\n sample_metadata = {'sample_indices': indices}\n val = False\n if minimum_annotated_percent(target_background_percent, min_annot_perc) and \\\n class_proportion(target, sample_size, class_prop):\n val = add_to_datasets(dataset=sample_type,\n samples_file=samples_file,\n val_percent=val_percent,\n val_sample_file=val_sample_file,\n data=data,\n target=target,\n sample_metadata=sample_metadata,\n metadata_idx=metadata_idx,\n dict_classes=pixel_classes)\n if val:\n idx_samples_v += 1\n else:\n idx_samples += 1\n added_samples += 1\n else:\n excl_samples += 1\n\n target_class_num = np.max(u)\n if num_classes < target_class_num:\n num_classes = target_class_num\n\n sample_type_ = 'val' if val else sample_type\n # assert added_samples > 0, \"No sample added for current raster. Problems may occur with use of metadata\"\n\n if sample_type == 'tst':\n samples_count['tst'] = idx_samples\n else:\n samples_count['trn'] = idx_samples\n samples_count['val'] = idx_samples_v\n\n # return the appended samples count and number of classes.\n # print('pixel_classes', pixel_classes)\n # print(samples_count, num_classes)\n\n return samples_count, num_classes, pixel_classes\n\n\ndef class_pixel_ratio(pixel_classes: dict, source_data: str, file_path: str):\n with open(file_path, 'a+') as f:\n pixel_total = sum(pixel_classes.values())\n print(f'\\n****{source_data}****\\n', file=f)\n for i in pixel_classes:\n prop = round((pixel_classes[i] / pixel_total) * 100, 1) if pixel_total > 0 else 0\n print(f'{source_data}_class', i, ':', prop, '%', file=f)\n print(f'\\n****{source_data}****\\n', file=f)\n\n\ndef main(params):\n \"\"\"\n Training and validation datasets preparation.\n :param params: (dict) Parameters found in the yaml config file.\n\n \"\"\"\n start_time = time.time()\n\n # MANDATORY PARAMETERS\n num_classes = get_key_def('num_classes', params['global'], expected_type=int)\n num_bands = get_key_def('number_of_bands', params['global'], expected_type=int)\n csv_file = get_key_def('prep_csv_file', params['sample'], expected_type=str)\n\n # OPTIONAL PARAMETERS\n # basics\n debug = get_key_def('debug_mode', params['global'], False)\n task = get_key_def('task', params['global'], 'segmentation', expected_type=str)\n if task == 'classification':\n raise ValueError(f\"Got task {task}. 
Expected 'segmentation'.\")\n elif not task == 'segmentation':\n raise ValueError(f\"images_to_samples.py isn't necessary for classification tasks\")\n data_path = Path(get_key_def('data_path', params['global'], './data', expected_type=str))\n Path.mkdir(data_path, exist_ok=True, parents=True)\n val_percent = get_key_def('val_percent', params['sample'], default=10, expected_type=int)\n\n # mlflow logging\n mlflow_uri = get_key_def('mlflow_uri', params['global'], default=\"./mlruns\")\n experiment_name = get_key_def('mlflow_experiment_name', params['global'], default='gdl-training', expected_type=str)\n\n # parameters to set hdf5 samples directory\n data_path = Path(get_key_def('data_path', params['global'], './data', expected_type=str))\n samples_size = get_key_def(\"samples_size\", params[\"global\"], default=1024, expected_type=int)\n overlap = get_key_def(\"overlap\", params[\"sample\"], default=5, expected_type=int)\n min_annot_perc = get_key_def('min_annotated_percent', params['sample']['sampling_method'], default=0,\n expected_type=int)\n if not data_path.is_dir():\n raise FileNotFoundError(f'Could not locate data path {data_path}')\n samples_folder_name = (f'samples{samples_size}_overlap{overlap}_min-annot{min_annot_perc}_{num_bands}bands'\n f'_{experiment_name}')\n\n # other optional parameters\n dontcare = get_key_def(\"ignore_index\", params[\"training\"], -1)\n meta_map = get_key_def('meta_map', params['global'], default={})\n metadata = None\n targ_ids = get_key_def('target_ids', params['sample'], None, expected_type=List)\n class_prop = get_key_def('class_proportion', params['sample']['sampling_method'], None, expected_type=dict)\n mask_reference = get_key_def('mask_reference', params['sample'], default=False, expected_type=bool)\n\n # add git hash from current commit to parameters if available. Parameters will be saved to hdf5s\n params['global']['git_hash'] = get_git_hash()\n\n list_params = params['read_img']\n source_pan = get_key_def('pan', list_params['source'], default=False, expected_type=bool)\n source_mul = get_key_def('mul', list_params['source'], default=False, expected_type=bool)\n mul_band_order = get_key_def('mulband', list_params['source'], default=[], expected_type=list)\n prep_band = get_key_def('band', list_params['prep'], default=[], expected_type=list)\n tst_set = get_key_def('benchmark', list_params, default=[], expected_type=list)\n in_pth = Path(get_key_def('input_file', list_params, default='data_file.json', expected_type=str))\n data_pth = in_pth.parent.parent\n gpkg_status = 'all'\n\n smpls_dir = data_path.joinpath(samples_folder_name)\n if smpls_dir.is_dir():\n if debug:\n # Move existing data folder with a random suffix.\n last_mod_time_suffix = datetime.fromtimestamp(smpls_dir.stat().st_mtime).strftime('%Y%m%d-%H%M%S')\n shutil.move(smpls_dir, data_path.joinpath(f'{str(smpls_dir)}_{last_mod_time_suffix}'))\n else:\n raise FileExistsError(f'Data path exists: {smpls_dir}. 
Remove it or use a different experiment_name.')\n Path.mkdir(smpls_dir, exist_ok=False) # TODO: what if we want to append samples to existing hdf5?\n\n trn_hdf5, val_hdf5, tst_hdf5 = create_files_and_datasets(samples_size=samples_size,\n number_of_bands=num_bands,\n meta_map=meta_map,\n samples_folder=smpls_dir,\n params=params)\n\n number_samples = {'trn': 0, 'val': 0, 'tst': 0}\n number_classes = 0\n\n pixel_pan_counter = Counter()\n pixel_mul_counter = Counter()\n pixel_prep_counter = Counter()\n filename = smpls_dir.joinpath('class_distribution.txt')\n\n with open(Path(in_pth), 'r') as fin:\n dict_images = json.load(fin)\n\n for i_dict in tqdm(dict_images['all_images'], desc=f'Writing samples to {smpls_dir}'):\n\n if source_pan:\n if not len(i_dict['pan_img']) == 0 and i_dict['gpkg']:\n if gpkg_status == 'all':\n if 'corr' or 'prem' in i_dict['gpkg'].keys():\n gpkg = list(i_dict['gpkg'].values())[0]\n gpkg_classes = validate_num_classes(gpkg, params['global']['num_classes'],\n 'properties/Quatreclasses',\n dontcare, target_ids=targ_ids)\n for img_pan in i_dict['pan_img']:\n assert_crs_match(img_pan, gpkg)\n rst_pth, r_ = process_raster_img(data_pth/img_pan, gpkg)\n np_label = process_vector_label(data_pth/rst_pth, gpkg)\n if np_label is not None:\n if Path(gpkg).stem in tst_set:\n sample_type = 'tst'\n out_file = tst_hdf5\n else:\n sample_type = 'trn'\n out_file = trn_hdf5\n val_file = val_hdf5\n src = r_\n pan_label_gen = gen_label_samples(np_label, overlap, samples_size)\n pan_img_gen = gen_img_samples(rst_pth, samples_size, overlap)\n else:\n continue\n for pan_img, pan_label in zip(pan_img_gen, pan_label_gen):\n number_samples, number_classes, class_pixels_pan = sample_prep(src, pan_img, pan_label[0],\n pan_label[1], gpkg_classes,\n samples_size, sample_type,\n number_samples, out_file,\n number_classes,\n val_percent, val_file,\n min_annot_perc,\n class_prop=class_prop,\n dontcare=dontcare)\n pixel_pan_counter.update(class_pixels_pan)\n\n if source_mul:\n if not len(i_dict['mul_img']) == 0 and i_dict['gpkg']:\n band_order = reorder_bands(i_dict['mul_band'], mul_band_order)\n if gpkg_status == 'all':\n if 'corr' or 'prem' in i_dict['gpkg'].keys():\n gpkg = list(i_dict['gpkg'].values())[0]\n gpkg_classes = validate_num_classes(gpkg, params['global']['num_classes'],\n 'properties/Quatreclasses',\n dontcare, target_ids=targ_ids)\n for img_mul in i_dict['mul_img']:\n assert_crs_match(img_mul, gpkg)\n rst_pth, r_ = process_raster_img(img_mul, gpkg)\n np_label = process_vector_label(rst_pth, gpkg)\n if np_label is not None:\n if Path(gpkg).stem in tst_set:\n sample_type = 'tst'\n out_file = tst_hdf5\n else:\n sample_type = 'trn'\n out_file = trn_hdf5\n val_file = val_hdf5\n src = r_\n\n mul_label_gen = gen_label_samples(np_label, overlap, samples_size)\n mul_img_gen = gen_img_samples(rst_pth, samples_size, overlap, band_order)\n else:\n continue\n for mul_img, mul_label in zip(mul_img_gen, mul_label_gen):\n number_samples, number_classes, class_pixels_mul = sample_prep(src, mul_img, mul_label[0],\n mul_label[1], gpkg_classes,\n samples_size, sample_type,\n number_samples, out_file,\n number_classes,\n val_percent, val_file,\n min_annot_perc,\n class_prop=class_prop,\n dontcare=dontcare)\n pixel_mul_counter.update(class_pixels_mul)\n\n if prep_band:\n bands_gen_list = []\n if set(prep_band).issubset({'R', 'G', 'B', 'N'}):\n for ib in prep_band:\n if i_dict[f'{ib}_band'] and i_dict['gpkg']:\n if gpkg_status == 'all':\n if 'corr' or 'prem' in i_dict['gpkg'].keys():\n gpkg = 
list(i_dict['gpkg'].values())[0]\n gpkg = data_pth/gpkg\n band = data_pth / i_dict[f'{ib}_band']\n gpkg_classes = validate_num_classes(gpkg, params['global']['num_classes'],\n 'properties/Quatreclasses',\n dontcare, target_ids=targ_ids)\n assert_crs_match(band, gpkg)\n r_, rst_pth = process_raster_img(band, gpkg)\n np_label = process_vector_label(band, gpkg)\n prep_img_gen = gen_img_samples(band, samples_size, overlap)\n bands_gen_list.append(prep_img_gen)\n\n if np_label is not None:\n if Path(gpkg).stem in tst_set:\n sample_type = 'tst'\n out_file = tst_hdf5\n else:\n sample_type = 'trn'\n out_file = trn_hdf5\n val_file = val_hdf5\n src = r_\n prep_label_gen = gen_label_samples(np_label, overlap, samples_size)\n if len(prep_band) and len(bands_gen_list) == 1:\n for b1, prep_label in zip(bands_gen_list[0], prep_label_gen):\n prep_img = b1\n number_samples, number_classes, class_pixels_prep = sample_prep(src, prep_img,\n prep_label[0],\n prep_label[1], gpkg_classes,\n samples_size, sample_type,\n number_samples, out_file,\n number_classes,\n val_percent, val_file,\n min_annot_perc,\n class_prop=class_prop,\n dontcare=dontcare)\n pixel_prep_counter.update(class_pixels_prep)\n\n elif len(prep_band) and len(bands_gen_list) == 2:\n for b1, b2, prep_label in zip(*bands_gen_list, prep_label_gen):\n prep_img = np.dstack(np.array([b1, b2]))\n number_samples, number_classes, class_pixels_prep = sample_prep(src, prep_img,\n prep_label[0],\n prep_label[1], gpkg_classes,\n samples_size, sample_type,\n number_samples, out_file,\n number_classes,\n val_percent, val_file,\n min_annot_perc,\n class_prop=class_prop,\n dontcare=dontcare)\n pixel_prep_counter.update(class_pixels_prep)\n\n elif len(prep_band) and len(bands_gen_list) == 3:\n for b1, b2, b3, prep_label in zip(*bands_gen_list, prep_label_gen):\n prep_img = np.dstack(np.array([b1, b2, b3]))\n number_samples, number_classes, class_pixels_prep = sample_prep(src, prep_img,\n prep_label[0],\n prep_label[1], gpkg_classes,\n samples_size, sample_type,\n number_samples, out_file,\n number_classes,\n val_percent, val_file,\n min_annot_perc,\n class_prop=class_prop,\n dontcare=dontcare)\n pixel_prep_counter.update(class_pixels_prep)\n\n elif len(prep_band) and len(bands_gen_list) == 4:\n for b1, b2, b3, b4, prep_label in zip(*bands_gen_list, prep_label_gen):\n prep_img = np.dstack(np.array([b1, b2, b3, b4]))\n number_samples, number_classes, class_pixels_prep = sample_prep(src, prep_img,\n prep_label[0],\n prep_label[1], gpkg_classes,\n samples_size, sample_type,\n number_samples, out_file,\n number_classes,\n val_percent, val_file,\n min_annot_perc,\n class_prop=class_prop,\n dontcare=dontcare)\n pixel_prep_counter.update(class_pixels_prep)\n else:\n continue\n trn_hdf5.close()\n val_hdf5.close()\n tst_hdf5.close()\n\n class_pixel_ratio(pixel_pan_counter, 'pan_source', filename)\n class_pixel_ratio(pixel_mul_counter, 'mul_source', filename)\n class_pixel_ratio(pixel_prep_counter, 'prep_source', filename)\n print(\"Number of samples created: \", number_samples, number_classes)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Sample preparation')\n parser.add_argument('ParamFile', metavar='DIR',\n help='Path to training parameters stored in yaml')\n args = parser.parse_args()\n params = read_parameters(args.ParamFile)\n start_time = time.time()\n tqdm.write(f'\\n\\nStarting images to samples preparation with {args.ParamFile}\\n\\n')\n main(params)\n print(\"Elapsed time:{}\".format(time.time() - start_time))\n"
] | [
[
"numpy.unique",
"numpy.full",
"numpy.insert",
"numpy.isscalar",
"numpy.float32"
],
[
"numpy.random.seed",
"numpy.unique",
"numpy.max",
"numpy.array",
"numpy.sum",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
artek0chumak/hivemind | [
"c6b2b2d84ccfc890314a2bfece8eef238372d410",
"762f116ffcd6c194b888ed64c8a82033cc97dce7"
] | [
"hivemind/compression/quantization.py",
"hivemind/optim/optimizer.py"
] | [
"import math\nimport os\nfrom abc import ABC, abstractmethod\nfrom concurrent.futures import ThreadPoolExecutor\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\n\nfrom hivemind.compression.base import CompressionBase, CompressionInfo\nfrom hivemind.proto import runtime_pb2\n\nEXECUTOR = ThreadPoolExecutor(max_workers=int(os.environ.get(\"QUANTIZATION_THREADS\", 128)))\n\n\nclass Quantization(CompressionBase, ABC):\n codebook_dtype, indices_dtype = np.float32, np.uint8\n\n @abstractmethod\n def quantize(self, tensor: torch.Tensor, allow_inplace: bool = False) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Convert tensor into a pair of (indices, codebook)\"\"\"\n ...\n\n def compress(self, tensor: torch.Tensor, info: CompressionInfo, allow_inplace: bool = False) -> runtime_pb2.Tensor:\n quantized, codebook = self.quantize(tensor.detach(), allow_inplace=allow_inplace)\n return runtime_pb2.Tensor(\n compression=self.compression_type,\n buffer=b\"\".join((np.int64(len(codebook)).tobytes(), codebook.tobytes(), quantized.tobytes())),\n size=tensor.shape,\n dtype=tensor.numpy().dtype.name,\n requires_grad=tensor.requires_grad,\n )\n\n def extract(self, serialized_tensor: runtime_pb2.Tensor) -> torch.Tensor:\n codebook_size = int(np.frombuffer(serialized_tensor.buffer, count=1, dtype=np.int64))\n codebook = np.frombuffer(serialized_tensor.buffer, offset=8, count=codebook_size, dtype=self.codebook_dtype)\n quantized = np.frombuffer(serialized_tensor.buffer, offset=8 + codebook.nbytes, dtype=self.indices_dtype)\n quantized = torch.as_tensor(quantized, dtype=torch.int64).reshape(tuple(serialized_tensor.size))\n codebook = torch.as_tensor(np.asarray(codebook, dtype=serialized_tensor.dtype))\n return codebook[quantized]\n\n def estimate_compression_ratio(self, info: CompressionInfo) -> float:\n return self.n_bits / torch.finfo(info.descriptor.dtype).bits\n\n @property\n def n_bits(self):\n return self.indices_dtype(1).itemsize * 8\n\n @property\n def n_bins(self):\n return 2**self.n_bits\n\n\nclass Uniform8BitQuantization(Quantization):\n RANGE_IN_SIGMAS: int = 6\n compression_type = runtime_pb2.UNIFORM_8BIT\n\n def quantize(self, tensor: torch.Tensor, allow_inplace: bool = False) -> Tuple[np.ndarray, np.ndarray]:\n offset = self.n_bins // 2\n shift = tensor.mean()\n centered_tensor = tensor.sub_(shift) if allow_inplace else tensor - shift\n std_unbiased = centered_tensor.norm() / math.sqrt(centered_tensor.numel() - 1)\n scale = self.RANGE_IN_SIGMAS * std_unbiased / self.n_bins\n quantized = torch.quantize_per_tensor(centered_tensor, scale, offset, torch.quint8).int_repr()\n lookup = average_buckets(tensor, quantized, self.n_bins)\n return np.asarray(quantized, dtype=self.indices_dtype), np.asarray(lookup, dtype=self.codebook_dtype)\n\n\nclass Quantile8BitQuantization(Quantization):\n compression_type = runtime_pb2.QUANTILE_8BIT\n\n def quantize(self, tensor: torch.Tensor, allow_inplace: bool = False) -> Tuple[np.ndarray, np.ndarray]:\n tensor = tensor.detach().float()\n borders = torch.as_tensor(quantile_qq_approximation(tensor.numpy(), self.n_bins + 1)[1:-1])\n quantized = torch.clamp_(torch.bucketize(tensor, borders), 0, self.n_bins - 1)\n codebook = average_buckets(tensor, quantized, self.n_bins)\n return quantized.numpy().astype(np.uint8), codebook.numpy()\n\n\ndef average_buckets(tensor: torch.Tensor, quant_weight: torch.Tensor, n_bins: int):\n \"\"\"Return the average value in each bucket\"\"\"\n bin_sums = torch.zeros(n_bins).scatter_add_(0, quant_weight.flatten().long(), 
tensor.flatten())\n bin_counts = torch.clamp_min_(torch.bincount(quant_weight.flatten(), minlength=n_bins), 1)\n lookup = bin_sums / bin_counts\n return lookup\n\n\ndef get_chunk_size(num_elements: int, min_chunk_size: int) -> int:\n \"\"\"Adjust chunk_size to minimize imbalance between chunk sizes\"\"\"\n if min_chunk_size >= num_elements:\n return min_chunk_size\n leftover_elements = num_elements % min_chunk_size\n num_chunks = num_elements // min_chunk_size\n return min_chunk_size + (leftover_elements - 1) // num_chunks + 1\n\n\ndef quantile_qq_approximation(array: np.ndarray, n_quantiles: int, min_chunk_size: int = 10**5) -> np.ndarray:\n \"\"\"Estimate uniform quantiles of data using quantile-of-quantiles. Runs in parallel.\"\"\"\n if not array.data.c_contiguous and array.data.f_contiguous:\n array = array.T\n array = np.ascontiguousarray(array.reshape(-1))\n quantiles = np.linspace(0.0, 1.0, num=n_quantiles, dtype=array.dtype)\n chunk_size = get_chunk_size(len(array), min_chunk_size)\n num_chunks = (len(array) - 1) // chunk_size + 1\n partition_quantiles = np.empty((num_chunks, len(quantiles)), dtype=array.dtype)\n\n jobs = []\n for i in range(num_chunks):\n chunk = slice(chunk_size * i, chunk_size * (i + 1))\n jobs.append(EXECUTOR.submit(np.quantile, array[chunk], quantiles, out=partition_quantiles[i]))\n\n for job in jobs:\n job.result()\n return np.quantile(partition_quantiles, quantiles)\n",
"from __future__ import annotations\n\nimport logging\nimport os\nimport time\nfrom functools import partial\nfrom typing import Callable, Optional, Sequence, Union\n\nimport torch\n\nfrom hivemind.averaging.control import AveragingStage, StepControl\nfrom hivemind.compression import CompressionBase, NoCompression\nfrom hivemind.dht import DHT\nfrom hivemind.optim.grad_averager import GradientAverager\nfrom hivemind.optim.grad_scaler import GradScaler\nfrom hivemind.optim.progress_tracker import LocalTrainingProgress, ProgressTracker\nfrom hivemind.optim.state_averager import (\n LRSchedulerBase,\n OptimizerFactory,\n Parameters,\n ParamGroups,\n SchedulerFactory,\n TorchOptimizer,\n TrainingStateAverager,\n)\nfrom hivemind.utils import PerformanceEMA, get_dht_time, get_logger\n\nlogger = get_logger(__name__)\n\n\nclass Optimizer(torch.optim.Optimizer):\n \"\"\"\n hivemind.Optimizer wraps your regular PyTorch Optimizer for training collaboratively with peers.\n\n By default, Optimizer is configured to be exactly **equivalent to synchronous training** with target_batch_size.\n There are advanced options make training semi-asynchronous (delay_optimizer_step and delay_gradient_averaging)\n or even fully asynchronous (use_local_updates=True).\n\n :example: The Optimizer can be used as a drop-in replacement for a regular PyTorch Optimizer:\n\n >>> model = transformers.AutoModel(\"albert-xxlarge-v2\")\n >>> dht = hivemind.DHT(initial_peers=INITIAL_PEERS, start=True)\n >>> opt = hivemind.Optimizer(dht=dht, run_id=\"run_42\", batch_size_per_step=4, target_batch_size=4096,\n >>> params=model.parameters(), optimizer=lambda params: torch.optim.Adam(params))\n >>> while True:\n >>> loss = compute_loss_on_batch(model, batch_size=4)\n >>> opt.zero_grad()\n >>> loss.backward()\n >>> opt.step() # <-- train collaboratively with any peers that use the same prefix (run_42)\n\n By default, peers will perform the following steps:\n\n * accumulate a minibatch of gradients towards the (global) target batch size, without updating parameters yet;\n * after peers collectively accumulate target_batch_size, average gradients with peers and perform optimizer step;\n * if your peer lags behind the rest of the swarm, it will download parameters and optimizer state from others;\n\n Unlike regular training, your device may join midway through training, when other peers already made some progress.\n For this reason, any learning rate schedulers, curriculum and other **time-dependent features should be based on**\n ``optimizer.local_epoch`` (and not the number ot calls to opt.step). Otherwise, peers that joined training late\n may end up having different learning rates. To do so automatically, specify ``scheduler=...`` parameter below.\n\n :What is an epoch?: Optimizer uses the term ``epoch`` to describe intervals between synchronizations. 
One epoch\n coresponds to processing certain number of training samples (``target_batch_size``) in total across all peers.\n Like in PyTorch LR Scheduler, **epoch does not necessarily correspond to a full pass over the training data.**\n At the end of epoch, peers perform synchronous actions such as averaging gradients for a global optimizer update,\n updating the learning rate scheduler or simply averaging parameters (if using local updates).\n The purpose of this is to ensure that changing the number of peers does not require changing hyperparameters.\n For instance, if the number of peers doubles, they will run all-reduce more frequently to adjust for faster training.\n\n :Configuration guide: This guide will help you set up your first collaborative training run. It covers the most\n important basic options, but ignores features that require significant changes to the training code.\n\n >>> dht = hivemind.DHT(initial_peers=INITIAL_PEERS, client_mode=IF_BEHIND_FIREWALL_OR_VERY_UNRELIABLE, start=True)\n >>> opt = hivemind.Optimizer(\n >>> dht=dht, run_id=\"a_unique_name_that_every_participant_will_see_when_training\",\n >>> batch_size_per_step=ACTUAL_BATCH_SIZE_OF_THIS_PEER, target_batch_size=LARGE_GLOBAL_BATCH,\n >>> # ^--- Each global optimzier step will use gradients from 1x-1.1x of target_batch_size (due to latency);\n >>> # It is recommended to train with very large batch sizes to reduce the % of time spent on communication.\n >>>\n >>> params=params, optimizer=lambda params: AnyPyTorchOptimizer(params, **hyperparams_for_target_batch_size),\n >>> # tune learning rate for your target_batch_size. Here's a good reference: https://arxiv.org/abs/1904.00962\n >>> scheduler=lambda opt: AnyPyTorchScheduler(opt, **hyperparams_for_target_batch_size),\n >>> # scheduler.step will be called automatically each time when peers collectively accumulate target_batch_size\n >>>\n >>> offload_optimizer=True, # saves GPU memory, but increases RAM usage; Generally a good practice to use this.\n >>> delay_grad_averaging=OPTIONAL, delay_optimizer_step=OPTIONAL, # train faster, but with 1 round of staleness;\n >>> # setting both to True is equivalent to Delayed Parameter Updates (see https://arxiv.org/abs/2101.06840)\n >>>\n >>> grad_compression=hivemind.Float16Compression(), state_averaging_compression=hivemind.Float16Compression(),\n >>> # ^-- it is usually fine to use pure 16-bit or even lower precision during communication with no precaution;\n >>> # See hivemind/examples/albert for an working example of mixed 8/16-bit compression.\n >>>\n >>> matchmaking_time=15.0, # 3-5s for small local runs, 10-15s for training over the internet or with many peers\n >>> averaging_timeout=60.0, # around of 2x the actual time it takes to run all-reduce\n >>> verbose=True # periodically report the training progress to the console (e.g. \"Averaged with N peers\")\n >>> ) # and you're done!\n\n\n :param dht: a running hivemind.DHT instance connected to other peers.\n :param run_id: a unique identifier of this training run, used as a common prefix for all DHT keys.\n **Note:** peers with the same run_id should *generally* train the same model and use compatible configurations.\n Some options can be safely changed by individual peers: ``batch_size_per_step``, ``client_mode``, ``auxiliary``,\n ``reuse_grad_buffers``, ``offload_optimizer``, and ``verbose``. 
In some cases, other options may also be tuned\n individually by each peer, but they should be changed with caution to avoid deadlocks or convergence issues.\n\n :param target_batch_size: global batch size that must be accumulated before the swarm transitions to the next epoch.\n The actual batch may be *slightly* larger due asynchrony (e.g. peers submit more gradients in the last second).\n :param batch_size_per_step: you should accumulate gradients over this many samples between calls to optimizer.step.\n\n :param params: parameters or param groups for the optimizer; required if optimizer is a callable(params).\n :param optimizer: a callable(parameters) -> pytorch.optim.Optimizer or a pre-initialized PyTorch optimizer.\n **Note:** some advanced options like offload_optimizer, delay_optimizer_step, or delay_grad_averaging require\n and require the callable and will not work if hivemind.optimizer is created with a pre-existing PyTorch Optimizer.\n :param scheduler: callable(optimizer) -> PyTorch LRScheduler or a pre-initialized PyTorch scheduler.\n The learning rate scheduler will adjust learning rate based on global epoch, not the number of\n local calls to optimizer.step; this is required to keep different peers synchronized.\n\n :param matchmaking_time: when looking for group, wait for peers to join for up to this many seconds.\n Increase if you see \"averaged gradients with N peers\" where N is below 0.9x the real siee on >=25% of epochs.\n When training with low-latency network, decreasing matchmaking_time allows training with smaller batch sizes.\n :param averaging_timeout: if an averaging step hangs for this long, it will be cancelled automatically.\n Increase averaging_timeout if you see \"Proceeding with local gradients\" at least 25% of the time.\n Do not set this timeout too high, as it may cause your optimizer to hang after some types of network errors.\n :param allreduce_timeout: timeout for a single attempt to run all-reduce, default: equal to averaging_timeout.\n :param load_state_timeout: wait for at most this many seconds before giving up on load_state_from_peers.\n :param reuse_grad_buffers: if True, use model's .grad buffers for gradient accumulation.\n This is more memory efficient, but it requires that the user does *NOT* call model/opt zero_grad at all\n\n :param offload_optimizer: offload the optimizer to host memory, saving GPU memory for parameters and gradients\n :param delay_optimizer_step: run optimizer in background, apply results in future .step; requires offload_optimizer\n :param delay_grad_averaging: average gradients in background; requires offload_optimizer and delay_optimizer_step\n\n :param delay_state_averaging: if enabled (default), average parameters and extra tensors in a background thread;\n if set to False, average parameters synchronously within the corresponding hivemind.Optimizer.step call.\n\n :param average_state_every: average state (parameters, chosen opt tensors) with peers every this many **epochs**.\n This reduces the communication overhead increasing, but can cause parameters to diverge if too large.\n The maximal average_state_every=num_epochs depends on how often peers diverge from each other. If peers\n hardly ever skip averaging rounds, they can average state less frequently. 
In turn, network failures, lossy\n gradient compression and local_updates cause parameters to diverge faster and requires more frequent averaging.\n\n :param use_local_updates: if enabled, peers will update parameters on each .step using local gradients;\n if not enabled (default), accumulate gradients to target_batch_size, and then call .step with averaged gradients.\n Even if use_local_updates=True, learning rate scheduler will still be called once per target_batch_size.\n\n :param client_mode: if True, this peer will not accept incoming connections (firewall-compatible mode)\n :param auxiliary: if True, optimizer.step will only assist other peers in averaging (for cpu-only workers)\n\n :param grad_compression: compression strategy used for averaging gradients, default = no compression\n :param state_averaging_compression: compression for averaging params and state tensors, default = no compression\n :param load_state_compression: compression strategy for loading state from peers, default = no compression\n :param average_opt_statistics: names of optimizer statistics from state dict that should be averaged with peers\n :param extra_tensors: if specified, these extra tensors will also be averaged and shared in load_state_from_peers.\n\n :param averager_opts: additional keyword arguments forwarded to both GradientAverager and TrainingStateAverager\n :param tracker_opts: additional keyword arguments forwarded to ProgressTracker\n :param performance_ema_alpha: moving average alpha in ProgressTracker, TrainingStateAverager and Optimizer\n :param verbose: if True, report internal events such as accumilating gradients and running background tasks\n\n :note: in a large-scale training, peers will inevitably fail and you will see error messages. hivemind.Optimizer\n is designed to recover from such failures, but will sometimes need a minute or two to re-adjust.\n\n \"\"\"\n\n def __init__(\n self,\n *,\n dht: DHT,\n run_id: str,\n target_batch_size: int,\n batch_size_per_step: Optional[int] = None,\n optimizer: Union[TorchOptimizer, OptimizerFactory],\n params: Optional[Union[Parameters, ParamGroups]] = None,\n scheduler: Optional[Union[LRSchedulerBase, SchedulerFactory]] = None,\n matchmaking_time: Optional[float] = 15.0,\n averaging_timeout: Optional[float] = 60.0,\n allreduce_timeout: Optional[float] = None,\n next_chunk_timeout: Optional[float] = None,\n load_state_timeout: float = 600.0,\n reuse_grad_buffers: bool = False,\n offload_optimizer: Optional[bool] = None,\n delay_optimizer_step: Optional[bool] = None,\n delay_grad_averaging: bool = False,\n delay_state_averaging: bool = True,\n average_state_every: int = 1,\n use_local_updates: bool = False,\n client_mode: bool = None,\n auxiliary: bool = False,\n grad_compression: CompressionBase = NoCompression(),\n state_averaging_compression: CompressionBase = NoCompression(),\n load_state_compression: CompressionBase = NoCompression(),\n average_opt_statistics: Sequence[str] = (),\n extra_tensors: Sequence[torch.Tensor] = (),\n averager_opts: Optional[dict] = None,\n tracker_opts: Optional[dict] = None,\n performance_ema_alpha: float = 0.1,\n shutdown_timeout: float = 5,\n verbose: bool = False,\n ):\n self._parent_pid = os.getpid()\n\n client_mode = client_mode if client_mode is None else dht.client_mode\n delay_optimizer_step = delay_optimizer_step if delay_optimizer_step is not None else delay_grad_averaging\n offload_optimizer = offload_optimizer if offload_optimizer is not None else (params is not None)\n allreduce_timeout = 
allreduce_timeout if allreduce_timeout is not None else averaging_timeout\n next_chunk_timeout = next_chunk_timeout if next_chunk_timeout is not None else matchmaking_time\n assert not delay_grad_averaging or delay_optimizer_step, \"delay_grad_averaging requires delay_optimizer_step\"\n assert not (client_mode and auxiliary), \"Client-mode peers cannot serve as auxiliaries\"\n assert not auxiliary or batch_size_per_step is None, \"Auxiliary peers should not accumulate batches\"\n if callable(optimizer) and params is not None:\n if scheduler is not None and (not callable(scheduler) or isinstance(scheduler, LRSchedulerBase)):\n raise ValueError(\"For this mode, please provide scheduler factory: callable(optimizer) -> scheduler\")\n elif all(hasattr(optimizer, attr) for attr in (\"param_groups\", \"step\", \"zero_grad\")):\n if offload_optimizer or delay_optimizer_step or delay_grad_averaging:\n raise ValueError(\n \"To enable offload_optimizer or delayed updates, please initialize Optimizer as \"\n \"hivemind.Optimizer(..., params=params, optimizer=lambda params: create_opt(params)\"\n )\n else:\n raise ValueError(\n \"Please initialize the optimizer in one of the following two ways:\\n\"\n \"(A) hivemind.Optimizer(..., params=params, optimizer=lambda params: create_opt(params)\\n\"\n \"(B) hivemind.Optimizer(..., optimizer=pre_initialize_optimizer)\"\n )\n if use_local_updates:\n assert not reuse_grad_buffers, \"if local_updates is True, gradients will not be accumulated\"\n assert not delay_grad_averaging, \"if local_updates is True, gradients will not be averaged\"\n\n self.dht, self.run_id, self.client_mode, self.auxiliary = dht, run_id, client_mode, auxiliary\n self.batch_size_per_step, self.target_batch_size = batch_size_per_step, target_batch_size\n self.delay_state_averaging, self.average_state_every = delay_state_averaging, average_state_every\n self.matchmaking_time, self.offload_optimizer = matchmaking_time, offload_optimizer\n self.delay_grad_averaging, self.delay_optimizer_step = delay_grad_averaging, delay_optimizer_step\n\n self.averaging_timeout, self.allreduce_timeout = averaging_timeout, allreduce_timeout\n self.load_state_timeout, self.shutdown_timeout = load_state_timeout, shutdown_timeout\n self.next_chunk_timeout = next_chunk_timeout\n\n self.status_loglevel = logging.INFO if verbose else logging.DEBUG\n self.scheduled_grads: Optional[StepControl] = None\n self.scheduled_state: Optional[StepControl] = None\n\n self.tracker = self._make_progress_tracker(\n target_batch_size, performance_ema_alpha=performance_ema_alpha, **tracker_opts or {}\n )\n self.state_averager = self._make_state_averager(\n optimizer=optimizer,\n params=params,\n scheduler=scheduler,\n delta_rule_averaging=use_local_updates and self.delay_state_averaging,\n compression=state_averaging_compression,\n state_compression=load_state_compression,\n average_opt_statistics=average_opt_statistics,\n performance_ema_alpha=performance_ema_alpha,\n extra_tensors=extra_tensors,\n **averager_opts or {},\n )\n if not use_local_updates:\n self.grad_averager = self._make_gradient_averager(\n reuse_grad_buffers=reuse_grad_buffers, compression=grad_compression, **averager_opts or {}\n )\n else:\n self.grad_averager = None\n\n self._should_check_synchronization_on_update = True # used in self.should_load_state_from_peers\n self._schema_hash = self._compute_schema_hash()\n\n self.delay_before_state_averaging = PerformanceEMA(alpha=performance_ema_alpha)\n # measures the average time from the beginning of 
self._update_global_epoch to the call to state_averager\n # used for pre-scheduling the averaging round in state_averager\n\n self._step_supports_amp_scaling = reuse_grad_buffers\n # note: the line above is used by pytorch AMP GradScaler to enable custom behavior needed when reusing gradient\n # buffers over multiple steps (to avoid repeated unscaling). Without reuse_grad_buffers, this is not needed.\n\n def _make_state_averager(self, **kwargs) -> TrainingStateAverager:\n return TrainingStateAverager(\n dht=self.dht,\n prefix=f\"{self.run_id}_state_averager\",\n min_matchmaking_time=self.matchmaking_time,\n allreduce_timeout=self.allreduce_timeout,\n shutdown_timeout=self.shutdown_timeout,\n offload_optimizer=self.offload_optimizer,\n custom_gradients=self.offload_optimizer,\n status_loglevel=self.status_loglevel,\n next_chunk_timeout=self.next_chunk_timeout,\n client_mode=self.client_mode,\n auxiliary=self.auxiliary,\n start=True,\n **kwargs,\n )\n\n def _make_gradient_averager(self, **kwargs) -> GradientAverager:\n assert hasattr(self, \"state_averager\"), \"must initialize state averager first\"\n grad_averager = GradientAverager(\n dht=self.dht,\n prefix=f\"{self.run_id}_grad_averager\",\n parameters=self.state_averager.main_parameters,\n min_matchmaking_time=self.matchmaking_time,\n allreduce_timeout=self.allreduce_timeout,\n shutdown_timeout=self.shutdown_timeout,\n next_chunk_timeout=self.next_chunk_timeout,\n client_mode=self.client_mode,\n auxiliary=self.auxiliary,\n start=True,\n **kwargs,\n )\n if self.offload_optimizer:\n optimized_param_groups = self.state_averager.optimizer.param_groups\n optimized_parameters = [param for group in optimized_param_groups for param in group[\"params\"]]\n with grad_averager.get_tensors() as averaged_gradients:\n assert len(averaged_gradients) == len(optimized_parameters)\n for opt_param, averaged_grad in zip(optimized_parameters, averaged_gradients):\n opt_param.grad = averaged_grad\n return grad_averager\n\n def _make_progress_tracker(self, target_batch_size: int, **kwargs) -> ProgressTracker:\n return ProgressTracker(\n dht=self.dht,\n prefix=self.run_id,\n target_batch_size=target_batch_size,\n client_mode=self.client_mode,\n status_loglevel=self.status_loglevel,\n start=True,\n **kwargs,\n )\n\n def _compute_schema_hash(self) -> int:\n optimized_param_groups = self.state_averager.optimizer.param_groups\n optimized_parameters = [param for group in optimized_param_groups for param in group[\"params\"]]\n param_shapes = tuple(tuple(param.shape) for param in optimized_parameters)\n\n # offloaded optimizer requires that gradient tensors are reused between iterations\n grad_ids = tuple(id(param.grad) for param in optimized_parameters) if self.offload_optimizer else None\n return hash((grad_ids, param_shapes))\n\n def is_alive(self) -> bool:\n return self.state_averager.is_alive()\n\n @property\n def local_epoch(self) -> int:\n \"\"\"\n This worker's current epoch, kept synchronized with peers. 
If peer's local_epoch lags behind others, it will\n automatically re-synchronize by downloading state from another peer.\n An epoch corresponds to accumulating target_batch_size across all active devices.\n \"\"\"\n return self.state_averager.local_epoch\n\n @property\n def local_progress(self) -> LocalTrainingProgress:\n return self.tracker.local_progress\n\n @property\n def use_local_updates(self) -> bool:\n return self.grad_averager is None\n\n @property\n def use_gradient_averaging(self) -> bool:\n return self.grad_averager is not None\n\n def step(\n self,\n closure: Optional[Callable[[], torch.Tensor]] = None,\n batch_size: Optional[int] = None,\n grad_scaler: Optional[GradScaler] = None,\n ):\n \"\"\"\n Update training progress after accumulating another local batch size. Depending on the configuration, this will\n report progress to peers, run global or local optimizer step, average parameters or schedule background tasks.\n\n :param closure: A closure that reevaluates the model and returns the loss.\n :param batch_size: optional override for batch_size_per_step from init.\n :param grad_scaler: if amp is enabled, this **must** be a hivemind-aware gradient scaler.\n :note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.\n \"\"\"\n if grad_scaler is not None and not isinstance(grad_scaler, GradScaler):\n raise ValueError(\"hivemind.Optimizer requires a hivemind-aware gradient scaler (hivemind.GradScaler)\")\n if self.batch_size_per_step is None and batch_size is None and not self.auxiliary:\n raise ValueError(\"Please either set batch_size_per_step parameter at init or when calling .step\")\n if self.auxiliary and (closure is not None or batch_size is not None or grad_scaler is not None):\n raise ValueError(\"Auxiliary peers should not have batch size, run closures, or use grad_scaler\")\n batch_size = batch_size if batch_size is not None else self.batch_size_per_step\n\n # if delayed updates finished before step, apply these updates; otherwise do nothing\n self.state_averager.step(apply_delayed_updates=True)\n\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n if not self.auxiliary and self._should_load_state_from_peers():\n logger.log(self.status_loglevel, \"Peer is out of sync\")\n self.load_state_from_peers()\n return loss # local gradients were computed with out-of-sync parameters, must start over\n\n if self.use_gradient_averaging:\n # accumulate gradients toward target batch size, then aggregate with peers and run optimizer\n if not self.auxiliary:\n grads_are_valid = self._check_and_accumulate_gradients(batch_size, grad_scaler)\n if not grads_are_valid:\n return loss # local gradients were reset due to overflow, must start over\n\n self._maybe_schedule_gradient_averaging()\n self._maybe_schedule_state_averaging()\n\n else:\n # use_local_updates=True: update parameters on every step independently of other peers\n if not self.auxiliary:\n if grad_scaler is not None:\n with grad_scaler.running_global_step():\n assert grad_scaler.unscale_(self)\n\n new_samples_accumulated = self.tracker.local_progress.samples_accumulated + batch_size\n self.tracker.report_local_progress(self.local_epoch, new_samples_accumulated)\n self._maybe_schedule_state_averaging()\n\n self.state_averager.step(\n increment_epoch=False,\n optimizer_step=True,\n delay_optimizer_step=self.delay_optimizer_step,\n grad_scaler=grad_scaler,\n )\n\n if self.tracker.ready_to_update_epoch:\n 
self._update_global_epoch(grad_scaler)\n\n return loss\n\n def _update_global_epoch(self, grad_scaler: Optional[GradScaler]) -> None:\n \"\"\"Depending on the configuration: aggregate gradients and/or parameters, perform global optimizer step\"\"\"\n assert self._schema_hash == self._compute_schema_hash(), \"parameters or gradients changed during iteration\"\n _epoch_start_time = time.perf_counter()\n\n with self.tracker.pause_updates():\n wait_for_trigger = None\n\n if self.use_gradient_averaging:\n logger.log(self.status_loglevel, f\"Beginning optimizer step #{self.local_epoch}\")\n if self.delay_optimizer_step:\n self.state_averager.step(wait_for_delayed_updates=True)\n\n began_averaging_gradients = self._begin_averaging_gradients(grad_scaler)\n if not began_averaging_gradients:\n # failed to start gradient averaging due to an internal error\n self.grad_averager.load_accumulators_into_averager_()\n elif self.delay_grad_averaging:\n # if using delayed grad averaing, send this to state_averager as a pre-condition for optimizer step\n wait_for_trigger = partial(self._average_gradients_and_load_into_optimizer, self.scheduled_grads)\n else:\n # delay_grad_averaging=False, average gradients immediately\n self._average_gradients_and_load_into_optimizer(self.scheduled_grads)\n\n next_epoch = max(self.local_epoch + 1, self.tracker.global_epoch)\n swarm_not_empty = self.tracker.global_progress.num_peers > 1\n should_perform_optimizer_step = not self.auxiliary and not self.use_local_updates\n should_average_state = (\n swarm_not_empty\n and next_epoch % self.average_state_every == 0\n and not self.state_averager.averaging_in_progress\n )\n\n if should_average_state and self.scheduled_state is not None:\n if self.scheduled_state.triggered or self.scheduled_state.done():\n logger.log(\n self.status_loglevel,\n f\"Not using pre-scheduled group for state averaging because it\"\n f\"was already used elsewhere: {self.scheduled_state}\",\n )\n self.scheduled_state = None\n self.delay_before_state_averaging.update(task_size=1, interval=time.perf_counter() - _epoch_start_time)\n\n self.state_averager.step(\n increment_epoch=True,\n wait_for_trigger=wait_for_trigger,\n optimizer_step=should_perform_optimizer_step,\n delay_optimizer_step=self.delay_optimizer_step and should_perform_optimizer_step,\n grad_scaler=grad_scaler,\n averaging_round=should_average_state,\n delay_averaging=self.delay_state_averaging and not self.auxiliary,\n averaging_control=self.scheduled_state if should_average_state else None,\n averaging_opts=dict(timeout=self.averaging_timeout) if should_average_state else None,\n )\n\n if not should_average_state and self.scheduled_state is not None and not self.scheduled_state.done():\n self.scheduled_state.cancel()\n self.scheduled_state = None\n\n self.tracker.update_epoch(new_epoch=self.state_averager.local_epoch)\n self._should_check_synchronization_on_update = True\n # the above line ensures that peers check for *strict* synchronization once per epoch\n\n if not self.client_mode:\n self.state_averager.state_sharing_priority = self.local_epoch\n\n if self.use_gradient_averaging and not self.auxiliary:\n self.grad_averager.reset_accumulated_grads_()\n if not self.client_mode:\n self.grad_averager.state_sharing_priority = self.local_epoch\n\n logger.log(self.status_loglevel, f\"Transitioning to epoch {self.local_epoch}\")\n\n def _begin_averaging_gradients(self, grad_scaler: Optional[GradScaler]) -> bool:\n \"\"\"Begin an all-reduce round to average gradients; return True if succeeded, 
False if failed\"\"\"\n if grad_scaler is not None:\n with grad_scaler.running_global_step():\n assert grad_scaler.unscale_(self)\n\n began_averaging_gradients = False\n if self.scheduled_grads is not None and (self.scheduled_grads.triggered or self.scheduled_grads.done()):\n logger.log(\n self.status_loglevel,\n f\"Not using pre-scheduled group for state averaging because it\"\n f\"was already used elsewhere: {self.scheduled_state}\",\n )\n self.scheduled_grads = None\n\n elif self.tracker.global_progress.num_peers > 1:\n try:\n self.scheduled_grads = self.grad_averager.step(\n control=self.scheduled_grads, reset_accumulators=True, wait=False\n )\n began_averaging_gradients = True\n except BaseException as e:\n logger.exception(e)\n\n if not began_averaging_gradients and self.scheduled_grads is not None and not self.scheduled_grads.done():\n if self.tracker.global_progress.num_peers > 1:\n logger.log(self.status_loglevel, f\"Tagging along for a pre-scheduled gradient averaging round\")\n self._tag_along_with_zero_weight(self.scheduled_grads)\n else:\n logger.log(self.status_loglevel, f\"Skipping pre-scheduled averaging round: there are no other peers\")\n self._load_local_gradients_into_optimizer()\n self.scheduled_grads.cancel()\n self.scheduled_grads = None\n return began_averaging_gradients\n\n def _check_and_accumulate_gradients(self, batch_size: int, grad_scaler: Optional[GradScaler]) -> bool:\n \"\"\"Check if gradients are valid, accumulate and return True; otherwise, reset and return False\"\"\"\n assert not self.use_local_updates and not self.auxiliary\n if grad_scaler is not None and not grad_scaler.are_grads_finite(self):\n logger.log(self.status_loglevel, \"Encountered incorrect value in fp16 grads, resetting local gradients\")\n self.tracker.report_local_progress(self.local_epoch, samples_accumulated=0)\n self.grad_averager.reset_accumulated_grads_()\n return False\n\n self.grad_averager.accumulate_grads_(batch_size)\n self.tracker.report_local_progress(self.local_epoch, self.grad_averager.local_samples_accumulated)\n return True\n\n def _maybe_schedule_gradient_averaging(self) -> None:\n \"\"\"If next epoch is coming soon, schedule the next gradient averaging round at the estimated end of epoch\"\"\"\n assert self.use_gradient_averaging\n if self.tracker.estimated_next_update_time - get_dht_time() <= self.matchmaking_time:\n if self.scheduled_grads is None or self.scheduled_grads.triggered or self.scheduled_grads.done():\n eta_seconds = self.tracker.estimated_next_update_time - get_dht_time()\n eta_seconds = max(eta_seconds, self.grad_averager.matchmaking_kwargs[\"min_matchmaking_time\"])\n logger.log(self.status_loglevel, f\"Pre-scheduling gradient averaging round in {eta_seconds:.2f} sec\")\n self.scheduled_grads = self.grad_averager.schedule_step(timeout=self.averaging_timeout)\n\n def _maybe_schedule_state_averaging(self) -> None:\n \"\"\"If next epoch is coming soon, schedule the next state averaging at estimated parameter averaging start\"\"\"\n next_epoch = max(self.local_epoch + 1, self.tracker.global_epoch)\n if next_epoch % self.average_state_every != 0:\n return # averaging is not performed at this epoch\n if self.state_averager.averaging_in_progress:\n return # previous run is still in progress\n if self.delay_before_state_averaging.num_updates == 0:\n return # not enough data to accurately pre-schedule\n\n estimated_time = self.tracker.estimated_next_update_time\n estimated_time += self.delay_before_state_averaging.ema_seconds_per_sample\n estimated_time += 
self.state_averager.delay_before_averaging.ema_seconds_per_sample\n eta_seconds_to_averaging = estimated_time - get_dht_time()\n\n if eta_seconds_to_averaging <= self.matchmaking_time:\n if self.scheduled_state is None or self.scheduled_state.triggered or self.scheduled_state.done():\n min_matchmaking_time = self.state_averager.matchmaking_kwargs[\"min_matchmaking_time\"]\n actual_seconds = max(eta_seconds_to_averaging, min_matchmaking_time)\n logger.log(self.status_loglevel, f\"Pre-scheduling state averaging round in {actual_seconds:.2f} sec\")\n self.scheduled_state = self.state_averager.schedule_step(\n gather=next_epoch, timeout=self.averaging_timeout\n )\n\n def _average_gradients_and_load_into_optimizer(self, maybe_step_control: Optional[StepControl]):\n \"\"\"Run gradient averaging; on success, feed averaged gradients into optimizer; else, use local gradients\"\"\"\n assert self.use_gradient_averaging and maybe_step_control is None or maybe_step_control.triggered\n averaged_gradients = False\n\n try:\n if maybe_step_control is not None:\n group_info = maybe_step_control.result(self.averaging_timeout)\n logger.log(self.status_loglevel, f\"Averaged gradients with {len(group_info)} peers\")\n self._load_averaged_gradients_into_optimizer_()\n averaged_gradients = True\n else:\n logger.log(self.status_loglevel, f\"Skipped averaging: there are no other peers\")\n except BaseException as e:\n logger.log(self.status_loglevel, f\"Averaging gradients failed with {repr(e)}\")\n\n if not averaged_gradients:\n self._load_local_gradients_into_optimizer()\n\n def _load_averaged_gradients_into_optimizer_(self):\n \"\"\"If required, load averaged gradients into optimizer; otherwise simply notify grad averager\"\"\"\n assert self.use_gradient_averaging\n\n if self.offload_optimizer:\n pass # averaged gradients are already baked into optimizer, see _make_gradient_averager\n else:\n # copy averaged gradients into optimizer .grad buffers\n optimized_param_groups = self.state_averager.optimizer.param_groups\n optimized_parameters = [param for group in optimized_param_groups for param in group[\"params\"]]\n with torch.no_grad(), self.grad_averager.get_tensors() as averaged_gradients:\n assert len(averaged_gradients) == len(optimized_parameters)\n for opt_param, averaged_grad in zip(optimized_parameters, averaged_gradients):\n opt_param.grad.copy_(averaged_grad, non_blocking=True)\n\n self.grad_averager.notify_used_averaged_gradients()\n\n def _load_local_gradients_into_optimizer(self):\n \"\"\"Fallback to using local gradients in the optimizer (instead of averaged gradients)\"\"\"\n logger.log(self.status_loglevel, f\"Proceeding with local gradients\")\n self.grad_averager.load_accumulators_into_averager_()\n # note: we load gradients into grad_averager even though there is only one peer because of two reasons:\n # - if offload_optimizer, then we must load gradients onto the CPU gradient buffers used by the optimizer\n # - if not offload_optimizer, we must un-scale gradients (divide them by the number of accumulation steps)\n self._load_averaged_gradients_into_optimizer_()\n\n def zero_grad(self, set_to_none: bool = False):\n \"\"\"Reset gradients from model. If reuse_grad_buffers=True, this will raise an error.\"\"\"\n if self.use_gradient_averaging and self.grad_averager.reuse_grad_buffers:\n raise ValueError(\n f\"When running {self.__class__.__name__} with reuse_grad_buffers=True, user should never \"\n f\"call zero_grad manually. 
Gradients will be refreshed internally\"\n )\n for param_group in self.param_groups:\n for param in param_group[\"params\"]:\n if param.grad is None:\n pass\n elif set_to_none:\n param.grad = None\n else:\n param.grad.zero_()\n\n def _should_load_state_from_peers(self) -> bool:\n \"\"\"\n If true, peer will discard local progress and attempt to download state from peers.\n This method allows peer to continue training in two cases:\n - peer is on the same epoch as other collaborators - keep training normally\n - peer was on the same epoch and accumulated some grads, but some collaborators\n have just transitioned to the next epoch - this peer should also transition.\n\n :note: The latter case occurs due to the lack of network synchrony: the first peer that\n detects enough samples will transition to the next step and start counting samples anew.\n Some other peers may take time before they check with DHT and observe that\n - the global epoch is technically one epoch ahead of the current one and\n - the remaining (non-transitioned) peers no longer have target_batch_size between them\n If this is the case, peer should transition to the next epoch and does *not* need to re-load state.\n \"\"\"\n if self._should_check_synchronization_on_update and self.tracker.fetched_global_progress_this_epoch.is_set():\n self._should_check_synchronization_on_update = False\n return self.local_epoch != self.tracker.global_epoch # require exact synchronization once per step\n return self.local_epoch < self.tracker.global_epoch - 1 # catch up if a peer just switched to next epoch\n\n def is_synchronized_with_peers(self) -> bool:\n \"\"\"Checks whether the current peer is up-to-date with others in terms of the epoch (step) number.\"\"\"\n return self.local_epoch >= self.tracker.global_epoch - 1\n\n def load_state_from_peers(self, **kwargs):\n \"\"\"\n Attempt to load the newest collaboration state from other peers within the same run_id.\n\n If successful, this will update parameters, optimizer state, local epoch and learning rate schedule in-place.\n \"\"\"\n # note: we tag along for the next all-reduce because the run may have already started and cancelling it\n # will cause peers to restart matchmaking and may stall the entire collaboration for a few seconds.\n if self.scheduled_grads is not None and not self.scheduled_grads.done():\n self._tag_along_with_zero_weight(self.scheduled_grads)\n self.scheduled_grads = None\n self.state_averager.step(wait_for_delayed_updates=True)\n\n with self.tracker.pause_updates():\n while True:\n try:\n self.state_averager.load_state_from_peers(timeout=self.load_state_timeout, **kwargs)\n break\n except KeyboardInterrupt:\n raise\n except BaseException as e:\n logger.exception(f\"Failed to load state from peers: {e}, retrying ...\")\n continue\n\n if self.tracker.global_epoch - 1 <= self.local_epoch < self.tracker.global_epoch:\n logger.log(self.status_loglevel, f\"Catching up with collaboration step {self.tracker.global_epoch}\")\n self.state_averager.local_epoch = self.tracker.global_epoch\n\n self.tracker.report_local_progress(local_epoch=self.local_epoch, samples_accumulated=0)\n\n if not self.client_mode:\n self.state_averager.state_sharing_priority = self.local_epoch\n\n if self.use_gradient_averaging:\n self.grad_averager.reset_accumulated_grads_()\n if not self.client_mode:\n self.grad_averager.state_sharing_priority = self.local_epoch\n\n def state_dict(self) -> dict:\n state_dict = self.state_averager.optimizer.state_dict()\n state_dict[\"state\"][\"local_epoch\"] = 
self.local_epoch\n return state_dict\n\n def load_state_dict(self, state_dict: dict):\n if \"local_epoch\" in state_dict[\"state\"]:\n self.state_averager.local_epoch = state_dict[\"state\"].pop(\"local_epoch\")\n return self.state_averager.optimizer.load_state_dict(state_dict)\n\n @property\n def state(self):\n return dict(self.state_averager.optimizer.state, local_epoch=self.local_epoch)\n\n @property\n def opt(self) -> TorchOptimizer:\n return self.state_averager.optimizer\n\n @property\n def param_groups(self) -> ParamGroups:\n next_index = 0\n param_groups = tuple(dict(param_group) for param_group in self.state_averager.optimizer.param_groups)\n for param_group in param_groups:\n num_params = len(param_group[\"params\"])\n main_params_for_group = self.state_averager.main_parameters[next_index : next_index + num_params]\n param_group[\"params\"] = main_params_for_group\n next_index += num_params\n assert next_index == len(self.state_averager.main_parameters)\n return param_groups\n\n def add_param_group(self, param_group: dict) -> None:\n raise ValueError(\n f\"{self.__class__.__name__} does not support calling add_param_group after creation. \"\n f\"Please provide all parameter groups at init\"\n )\n\n def __repr__(self):\n return f\"{self.__class__.__name__}(prefix={self.run_id}, epoch={self.local_epoch})\"\n\n def _tag_along_with_zero_weight(self, control: StepControl):\n \"\"\"Wait for a running averaging round to finish with zero weight.\"\"\"\n if not control.triggered:\n control.weight = 0\n control.allow_allreduce()\n if not control.done():\n try:\n control.result(self.averaging_timeout)\n except BaseException as e:\n logger.exception(e)\n if not control.done():\n control.cancel()\n\n def shutdown(self):\n logger.log(self.status_loglevel, \"Sending goodbye to peers...\")\n self.tracker.shutdown(self.shutdown_timeout)\n self.state_averager.step(wait_for_delayed_updates=True)\n for scheduled_round in self.scheduled_grads, self.scheduled_state:\n if scheduled_round is not None:\n if scheduled_round.stage == AveragingStage.LOOKING_FOR_GROUP:\n scheduled_round.cancel()\n else:\n self._tag_along_with_zero_weight(scheduled_round)\n\n logger.log(self.status_loglevel, \"Shutting down averagers...\")\n self.state_averager.shutdown()\n if self.use_gradient_averaging:\n self.grad_averager.shutdown()\n logger.log(self.status_loglevel, f\"{self.__class__.__name__} is shut down\")\n\n def __del__(self):\n if self._parent_pid == os.getpid() and self.is_alive():\n self.shutdown()\n"
] | [
[
"numpy.linspace",
"torch.zeros",
"numpy.asarray",
"numpy.quantile",
"numpy.frombuffer",
"torch.quantize_per_tensor",
"torch.finfo",
"torch.bucketize",
"torch.as_tensor"
],
[
"torch.no_grad",
"torch.enable_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PangYunsheng8/CGIPool | [
"2cf22019bad510804021f768c6a0d76bf79b62f6"
] | [
"train.py"
] | [
"import os\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport csv\r\nimport glob\r\nimport argparse\r\n\r\nfrom datasets.dataloader import build_loader\r\n\r\n\r\nparser = argparse.ArgumentParser(description=\"Graph Pooling\")\r\nparser.add_argument('--model', type=str, default=\"SAGNet\", help='model name')\r\nparser.add_argument('--seed', type=int, default=777, help='seed')\r\nparser.add_argument('--batch_size', type=int, default=128, help='batch size')\r\nparser.add_argument('--lr', type=float, default=0.001, help='learning rate')\r\nparser.add_argument('--weight_decay', type=float, default=0.001, help='weight decay')\r\nparser.add_argument('--dataset', type=str, default='IMDB-MULTI', help='DD/NCI1/NCI109/Mutagenicity/PROTEINS')\r\nparser.add_argument('--epochs', type=int, default=1000, help='maximum number of epochs')\r\nparser.add_argument('--patience', type=int, default=100, help='path to save result')\r\n\r\nargs = parser.parse_args()\r\n\r\n\r\ndef train(args, model, train_loader, val_loader):\r\n model.train()\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\r\n\r\n min_loss = 1e10\r\n max_acc = 0\r\n patience_cnt = 0\r\n best_epoch = 0\r\n val_loss_values = []\r\n for epoch in range(args.epochs):\r\n loss_train = 0.0\r\n loss_dis = 0.0\r\n correct = 0\r\n for i, data in enumerate(train_loader):\r\n optimizer.zero_grad()\r\n data = data.to(args.device)\r\n output = model(data)\r\n cls_loss = F.nll_loss(output, data.y)\r\n dis_loss = model.compute_disentangle_loss()\r\n loss = cls_loss + 0.001 * dis_loss \r\n loss.backward()\r\n optimizer.step()\r\n loss_train += cls_loss.item()\r\n loss_dis += dis_loss.item() \r\n pred = output.max(dim=1)[1]\r\n correct += pred.eq(data.y).sum().item()\r\n acc_train = correct / len(train_loader.dataset)\r\n val_acc, val_loss = test(args, model, val_loader)\r\n if val_acc > max_acc:\r\n max_acc = val_acc\r\n print('Epoch: {:04d}'.format(epoch + 1), 'loss_train: {:.6f}'.format(loss_train), 'loss_dis: {:.6f}'.format(loss_dis),\r\n 'acc_train: {:.6f}'.format(acc_train), 'loss_val: {:.6f}'.format(val_loss),\r\n 'acc_val: {:.6f}'.format(val_acc), 'max_acc: {:.6f}'.format(max_acc))\r\n\r\n val_loss_values.append(val_loss)\r\n if val_loss_values[-1] < min_loss:\r\n min_loss = val_loss_values[-1]\r\n best_epoch = epoch\r\n patience_cnt = 0\r\n torch.save(model.state_dict(), 'save/' + args.dataset + '/' + str(args.seed) + '.pth')\r\n else:\r\n patience_cnt += 1\r\n\r\n if patience_cnt == args.patience:\r\n break\r\n\r\n print('Optimization Finished!')\r\n\r\n\r\ndef test(args, model, loader):\r\n model.eval()\r\n correct = 0.\r\n loss = 0.\r\n for data in loader:\r\n data = data.to(args.device)\r\n output = model(data)\r\n pred = output.max(dim=1)[1]\r\n correct += pred.eq(data.y).sum().item()\r\n loss += F.nll_loss(output, data.y, reduction='sum').item()\r\n return correct / len(loader.dataset),loss / len(loader.dataset)\r\n\r\n\r\ndef main():\r\n torch.manual_seed(args.seed)\r\n\r\n if torch.cuda.is_available():\r\n torch.cuda.manual_seed(args.seed)\r\n args.device = \"cuda\"\r\n print('cuda')\r\n else:\r\n args.device = \"cpu\"\r\n\r\n train_loader, val_loader, test_loader = build_loader(args)\r\n \r\n if args.model == \"SAGNet\":\r\n from models.SAGNet import SAGNet, Config\r\n config = Config()\r\n model = SAGNet(config, args)\r\n elif args.model == \"GSANet\":\r\n from models.GSANet import GSANet, Config\r\n config = Config()\r\n model = GSANet(config, args)\r\n elif args.model == 
\"HGPSLNet\":\r\n from models.HGPSLNet import HGPSLNet, Config\r\n config = Config()\r\n model = HGPSLNet(config, args)\r\n elif args.model == \"ASAPNet\":\r\n from models.ASAPNet import ASAPNet, Config\r\n config = Config()\r\n model = ASAPNet(config, args)\r\n model.to(args.device)\r\n\r\n train(args, model, train_loader, val_loader)\r\n\r\n model.load_state_dict(torch.load('save/' + args.dataset + '/' + str(args.seed) + '.pth'))\r\n test_acc, test_loss = test(args, model, test_loader)\r\n print('Test set results, loss = {:.6f}, accuracy = {:.6f}'.format(test_loss, test_acc))\r\n \r\n with open('result.txt', 'a') as f:\r\n f.write(str(test_acc) + '\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
] | [
[
"torch.manual_seed",
"torch.nn.functional.nll_loss",
"torch.cuda.manual_seed",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rwilliams251/taichi | [
"442710331be55baf5af17f9667db650c19cbb0b2",
"442710331be55baf5af17f9667db650c19cbb0b2"
] | [
"python/taichi/examples/simulation/mpm3d.py",
"tests/python/test_type_check.py"
] | [
"export_file = '' # use '/tmp/mpm3d.ply' for exporting result to disk\n\nimport numpy as np\n\nimport taichi as ti\n\nti.init(arch=ti.gpu)\n\n#dim, n_grid, steps, dt = 2, 128, 20, 2e-4\n#dim, n_grid, steps, dt = 2, 256, 32, 1e-4\ndim, n_grid, steps, dt = 3, 32, 25, 4e-4\n#dim, n_grid, steps, dt = 3, 64, 25, 2e-4\n#dim, n_grid, steps, dt = 3, 128, 25, 8e-5\n\nn_particles = n_grid**dim // 2**(dim - 1)\ndx = 1 / n_grid\n\np_rho = 1\np_vol = (dx * 0.5)**2\np_mass = p_vol * p_rho\ngravity = 9.8\nbound = 3\nE = 400\n\nx = ti.Vector.field(dim, float, n_particles)\nv = ti.Vector.field(dim, float, n_particles)\nC = ti.Matrix.field(dim, dim, float, n_particles)\nJ = ti.field(float, n_particles)\n\ngrid_v = ti.Vector.field(dim, float, (n_grid, ) * dim)\ngrid_m = ti.field(float, (n_grid, ) * dim)\n\nneighbour = (3, ) * dim\n\n\[email protected]\ndef substep():\n for I in ti.grouped(grid_m):\n grid_v[I] = ti.zero(grid_v[I])\n grid_m[I] = 0\n ti.block_dim(n_grid)\n for p in x:\n Xp = x[p] / dx\n base = int(Xp - 0.5)\n fx = Xp - base\n w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]\n stress = -dt * 4 * E * p_vol * (J[p] - 1) / dx**2\n affine = ti.Matrix.identity(float, dim) * stress + p_mass * C[p]\n for offset in ti.static(ti.grouped(ti.ndrange(*neighbour))):\n dpos = (offset - fx) * dx\n weight = 1.0\n for i in ti.static(range(dim)):\n weight *= w[offset[i]][i]\n grid_v[base + offset] += weight * (p_mass * v[p] + affine @ dpos)\n grid_m[base + offset] += weight * p_mass\n for I in ti.grouped(grid_m):\n if grid_m[I] > 0:\n grid_v[I] /= grid_m[I]\n grid_v[I][1] -= dt * gravity\n cond = (I < bound) & (grid_v[I] < 0) | \\\n (I > n_grid - bound) & (grid_v[I] > 0)\n grid_v[I] = 0 if cond else grid_v[I]\n ti.block_dim(n_grid)\n for p in x:\n Xp = x[p] / dx\n base = int(Xp - 0.5)\n fx = Xp - base\n w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]\n new_v = ti.zero(v[p])\n new_C = ti.zero(C[p])\n for offset in ti.static(ti.grouped(ti.ndrange(*neighbour))):\n dpos = (offset - fx) * dx\n weight = 1.0\n for i in ti.static(range(dim)):\n weight *= w[offset[i]][i]\n g_v = grid_v[base + offset]\n new_v += weight * g_v\n new_C += 4 * weight * g_v.outer_product(dpos) / dx**2\n v[p] = new_v\n x[p] += dt * v[p]\n J[p] *= 1 + dt * new_C.trace()\n C[p] = new_C\n\n\[email protected]\ndef init():\n for i in range(n_particles):\n x[i] = ti.Vector([ti.random() for i in range(dim)]) * 0.4 + 0.15\n J[i] = 1\n\n\ndef T(a):\n if dim == 2:\n return a\n\n phi, theta = np.radians(28), np.radians(32)\n\n a = a - 0.5\n x, y, z = a[:, 0], a[:, 1], a[:, 2]\n c, s = np.cos(phi), np.sin(phi)\n C, S = np.cos(theta), np.sin(theta)\n x, z = x * c + z * s, z * c - x * s\n u, v = x, y * C + z * S\n return np.array([u, v]).swapaxes(0, 1) + 0.5\n\n\ninit()\ngui = ti.GUI('MPM3D', background_color=0x112F41)\nwhile gui.running and not gui.get_event(gui.ESCAPE):\n for s in range(steps):\n substep()\n pos = x.to_numpy()\n if export_file:\n writer = ti.PLYWriter(num_vertices=n_particles)\n writer.add_vertex_pos(pos[:, 0], pos[:, 1], pos[:, 2])\n writer.export_frame(gui.frame, export_file)\n gui.circles(T(pos), radius=1.5, color=0x66ccff)\n gui.show()\n",
"import numpy as np\nimport pytest\nfrom taichi.lang.util import has_pytorch\n\nimport taichi as ti\nfrom tests import test_utils\n\n\n@test_utils.test(arch=ti.cpu)\ndef test_unary_op():\n @ti.kernel\n def floor():\n a = 1\n b = ti.floor(a)\n\n with pytest.raises(ti.TaichiTypeError,\n match=\"'floor' takes real inputs only\"):\n floor()\n\n\n@test_utils.test(arch=ti.cpu)\ndef test_binary_op():\n @ti.kernel\n def bitwise_float():\n a = 1\n b = 3.1\n c = a & b\n\n with pytest.raises(ti.TaichiTypeError,\n match=r\"unsupported operand type\\(s\\) for '&'\"):\n bitwise_float()\n\n\n@test_utils.test(arch=ti.cpu)\ndef test_ternary_op():\n @ti.kernel\n def select():\n a = 1.1\n b = 3\n c = 3.6\n d = b if a else c\n\n with pytest.raises(TypeError,\n match=\"`if` conditions must be of type int32\"):\n select()\n\n\[email protected](not has_pytorch(), reason='Pytorch not installed.')\n@test_utils.test(arch=[ti.cpu, ti.opengl])\ndef test_subscript():\n a = ti.ndarray(ti.i32, shape=(10, 10))\n\n @ti.kernel\n def any_array(x: ti.any_arr()):\n b = x[3, 1.1]\n\n with pytest.raises(ti.TaichiTypeError, match=\"indices must be integers\"):\n any_array(a)\n\n\n@test_utils.test()\ndef test_0d_ndarray():\n @ti.kernel\n def foo() -> ti.i32:\n a = np.array(3, dtype=np.int32)\n return a\n\n assert foo() == 3\n\n\n@test_utils.test()\ndef test_non_0d_ndarray():\n @ti.kernel\n def foo():\n a = np.array([1])\n\n with pytest.raises(\n ti.TaichiTypeError,\n match=\n \"Only 0-dimensional numpy array can be used to initialize a scalar expression\"\n ):\n foo()\n"
] | [
[
"numpy.array",
"numpy.radians",
"numpy.cos",
"numpy.sin"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
deno750/VRP_Optimization | [
"653c950b59acb3a1cd96d1e96bb334c90655eaa2"
] | [
"other_codes/perfProf.py"
] | [
"#!/usr/bin/env python2\n\nfrom __future__ import print_function\nimport numpy as np\nimport matplotlib\n#matplotlib.use('PDF')\nimport matplotlib.pyplot as plt\nimport sys\n\nfrom optparse import OptionParser\n\n# parameters\ndefLW = 1.2 # default line width\ndefMS = 7 # default marker size\ndashes = ['-', # solid line\n\t'--', # dashed line\n\t'-.', # dash-dot line\n\t':', # dotted line\n\t'-',\n\t'--']\n\nmarkers = ['+', 'x', 's', '^', 'o', 'd']\ncolors = ['r', 'b', 'y', 'g', 'm', 'c']\n\n\nclass CmdLineParser(object):\n\tdef __init__(self):\n\t\tself.parser = OptionParser(usage='usage: python2 perfprof.py [options] cvsfile.csv outputfile.pdf')\n\t\t# default options\n\t\tself.parser.add_option(\"-D\", \"--delimiter\", dest=\"delimiter\", default=None, help=\"delimiter for input files\")\n\t\tself.parser.add_option(\"-M\", \"--maxratio\", dest=\"maxratio\", default=4, type=float, help=\"maxratio for perf. profile\")\n\t\tself.parser.add_option(\"-S\", \"--shift\", dest=\"shift\", default=0, type=float, help=\"shift for data\")\n\t\tself.parser.add_option(\"-L\", \"--logplot\", dest=\"logplot\", action=\"store_true\", default=False, help=\"log scale for x\")\n\t\tself.parser.add_option(\"-T\", \"--timelimit\", dest=\"timelimit\", default=1e99, type=float, help=\"time limit for runs\")\n\t\tself.parser.add_option(\"-P\", \"--plot-title\", dest=\"plottitle\", default=None, help=\"plot title\")\n\t\tself.parser.add_option(\"-X\", \"--x-label\", dest=\"xlabel\", default='Time Ratio', help=\"x axis label\")\n\t\tself.parser.add_option(\"-B\", \"--bw\", dest=\"bw\", action=\"store_true\", default=False, help=\"plot B/W\")\n\n\tdef addOption(self, *args, **kwargs):\n\t\tself.parser.add_option(*args, **kwargs)\n\n\tdef parseArgs(self):\n\t\t(options, args) = self.parser.parse_args()\n\t\toptions.input = args[0]\n\t\toptions.output = args[1]\n\t\treturn options\n\n\ndef readTable(fp, delimiter):\n\t\"\"\"\n\tread a CSV file with performance profile specification\n\tthe format is as follows:\n\tncols algo1 algo2 ...\n\tnome_istanza tempo(algo1) tempo(algo2) ...\n\t...\n\t\"\"\"\n\tfirstline = fp.readline().strip().split(delimiter)\n\tncols = int(firstline[0])\n\tassert(ncols <= len(markers))\n\tcnames = firstline[1:]\n\trnames = []\n\trows = []\n\tfor row in fp:\n\t\trow = row.strip().split(delimiter)\n\t\trnames.append(row[0])\n\t\trdata = np.empty(ncols)\n\t\tfor j in range(ncols):\n\t\t\trdata[j] = float(row[j + 1])\n\t\trows.append(rdata)\n\tdata = np.array(rows)\n\treturn (rnames, cnames, data)\n\n\ndef main():\n\tparser = CmdLineParser()\n\topt = parser.parseArgs()\n\tprint(opt)\n\t# read data\n\trnames, cnames, data = readTable(open(opt.input, 'r'), opt.delimiter)\n\tnrows, ncols = data.shape\n\t# add shift\n\tdata = data + opt.shift\n\t# compute ratios\n\tminima = data.min(axis=1)\n\tratio = data\n\tfor j in range(ncols):\n\t\tratio[:, j] = data[:, j] / minima\n\t# compute maxratio\n\tif opt.maxratio == -1:\n\t\topt.maxratio = ratio.max()\n\t# any time >= timelimit will count as maxratio + bigM (so that it does not show up in plots)\n\tfor i in range(nrows):\n\t\tfor j in range(ncols):\n\t\t\tif data[i,j] >= opt.timelimit:\n\t\t\t\tratio[i,j] = opt.maxratio + 1e6\n\t# sort ratios\n\tratio.sort(axis=0)\n\t# plot first\n\ty = np.arange(nrows, dtype=np.float64) / nrows\n\tfor j in range(ncols):\n\t\toptions = dict(label=cnames[j],\n\t\t\t\tlinewidth=defLW, linestyle='steps-post' + dashes[j],\n\t\t\t\tmarker=markers[j], markeredgewidth=defLW, markersize=defMS)\n\t\t# plt.step(ratio[:,j], y, 
label=cnames[j], linewidth=defLW, marker=markers[j], markersize=defMS)\n\t\tif opt.bw:\n\t\t\toptions['markerfacecolor'] = 'w'\n\t\t\toptions['markeredgecolor'] = 'k'\n\t\t\toptions['color'] = 'k'\n\t\telse:\n\t\t\toptions['color'] = colors[j]\n\t\tif opt.logplot:\n\t\t\tplt.semilogx(ratio[:, j], y, **options)\n\t\telse:\n\t\t\tplt.plot(ratio[:, j], y, **options)\n\tplt.axis([1, opt.maxratio, 0, 1])\n\t#plt.xlim([1,1.4])\t#Comment when not using Cost\n\tplt.legend(loc='lower right')\n\tif opt.plottitle is not None:\n\t\tplt.title(opt.plottitle)\n\tplt.xlabel(opt.xlabel)\n\tplt.savefig(opt.output,dpi=600)\n\nif __name__ == '__main__':\n\tmain()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.semilogx",
"numpy.arange",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
martonlanga/deepchem | [
"1c007fbae899a49fe3c40f41e7273ed21867fef9",
"1c007fbae899a49fe3c40f41e7273ed21867fef9",
"1c007fbae899a49fe3c40f41e7273ed21867fef9"
] | [
"deepchem/feat/base_classes.py",
"deepchem/models/robust_multitask.py",
"deepchem/feat/one_hot.py"
] | [
"\"\"\"\nFeature calculations.\n\"\"\"\nimport logging\nimport types\nimport numpy as np\nimport multiprocessing\n\n__author__ = \"Steven Kearnes\"\n__copyright__ = \"Copyright 2014, Stanford University\"\n__license__ = \"BSD 3-clause\"\n\n\ndef _featurize_complex(featurizer, mol_pdb_file, protein_pdb_file, log_message):\n logging.info(log_message)\n return featurizer._featurize_complex(mol_pdb_file, protein_pdb_file)\n\n\nclass ComplexFeaturizer(object):\n \"\"\"\"\n Abstract class for calculating features for mol/protein complexes.\n \"\"\"\n\n def featurize_complexes(self, mol_files, protein_pdbs):\n \"\"\"\n Calculate features for mol/protein complexes.\n\n Parameters\n ----------\n mols: list\n List of PDB filenames for molecules.\n protein_pdbs: list\n List of PDB filenames for proteins.\n\n Returns\n -------\n features: np.array\n Array of features\n failures: list\n Indices of complexes that failed to featurize.\n \"\"\"\n pool = multiprocessing.Pool()\n results = []\n for i, (mol_file, protein_pdb) in enumerate(zip(mol_files, protein_pdbs)):\n log_message = \"Featurizing %d / %d\" % (i, len(mol_files))\n results.append(\n pool.apply_async(_featurize_complex,\n (self, mol_file, protein_pdb, log_message)))\n pool.close()\n features = []\n failures = []\n for ind, result in enumerate(results):\n new_features = result.get()\n # Handle loading failures which return None\n if new_features is not None:\n features.append(new_features)\n else:\n failures.append(ind)\n features = np.asarray(features)\n return features, failures\n\n def _featurize_complex(self, mol_pdb, complex_pdb):\n \"\"\"\n Calculate features for single mol/protein complex.\n\n Parameters\n ----------\n mol_pdb: list\n Should be a list of lines of the PDB file.\n complex_pdb: list\n Should be a list of lines of the PDB file.\n \"\"\"\n raise NotImplementedError('Featurizer is not defined.')\n\n\nclass Featurizer(object):\n \"\"\"\n Abstract class for calculating a set of features for a molecule.\n\n Child classes implement the _featurize method for calculating features\n for a single molecule.\n \"\"\"\n\n def featurize(self, mols, verbose=True, log_every_n=1000):\n \"\"\"\n Calculate features for molecules.\n\n Parameters\n ----------\n mols : iterable\n RDKit Mol objects.\n \"\"\"\n mols = list(mols)\n features = []\n for i, mol in enumerate(mols):\n if mol is not None:\n features.append(self._featurize(mol))\n else:\n features.append(np.array([]))\n\n features = np.asarray(features)\n return features\n\n def _featurize(self, mol):\n \"\"\"\n Calculate features for a single molecule.\n\n Parameters\n ----------\n mol : RDKit Mol\n Molecule.\n \"\"\"\n raise NotImplementedError('Featurizer is not defined.')\n\n def __call__(self, mols):\n \"\"\"\n Calculate features for molecules.\n\n Parameters\n ----------\n mols : iterable\n RDKit Mol objects.\n \"\"\"\n return self.featurize(mols)\n\n\nclass UserDefinedFeaturizer(Featurizer):\n \"\"\"Directs usage of user-computed featurizations.\"\"\"\n\n def __init__(self, feature_fields):\n \"\"\"Creates user-defined-featurizer.\"\"\"\n self.feature_fields = feature_fields\n",
"import numpy as np\nimport tensorflow as tf\nimport collections\n\nfrom deepchem.metrics import to_one_hot\nfrom deepchem.models import KerasModel\nfrom deepchem.models.layers import Stack\nfrom deepchem.models.losses import SoftmaxCrossEntropy, L2Loss\n\n\nclass RobustMultitaskClassifier(KerasModel):\n \"\"\"Implements a neural network for robust multitasking.\n\n Key idea is to have bypass layers that feed directly from features to task\n output. Hopefully will allow tasks to route around bad multitasking.\n\n \"\"\"\n\n def __init__(self,\n n_tasks,\n n_features,\n layer_sizes=[1000],\n weight_init_stddevs=0.02,\n bias_init_consts=1.0,\n weight_decay_penalty=0.0,\n weight_decay_penalty_type=\"l2\",\n dropouts=0.5,\n activation_fns=tf.nn.relu,\n n_classes=2,\n bypass_layer_sizes=[100],\n bypass_weight_init_stddevs=[.02],\n bypass_bias_init_consts=[1.],\n bypass_dropouts=[.5],\n **kwargs):\n \"\"\" Create a RobustMultitaskClassifier.\n\n Parameters\n ----------\n n_tasks: int\n number of tasks\n n_features: int\n number of features\n layer_sizes: list\n the size of each dense layer in the network. The length of this list determines the number of layers.\n weight_init_stddevs: list or float\n the standard deviation of the distribution to use for weight initialization of each layer. The length\n of this list should equal len(layer_sizes). Alternatively this may be a single value instead of a list,\n in which case the same value is used for every layer.\n bias_init_consts: list or loat\n the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes).\n Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.\n weight_decay_penalty: float\n the magnitude of the weight decay penalty to use\n weight_decay_penalty_type: str\n the type of penalty to use for weight decay, either 'l1' or 'l2'\n dropouts: list or float\n the dropout probablity to use for each layer. The length of this list should equal len(layer_sizes).\n Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.\n activation_fns: list or object\n the Tensorflow activation function to apply to each layer. The length of this list should equal\n len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the\n same value is used for every layer.\n n_classes: int\n the number of classes\n bypass_layer_sizes: list\n the size of each dense layer in the bypass network. 
The length of this list determines the number of bypass layers.\n bypass_weight_init_stddevs: list or float\n the standard deviation of the distribution to use for weight initialization of bypass layers.\n same requirements as weight_init_stddevs\n bypass_bias_init_consts: list or float\n the value to initialize the biases in bypass layers\n same requirements as bias_init_consts\n bypass_dropouts: list or float\n the dropout probablity to use for bypass layers.\n same requirements as dropouts\n \"\"\"\n self.n_tasks = n_tasks\n self.n_features = n_features\n self.n_classes = n_classes\n n_layers = len(layer_sizes)\n if not isinstance(weight_init_stddevs, collections.Sequence):\n weight_init_stddevs = [weight_init_stddevs] * n_layers\n if not isinstance(bias_init_consts, collections.Sequence):\n bias_init_consts = [bias_init_consts] * n_layers\n if not isinstance(dropouts, collections.Sequence):\n dropouts = [dropouts] * n_layers\n if not isinstance(activation_fns, collections.Sequence):\n activation_fns = [activation_fns] * n_layers\n if weight_decay_penalty != 0.0:\n if weight_decay_penalty_type == 'l1':\n regularizer = tf.keras.regularizers.l1(weight_decay_penalty)\n else:\n regularizer = tf.keras.regularizers.l2(weight_decay_penalty)\n else:\n regularizer = None\n\n n_bypass_layers = len(bypass_layer_sizes)\n if not isinstance(bypass_weight_init_stddevs, collections.Sequence):\n bypass_weight_init_stddevs = [bypass_weight_init_stddevs\n ] * n_bypass_layers\n if not isinstance(bypass_bias_init_consts, collections.Sequence):\n bypass_bias_init_consts = [bypass_bias_init_consts] * n_bypass_layers\n if not isinstance(bypass_dropouts, collections.Sequence):\n bypass_dropouts = [bypass_dropouts] * n_bypass_layers\n bypass_activation_fns = [activation_fns[0]] * n_bypass_layers\n\n # Add the input features.\n mol_features = tf.keras.Input(shape=(n_features,))\n prev_layer = mol_features\n\n # Add the shared dense layers\n for size, weight_stddev, bias_const, dropout, activation_fn in zip(\n layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,\n activation_fns):\n layer = tf.keras.layers.Dense(\n size,\n activation=activation_fn,\n kernel_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=weight_stddev),\n bias_initializer=tf.constant_initializer(value=bias_const),\n kernel_regularizer=regularizer)(prev_layer)\n if dropout > 0.0:\n layer = tf.keras.layers.Dropout(rate=dropout)(layer)\n prev_layer = layer\n top_multitask_layer = prev_layer\n\n task_outputs = []\n for i in range(self.n_tasks):\n prev_layer = mol_features\n # Add task-specific bypass layers\n for size, weight_stddev, bias_const, dropout, activation_fn in zip(\n bypass_layer_sizes, bypass_weight_init_stddevs,\n bypass_bias_init_consts, bypass_dropouts, bypass_activation_fns):\n layer = tf.keras.layers.Dense(\n size,\n activation=activation_fn,\n kernel_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=weight_stddev),\n bias_initializer=tf.constant_initializer(value=bias_const),\n kernel_regularizer=regularizer)(prev_layer)\n if dropout > 0.0:\n layer = tf.keras.layers.Dropout(rate=dropout)(layer)\n prev_layer = layer\n top_bypass_layer = prev_layer\n\n if n_bypass_layers > 0:\n task_layer = tf.keras.layers.Concatenate(axis=1)(\n [top_multitask_layer, top_bypass_layer])\n else:\n task_layer = top_multitask_layer\n\n task_out = tf.keras.layers.Dense(n_classes)(task_layer)\n task_outputs.append(task_out)\n\n logits = Stack(axis=1)(task_outputs)\n output = tf.keras.layers.Softmax()(logits)\n model = 
tf.keras.Model(inputs=mol_features, outputs=[output, logits])\n super(RobustMultitaskClassifier, self).__init__(\n model,\n SoftmaxCrossEntropy(),\n output_types=['prediction', 'loss'],\n **kwargs)\n\n def default_generator(self,\n dataset,\n epochs=1,\n mode='fit',\n deterministic=True,\n pad_batches=True):\n for epoch in range(epochs):\n for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(\n batch_size=self.batch_size,\n deterministic=deterministic,\n pad_batches=pad_batches):\n if y_b is not None:\n y_b = to_one_hot(y_b.flatten(), self.n_classes).reshape(\n -1, self.n_tasks, self.n_classes)\n yield ([X_b], [y_b], [w_b])\n\n def create_estimator_inputs(self, feature_columns, weight_column, features,\n labels, mode):\n tensors = {}\n for layer, column in zip(self.features, feature_columns):\n tensors[layer] = tf.feature_column.input_layer(features, [column])\n if weight_column is not None:\n tensors[self.task_weights[0]] = tf.feature_column.input_layer(\n features, [weight_column])\n if labels is not None:\n tensors[self.labels[0]] = tf.one_hot(\n tf.cast(labels, tf.int32), self.n_classes)\n return tensors\n\n\nclass RobustMultitaskRegressor(KerasModel):\n \"\"\"Implements a neural network for robust multitasking.\n\n Key idea is to have bypass layers that feed directly from features to task\n output. Hopefully will allow tasks to route around bad multitasking.\n\n \"\"\"\n\n def __init__(self,\n n_tasks,\n n_features,\n layer_sizes=[1000],\n weight_init_stddevs=0.02,\n bias_init_consts=1.0,\n weight_decay_penalty=0.0,\n weight_decay_penalty_type=\"l2\",\n dropouts=0.5,\n activation_fns=tf.nn.relu,\n bypass_layer_sizes=[100],\n bypass_weight_init_stddevs=[.02],\n bypass_bias_init_consts=[1.],\n bypass_dropouts=[.5],\n **kwargs):\n \"\"\" Create a RobustMultitaskRegressor.\n\n Parameters\n ----------\n n_tasks: int\n number of tasks\n n_features: int\n number of features\n layer_sizes: list\n the size of each dense layer in the network. The length of this list determines the number of layers.\n weight_init_stddevs: list or float\n the standard deviation of the distribution to use for weight initialization of each layer. The length\n of this list should equal len(layer_sizes). Alternatively this may be a single value instead of a list,\n in which case the same value is used for every layer.\n bias_init_consts: list or loat\n the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes).\n Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.\n weight_decay_penalty: float\n the magnitude of the weight decay penalty to use\n weight_decay_penalty_type: str\n the type of penalty to use for weight decay, either 'l1' or 'l2'\n dropouts: list or float\n the dropout probablity to use for each layer. The length of this list should equal len(layer_sizes).\n Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.\n activation_fns: list or object\n the Tensorflow activation function to apply to each layer. The length of this list should equal\n len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the\n same value is used for every layer.\n bypass_layer_sizes: list\n the size of each dense layer in the bypass network. 
The length of this list determines the number of bypass layers.\n bypass_weight_init_stddevs: list or float\n the standard deviation of the distribution to use for weight initialization of bypass layers.\n same requirements as weight_init_stddevs\n bypass_bias_init_consts: list or float\n the value to initialize the biases in bypass layers\n same requirements as bias_init_consts\n bypass_dropouts: list or float\n the dropout probablity to use for bypass layers.\n same requirements as dropouts\n \"\"\"\n self.n_tasks = n_tasks\n self.n_features = n_features\n n_layers = len(layer_sizes)\n if not isinstance(weight_init_stddevs, collections.Sequence):\n weight_init_stddevs = [weight_init_stddevs] * n_layers\n if not isinstance(bias_init_consts, collections.Sequence):\n bias_init_consts = [bias_init_consts] * n_layers\n if not isinstance(dropouts, collections.Sequence):\n dropouts = [dropouts] * n_layers\n if not isinstance(activation_fns, collections.Sequence):\n activation_fns = [activation_fns] * n_layers\n if weight_decay_penalty != 0.0:\n if weight_decay_penalty_type == 'l1':\n regularizer = tf.keras.regularizers.l1(weight_decay_penalty)\n else:\n regularizer = tf.keras.regularizers.l2(weight_decay_penalty)\n else:\n regularizer = None\n\n n_bypass_layers = len(bypass_layer_sizes)\n if not isinstance(bypass_weight_init_stddevs, collections.Sequence):\n bypass_weight_init_stddevs = [bypass_weight_init_stddevs\n ] * n_bypass_layers\n if not isinstance(bypass_bias_init_consts, collections.Sequence):\n bypass_bias_init_consts = [bypass_bias_init_consts] * n_bypass_layers\n if not isinstance(bypass_dropouts, collections.Sequence):\n bypass_dropouts = [bypass_dropouts] * n_bypass_layers\n bypass_activation_fns = [activation_fns[0]] * n_bypass_layers\n\n # Add the input features.\n mol_features = tf.keras.Input(shape=(n_features,))\n prev_layer = mol_features\n\n # Add the shared dense layers\n for size, weight_stddev, bias_const, dropout, activation_fn in zip(\n layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,\n activation_fns):\n layer = tf.keras.layers.Dense(\n size,\n activation=activation_fn,\n kernel_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=weight_stddev),\n bias_initializer=tf.constant_initializer(value=bias_const),\n kernel_regularizer=regularizer)(prev_layer)\n if dropout > 0.0:\n layer = tf.keras.layers.Dropout(rate=dropout)(layer)\n prev_layer = layer\n top_multitask_layer = prev_layer\n\n task_outputs = []\n for i in range(self.n_tasks):\n prev_layer = mol_features\n # Add task-specific bypass layers\n for size, weight_stddev, bias_const, dropout, activation_fn in zip(\n bypass_layer_sizes, bypass_weight_init_stddevs,\n bypass_bias_init_consts, bypass_dropouts, bypass_activation_fns):\n layer = tf.keras.layers.Dense(\n size,\n activation=activation_fn,\n kernel_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=weight_stddev),\n bias_initializer=tf.constant_initializer(value=bias_const),\n kernel_regularizer=regularizer)(prev_layer)\n if dropout > 0.0:\n layer = tf.keras.layers.Dropout(rate=dropout)(layer)\n prev_layer = layer\n top_bypass_layer = prev_layer\n\n if n_bypass_layers > 0:\n task_layer = tf.keras.layers.Concatenate(axis=1)(\n [top_multitask_layer, top_bypass_layer])\n else:\n task_layer = top_multitask_layer\n\n task_out = tf.keras.layers.Dense(1)(task_layer)\n task_outputs.append(task_out)\n\n outputs = tf.keras.layers.Concatenate(axis=1)(task_outputs)\n model = tf.keras.Model(inputs=mol_features, outputs=outputs)\n 
super(RobustMultitaskRegressor, self).__init__(model, L2Loss(), **kwargs)\n",
"import numpy as np\nfrom deepchem.feat import Featurizer\n\nzinc_charset = [\n ' ', '#', ')', '(', '+', '-', '/', '1', '3', '2', '5', '4', '7', '6', '8',\n '=', '@', 'C', 'B', 'F', 'I', 'H', 'O', 'N', 'S', '[', ']', '\\\\', 'c', 'l',\n 'o', 'n', 'p', 's', 'r'\n]\n\n\nclass OneHotFeaturizer(Featurizer):\n \"\"\"\n NOTE(LESWING) Not Thread Safe in initialization of charset\n \"\"\"\n\n def __init__(self, charset=None, padlength=120):\n \"\"\"\n Parameters\n ----------\n charset: obj:`list` of obj:`str`\n Each string is length 1\n padlength: int\n length to pad the smile strings to\n \"\"\"\n self.charset = charset\n self.pad_length = padlength\n\n def featurize(self, mols, verbose=True, log_every_n=1000):\n \"\"\"\n Parameters\n ----------\n mols: obj\n List of rdkit Molecule Objects\n verbose: bool\n How much logging\n log_every_n:\n How often to log\n Returns\n\n -------\n obj\n numpy array of features\n \"\"\"\n from rdkit import Chem\n smiles = [Chem.MolToSmiles(mol) for mol in mols]\n if self.charset is None:\n self.charset = self._create_charset(smiles)\n return np.array([self.one_hot_encoded(smile) for smile in smiles])\n\n def one_hot_array(self, i):\n \"\"\"\n Create a one hot array with bit i set to 1\n Parameters\n ----------\n i: int\n bit to set to 1\n Returns\n -------\n obj:`list` of obj:`int`\n length len(self.charset)\n \"\"\"\n return [int(x) for x in [ix == i for ix in range(len(self.charset))]]\n\n def one_hot_index(self, c):\n \"\"\"\n TODO(LESWING) replace with map lookup vs linear scan\n Parameters\n ----------\n c\n character whose index we want\n Returns\n -------\n int\n index of c in self.charset\n \"\"\"\n return self.charset.index(c)\n\n def pad_smile(self, smile):\n \"\"\"\n Pad A Smile String to self.pad_length\n Parameters\n ----------\n smile: str\n\n Returns\n -------\n str\n smile string space padded to self.pad_length\n \"\"\"\n\n return smile.ljust(self.pad_length)\n\n def one_hot_encoded(self, smile):\n \"\"\"\n One Hot Encode an entire SMILE string\n Parameters\n ----------\n smile: str\n smile string to encode\n\n Returns\n -------\n object\n np.array of one hot encoded arrays for each character in smile\n \"\"\"\n return np.array([\n self.one_hot_array(self.one_hot_index(x)) for x in self.pad_smile(smile)\n ])\n\n def untransform(self, z):\n \"\"\"\n Convert from one hot representation back to SMILE\n Parameters\n ----------\n z: obj:`list`\n list of one hot encoded features\n\n Returns\n -------\n Smile Strings picking MAX for each one hot encoded array\n \"\"\"\n z1 = []\n for i in range(len(z)):\n s = \"\"\n for j in range(len(z[i])):\n oh = np.argmax(z[i][j])\n s += self.charset[oh]\n z1.append([s.strip()])\n return z1\n\n def _create_charset(self, smiles):\n \"\"\"\n create the charset from smiles\n Parameters\n ----------\n smiles: obj:`list` of obj:`str`\n list of smile strings\n\n Returns\n -------\n obj:`list` of obj:`str`\n List of length one strings that are characters in smiles. No duplicates\n \"\"\"\n s = set()\n for smile in smiles:\n for c in smile:\n s.add(c)\n return [' '] + sorted(list(s))\n"
] | [
[
"numpy.asarray",
"numpy.array"
],
[
"tensorflow.feature_column.input_layer",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.Input",
"tensorflow.keras.regularizers.l1",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.regularizers.l2",
"tensorflow.cast",
"tensorflow.keras.Model",
"tensorflow.constant_initializer",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Softmax"
],
[
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
avelez93/tfx | [
"75fbb6a7d50e99138609be3ca4c3a204a13a2195",
"75fbb6a7d50e99138609be3ca4c3a204a13a2195",
"75fbb6a7d50e99138609be3ca4c3a204a13a2195",
"75fbb6a7d50e99138609be3ca4c3a204a13a2195",
"75fbb6a7d50e99138609be3ca4c3a204a13a2195",
"75fbb6a7d50e99138609be3ca4c3a204a13a2195",
"75fbb6a7d50e99138609be3ca4c3a204a13a2195",
"75fbb6a7d50e99138609be3ca4c3a204a13a2195",
"75fbb6a7d50e99138609be3ca4c3a204a13a2195"
] | [
"tfx/benchmarks/tft_benchmark_chicago_taxi.py",
"tfx/orchestration/experimental/core/async_pipeline_task_gen_test.py",
"tfx/extensions/google_cloud_ai_platform/prediction_clients_test.py",
"tfx/dsl/compiler/placeholder_utils_test.py",
"tfx/orchestration/kubeflow/e2e_tests/kubeflow_e2e_test.py",
"tfx/types/component_spec_test.py",
"tfx/extensions/google_cloud_ai_platform/tuner/component_test.py",
"tfx/extensions/google_cloud_ai_platform/tuner/executor_test.py",
"tfx/components/trainer/rewriting/tfjs_rewriter_test.py"
] | [
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"TFT benchmark for Chicago Taxi dataset.\"\"\"\n\nfrom absl import flags\nfrom tfx.benchmarks import tft_benchmark_base\nfrom tfx.benchmarks.datasets.chicago_taxi import dataset\n\nfrom tensorflow.python.platform import test # pylint: disable=g-direct-tensorflow-import\n\nFLAGS = flags.FLAGS\nflags.DEFINE_integer(\"num_analyzers_wide\", 10,\n \"Number of analyzers in the TFT preprocessing function. \"\n \"Only used in `TFTBenchmarkChicagoTaxiWide`.\")\n\n\nclass TFTBenchmarkChicagoTaxi(tft_benchmark_base.TFTBenchmarkBase):\n\n def __init__(self, **kwargs):\n super().__init__(dataset=dataset.get_dataset(), **kwargs)\n\n\nclass TFTBenchmarkChicagoTaxiWide(tft_benchmark_base.TFTBenchmarkBase):\n\n def __init__(self, **kwargs):\n super().__init__(\n dataset=dataset.get_wide_dataset(num_analyzers=self._num_analyzers()),\n **kwargs)\n\n def _num_analyzers(self):\n return (FLAGS.num_analyzers_wide\n if FLAGS.is_parsed() else FLAGS[\"num_analyzers_wide\"].default)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.orchestration.experimental.core.async_pipeline_task_gen.\"\"\"\n\nimport os\n\nfrom absl.testing import parameterized\nfrom absl.testing.absltest import mock\nimport tensorflow as tf\nfrom tfx.orchestration import metadata\nfrom tfx.orchestration.experimental.core import async_pipeline_task_gen as asptg\nfrom tfx.orchestration.experimental.core import mlmd_state\nfrom tfx.orchestration.experimental.core import pipeline_state as pstate\nfrom tfx.orchestration.experimental.core import service_jobs\nfrom tfx.orchestration.experimental.core import task as task_lib\nfrom tfx.orchestration.experimental.core import task_gen_utils\nfrom tfx.orchestration.experimental.core import task_queue as tq\nfrom tfx.orchestration.experimental.core import test_utils\nfrom tfx.orchestration.experimental.core.testing import test_async_pipeline\nfrom tfx.utils import status as status_lib\n\nfrom google.protobuf import any_pb2\nfrom ml_metadata.proto import metadata_store_pb2\n\n\nclass AsyncPipelineTaskGeneratorTest(test_utils.TfxTest,\n parameterized.TestCase):\n\n def setUp(self):\n super().setUp()\n pipeline_root = os.path.join(\n os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),\n self.id())\n self._pipeline_root = pipeline_root\n\n # Makes sure multiple connections within a test always connect to the same\n # MLMD instance.\n metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')\n self._metadata_path = metadata_path\n connection_config = metadata.sqlite_metadata_connection_config(\n metadata_path)\n connection_config.sqlite.SetInParent()\n self._mlmd_connection = metadata.Metadata(\n connection_config=connection_config)\n\n # Sets up the pipeline.\n pipeline = test_async_pipeline.create_pipeline()\n self._pipeline = pipeline\n self._pipeline_info = pipeline.pipeline_info\n self._pipeline_runtime_spec = pipeline.runtime_spec\n self._pipeline_runtime_spec.pipeline_root.field_value.string_value = (\n pipeline_root)\n\n # Extracts components.\n self._example_gen = pipeline.nodes[0].pipeline_node\n self._transform = pipeline.nodes[1].pipeline_node\n self._trainer = pipeline.nodes[2].pipeline_node\n\n self._task_queue = tq.TaskQueue()\n\n self._mock_service_job_manager = mock.create_autospec(\n service_jobs.ServiceJobManager, instance=True)\n\n def _is_pure_service_node(unused_pipeline_state, node_id):\n return node_id == self._example_gen.node_info.id\n\n def _is_mixed_service_node(unused_pipeline_state, node_id):\n return node_id == self._transform.node_info.id\n\n self._mock_service_job_manager.is_pure_service_node.side_effect = (\n _is_pure_service_node)\n self._mock_service_job_manager.is_mixed_service_node.side_effect = (\n _is_mixed_service_node)\n\n def _default_ensure_node_services(unused_pipeline_state, node_id):\n self.assertIn(\n node_id,\n (self._example_gen.node_info.id, self._transform.node_info.id))\n return 
service_jobs.ServiceStatus.RUNNING\n\n self._mock_service_job_manager.ensure_node_services.side_effect = (\n _default_ensure_node_services)\n\n def _finish_node_execution(self, use_task_queue, exec_node_task):\n \"\"\"Simulates successful execution of a node.\"\"\"\n test_utils.fake_execute_node(self._mlmd_connection, exec_node_task)\n if use_task_queue:\n dequeued_task = self._task_queue.dequeue()\n self._task_queue.task_done(dequeued_task)\n self.assertEqual(exec_node_task.task_id, dequeued_task.task_id)\n\n def _generate_and_test(self,\n use_task_queue,\n num_initial_executions,\n num_tasks_generated,\n num_new_executions,\n num_active_executions,\n expected_exec_nodes=None,\n ignore_update_node_state_tasks=False):\n \"\"\"Generates tasks and tests the effects.\"\"\"\n return test_utils.run_generator_and_test(\n self,\n self._mlmd_connection,\n asptg.AsyncPipelineTaskGenerator,\n self._pipeline,\n self._task_queue,\n use_task_queue,\n self._mock_service_job_manager,\n num_initial_executions=num_initial_executions,\n num_tasks_generated=num_tasks_generated,\n num_new_executions=num_new_executions,\n num_active_executions=num_active_executions,\n expected_exec_nodes=expected_exec_nodes,\n ignore_update_node_state_tasks=ignore_update_node_state_tasks)\n\n @parameterized.parameters(0, 1)\n def test_no_tasks_generated_when_no_inputs(self, min_count):\n \"\"\"Tests no tasks are generated when there are no inputs, regardless of min_count.\"\"\"\n for node in self._pipeline.nodes:\n for v in node.pipeline_node.inputs.inputs.values():\n v.min_count = min_count\n\n with self._mlmd_connection as m:\n pipeline_state = test_utils.get_or_create_pipeline_state(\n m, self._pipeline)\n task_gen = asptg.AsyncPipelineTaskGenerator(\n m, lambda _: False, service_jobs.DummyServiceJobManager())\n tasks = task_gen.generate(pipeline_state)\n self.assertEmpty(tasks, 'Expected no task generation when no inputs.')\n self.assertEmpty(\n test_utils.get_non_orchestrator_executions(m),\n 'There must not be any registered executions since no tasks were '\n 'generated.')\n\n @parameterized.parameters(False, True)\n def test_task_generation(self, use_task_queue):\n \"\"\"Tests async pipeline task generation.\n\n Args:\n use_task_queue: If task queue is enabled, new tasks are only generated if\n a task with the same task_id does not already exist in the queue.\n `use_task_queue=False` is useful to test the case of task generation\n when task queue is empty (for eg: due to orchestrator restart).\n \"\"\"\n # Simulate that ExampleGen has already completed successfully.\n test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,\n 1)\n\n # Generate once.\n [update_example_gen_task, update_transform_task,\n exec_transform_task] = self._generate_and_test(\n use_task_queue,\n num_initial_executions=1,\n num_tasks_generated=3,\n num_new_executions=1,\n num_active_executions=1,\n expected_exec_nodes=[self._transform])\n self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))\n self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)\n self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))\n self.assertEqual(pstate.NodeState.RUNNING, update_transform_task.state)\n self.assertTrue(task_lib.is_exec_node_task(exec_transform_task))\n\n self._mock_service_job_manager.ensure_node_services.assert_has_calls([\n mock.call(mock.ANY, self._example_gen.node_info.id),\n mock.call(mock.ANY, self._transform.node_info.id)\n ])\n\n # No new effects if generate called 
again.\n tasks = self._generate_and_test(\n use_task_queue,\n num_initial_executions=2,\n num_tasks_generated=1 if use_task_queue else 3,\n num_new_executions=0,\n num_active_executions=1,\n expected_exec_nodes=[] if use_task_queue else [self._transform])\n if not use_task_queue:\n exec_transform_task = tasks[2]\n\n # Mark transform execution complete.\n self._finish_node_execution(use_task_queue, exec_transform_task)\n\n # Trainer execution task should be generated next.\n [\n update_example_gen_task, update_transform_task, update_trainer_task,\n exec_trainer_task\n ] = self._generate_and_test(\n use_task_queue,\n num_initial_executions=2,\n num_tasks_generated=4,\n num_new_executions=1,\n num_active_executions=1,\n expected_exec_nodes=[self._trainer])\n self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))\n self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)\n self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))\n self.assertEqual(pstate.NodeState.STARTED, update_transform_task.state)\n self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))\n self.assertEqual(pstate.NodeState.RUNNING, update_trainer_task.state)\n self.assertTrue(task_lib.is_exec_node_task(exec_trainer_task))\n\n # Mark the trainer execution complete.\n self._finish_node_execution(use_task_queue, exec_trainer_task)\n\n # Only UpdateNodeStateTask are generated as there are no new inputs.\n tasks = self._generate_and_test(\n use_task_queue,\n num_initial_executions=3,\n num_tasks_generated=3,\n num_new_executions=0,\n num_active_executions=0)\n for task in tasks:\n self.assertTrue(task_lib.is_update_node_state_task(task))\n self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)\n\n # Fake another ExampleGen run.\n test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,\n 1)\n\n # Both transform and trainer tasks should be generated as they both find\n # new inputs.\n [\n update_example_gen_task, update_transform_task, exec_transform_task,\n update_trainer_task, exec_trainer_task\n ] = self._generate_and_test(\n use_task_queue,\n num_initial_executions=4,\n num_tasks_generated=5,\n num_new_executions=2,\n num_active_executions=2,\n expected_exec_nodes=[self._transform, self._trainer])\n self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))\n self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)\n self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))\n self.assertEqual(pstate.NodeState.RUNNING, update_transform_task.state)\n self.assertTrue(task_lib.is_exec_node_task(exec_transform_task))\n self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))\n self.assertEqual(pstate.NodeState.RUNNING, update_trainer_task.state)\n self.assertTrue(task_lib.is_exec_node_task(exec_trainer_task))\n\n # Re-generation will produce the same tasks when task queue disabled.\n tasks = self._generate_and_test(\n use_task_queue,\n num_initial_executions=6,\n num_tasks_generated=1 if use_task_queue else 5,\n num_new_executions=0,\n num_active_executions=2,\n expected_exec_nodes=[]\n if use_task_queue else [self._transform, self._trainer])\n if not use_task_queue:\n self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))\n self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)\n self.assertTrue(task_lib.is_update_node_state_task(tasks[1]))\n self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)\n 
self.assertTrue(task_lib.is_exec_node_task(tasks[2]))\n self.assertTrue(task_lib.is_update_node_state_task(tasks[3]))\n self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)\n self.assertTrue(task_lib.is_exec_node_task(tasks[4]))\n exec_transform_task = tasks[2]\n exec_trainer_task = tasks[4]\n else:\n self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))\n self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)\n\n # Mark transform execution complete.\n self._finish_node_execution(use_task_queue, exec_transform_task)\n\n # Mark the trainer execution complete.\n self._finish_node_execution(use_task_queue, exec_trainer_task)\n\n # Trainer should be triggered again due to transform producing new output.\n [\n update_example_gen_task, update_transform_task, update_trainer_task,\n exec_trainer_task\n ] = self._generate_and_test(\n use_task_queue,\n num_initial_executions=6,\n num_tasks_generated=4,\n num_new_executions=1,\n num_active_executions=1,\n expected_exec_nodes=[self._trainer])\n self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))\n self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)\n self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))\n self.assertEqual(pstate.NodeState.STARTED, update_transform_task.state)\n self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))\n self.assertEqual(pstate.NodeState.RUNNING, update_trainer_task.state)\n self.assertTrue(task_lib.is_exec_node_task(exec_trainer_task))\n\n # Finally, no new tasks once trainer completes.\n self._finish_node_execution(use_task_queue, exec_trainer_task)\n [update_example_gen_task, update_transform_task,\n update_trainer_task] = self._generate_and_test(\n use_task_queue,\n num_initial_executions=7,\n num_tasks_generated=3,\n num_new_executions=0,\n num_active_executions=0)\n self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))\n self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)\n self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))\n self.assertEqual(pstate.NodeState.STARTED, update_transform_task.state)\n self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))\n self.assertEqual(pstate.NodeState.STARTED, update_trainer_task.state)\n\n if use_task_queue:\n self.assertTrue(self._task_queue.is_empty())\n\n @parameterized.parameters(False, True)\n def test_task_generation_when_node_stopped(self, stop_transform):\n \"\"\"Tests stopped nodes are ignored when generating tasks.\"\"\"\n # Simulate that ExampleGen has already completed successfully.\n test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,\n 1)\n\n # Generate once.\n num_initial_executions = 1\n if stop_transform:\n num_tasks_generated = 1\n num_new_executions = 0\n num_active_executions = 0\n with self._mlmd_connection as m:\n pipeline_state = test_utils.get_or_create_pipeline_state(\n m, self._pipeline)\n with pipeline_state:\n with pipeline_state.node_state_update_context(\n task_lib.NodeUid.from_pipeline_node(\n self._pipeline, self._transform)) as node_state:\n node_state.update(pstate.NodeState.STOPPING,\n status_lib.Status(code=status_lib.Code.CANCELLED))\n else:\n num_tasks_generated = 3\n num_new_executions = 1\n num_active_executions = 1\n tasks = self._generate_and_test(\n True,\n num_initial_executions=num_initial_executions,\n num_tasks_generated=num_tasks_generated,\n num_new_executions=num_new_executions,\n 
num_active_executions=num_active_executions)\n self.assertLen(tasks, num_tasks_generated)\n\n if stop_transform:\n self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))\n self.assertEqual(pstate.NodeState.RUNNING, tasks[0].state)\n else:\n self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))\n self.assertEqual(pstate.NodeState.RUNNING, tasks[0].state)\n self.assertTrue(task_lib.is_update_node_state_task(tasks[1]))\n self.assertEqual(pstate.NodeState.RUNNING, tasks[1].state)\n self.assertTrue(task_lib.is_exec_node_task(tasks[2]))\n\n def test_service_job_failed(self):\n \"\"\"Tests task generation when example-gen service job fails.\"\"\"\n\n def _ensure_node_services(unused_pipeline_state, node_id):\n self.assertEqual('my_example_gen', node_id)\n return service_jobs.ServiceStatus.FAILED\n\n self._mock_service_job_manager.ensure_node_services.side_effect = (\n _ensure_node_services)\n [update_task] = self._generate_and_test(\n True,\n num_initial_executions=0,\n num_tasks_generated=1,\n num_new_executions=0,\n num_active_executions=0)\n self.assertTrue(task_lib.is_update_node_state_task(update_task))\n self.assertEqual(status_lib.Code.ABORTED, update_task.status.code)\n\n def test_triggering_upon_exec_properties_change(self):\n test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,\n 1)\n\n [exec_transform_task] = self._generate_and_test(\n False,\n num_initial_executions=1,\n num_tasks_generated=1,\n num_new_executions=1,\n num_active_executions=1,\n expected_exec_nodes=[self._transform],\n ignore_update_node_state_tasks=True)\n\n # Fail the registered execution.\n with self._mlmd_connection as m:\n with mlmd_state.mlmd_execution_atomic_op(\n m, exec_transform_task.execution_id) as execution:\n execution.last_known_state = metadata_store_pb2.Execution.FAILED\n\n # Try to generate with same execution properties. This should not trigger\n # as there are no changes since last run.\n self._generate_and_test(\n False,\n num_initial_executions=2,\n num_tasks_generated=0,\n num_new_executions=0,\n num_active_executions=0,\n ignore_update_node_state_tasks=True)\n\n # Change execution properties of last run.\n with self._mlmd_connection as m:\n with mlmd_state.mlmd_execution_atomic_op(\n m, exec_transform_task.execution_id) as execution:\n execution.custom_properties['a_param'].int_value = 20\n\n # Generating with different execution properties should trigger.\n self._generate_and_test(\n False,\n num_initial_executions=2,\n num_tasks_generated=1,\n num_new_executions=1,\n num_active_executions=1,\n expected_exec_nodes=[self._transform],\n ignore_update_node_state_tasks=True)\n\n def test_triggering_upon_executor_spec_change(self):\n test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,\n 1)\n\n with mock.patch.object(task_gen_utils,\n 'get_executor_spec') as mock_get_executor_spec:\n mock_get_executor_spec.side_effect = _fake_executor_spec(1)\n [exec_transform_task] = self._generate_and_test(\n False,\n num_initial_executions=1,\n num_tasks_generated=1,\n num_new_executions=1,\n num_active_executions=1,\n expected_exec_nodes=[self._transform],\n ignore_update_node_state_tasks=True)\n\n # Fail the registered execution.\n with self._mlmd_connection as m:\n with mlmd_state.mlmd_execution_atomic_op(\n m, exec_transform_task.execution_id) as execution:\n execution.last_known_state = metadata_store_pb2.Execution.FAILED\n\n # Try to generate with same executor spec. 
This should not trigger as\n # there are no changes since last run.\n with mock.patch.object(task_gen_utils,\n 'get_executor_spec') as mock_get_executor_spec:\n mock_get_executor_spec.side_effect = _fake_executor_spec(1)\n self._generate_and_test(\n False,\n num_initial_executions=2,\n num_tasks_generated=0,\n num_new_executions=0,\n num_active_executions=0,\n ignore_update_node_state_tasks=True)\n\n # Generating with a different executor spec should trigger.\n with mock.patch.object(task_gen_utils,\n 'get_executor_spec') as mock_get_executor_spec:\n mock_get_executor_spec.side_effect = _fake_executor_spec(2)\n self._generate_and_test(\n False,\n num_initial_executions=2,\n num_tasks_generated=1,\n num_new_executions=1,\n num_active_executions=1,\n expected_exec_nodes=[self._transform],\n ignore_update_node_state_tasks=True)\n\n\ndef _fake_executor_spec(val):\n\n def _get_executor_spec(*unused_args, **unused_kwargs):\n value = metadata_store_pb2.Value(int_value=val)\n any_proto = any_pb2.Any()\n any_proto.Pack(value)\n return any_proto\n\n return _get_executor_spec\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.extensions.google_cloud_ai_platform.prediction_clients.\"\"\"\n\nimport tensorflow as tf\nfrom tfx.extensions.google_cloud_ai_platform import prediction_clients\n\n\nclass PredictionClientTest(tf.test.TestCase):\n\n def testGetTensorflowRuntime(self):\n self.assertEqual('1.14', prediction_clients._get_tf_runtime_version('1.14'))\n self.assertEqual('1.15',\n prediction_clients._get_tf_runtime_version('1.15.0'))\n self.assertEqual('1.15',\n prediction_clients._get_tf_runtime_version('1.15.1'))\n self.assertEqual('1.15',\n prediction_clients._get_tf_runtime_version('2.0.0'))\n self.assertEqual('1.15',\n prediction_clients._get_tf_runtime_version('2.0.1'))\n self.assertEqual('2.1', prediction_clients._get_tf_runtime_version('2.1.0'))\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.dsl.compiler.placeholder_utils.\"\"\"\n\nimport base64\nimport re\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\nfrom tfx.dsl.compiler import placeholder_utils\nfrom tfx.orchestration.portable import data_types\nfrom tfx.proto import infra_validator_pb2\nfrom tfx.proto.orchestration import executable_spec_pb2\nfrom tfx.proto.orchestration import execution_invocation_pb2\nfrom tfx.proto.orchestration import pipeline_pb2\nfrom tfx.proto.orchestration import placeholder_pb2\nfrom tfx.types import artifact_utils\nfrom tfx.types import standard_artifacts\nfrom tfx.utils import proto_utils\n\nfrom google.protobuf import descriptor_pb2\nfrom google.protobuf import descriptor_pool\nfrom google.protobuf import json_format\nfrom google.protobuf import text_format\nfrom ml_metadata.proto import metadata_store_pb2\n\n# Concatenate the URI of `examples` input artifact's `train` split with /1\n_CONCAT_SPLIT_URI_EXPRESSION = \"\"\"\noperator {\n concat_op {\n expressions {\n operator {\n artifact_uri_op {\n expression {\n operator {\n index_op{\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"examples\"\n }\n }\n index: 0\n }\n }\n }\n split: \"train\"\n }\n }\n }\n expressions{\n value {\n string_value: \"/\"\n }\n }\n expressions{\n value {\n int_value: 1\n }\n }\n }\n}\n\"\"\"\n\n_WANT_EXEC_INVOCATION = \"\"\"\nexecution_properties {\n key: \"proto_property\"\n value {\n string_value: \"{\\\\n \\\\\"tensorflow_serving\\\\\": {\\\\n \\\\\"tags\\\\\": [\\\\n \\\\\"latest\\\\\",\\\\n \\\\\"1.15.0-gpu\\\\\"\\\\n ]\\\\n }\\\\n}\"\n }\n}\nexecution_properties {\n key: \"list_proto_property\"\n value {\n string_value: \"[\\\\\"{\\\\\\\\n \\\\\\\\\\\\\"tensorflow_serving\\\\\\\\\\\\\": {\\\\\\\\n \\\\\\\\\\\\\"tags\\\\\\\\\\\\\": [\\\\\\\\n \\\\\\\\\\\\\"latest\\\\\\\\\\\\\",\\\\\\\\n \\\\\\\\\\\\\"1.15.0-gpu\\\\\\\\\\\\\"\\\\\\\\n ]\\\\\\\\n }\\\\\\\\n}\\\\\"]\"\n }\n}\nexecution_properties_with_schema {\n key: \"proto_property\"\n value {\n field_value {\n string_value: \"{\\\\n \\\\\"tensorflow_serving\\\\\": {\\\\n \\\\\"tags\\\\\": [\\\\n \\\\\"latest\\\\\",\\\\n \\\\\"1.15.0-gpu\\\\\"\\\\n ]\\\\n }\\\\n}\"\n }\n }\n}\nexecution_properties_with_schema {\n key: \"list_proto_property\"\n value {\n field_value {\n string_value: \"[\\\\\"{\\\\\\\\n \\\\\\\\\\\\\"tensorflow_serving\\\\\\\\\\\\\": {\\\\\\\\n \\\\\\\\\\\\\"tags\\\\\\\\\\\\\": [\\\\\\\\n \\\\\\\\\\\\\"latest\\\\\\\\\\\\\",\\\\\\\\n \\\\\\\\\\\\\"1.15.0-gpu\\\\\\\\\\\\\"\\\\\\\\n ]\\\\\\\\n }\\\\\\\\n}\\\\\"]\"\n }\n schema {\n value_type {\n list_type {\n proto_type {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n }\n }\n }\n }\n}\noutput_metadata_uri: \"test_executor_output_uri\"\ninput_dict {\n key: \"examples\"\n value {\n elements {\n artifact {\n artifact {\n uri: \"/tmp\"\n properties {\n key: \"split_names\"\n value {\n string_value: \"[\\\\\"train\\\\\", 
\\\\\"eval\\\\\"]\"\n }\n }\n properties {\n key: \"version\"\n value {\n int_value: 42\n }\n }\n custom_properties {\n key: \"custom_key\"\n value {\n string_value: \"custom_value\"\n }\n }\n }\n type {\n name: \"Examples\"\n properties {\n key: \"span\"\n value: INT\n }\n properties {\n key: \"split_names\"\n value: STRING\n }\n properties {\n key: \"version\"\n value: INT\n }\n base_type: DATASET\n }\n }\n }\n }\n}\ninput_dict {\n key: \"model\"\n value {\n elements {\n artifact {\n artifact {\n }\n type {\n name: \"Model\"\n base_type: MODEL\n }\n }\n }\n }\n}\noutput_dict {\n key: \"blessing\"\n value {\n elements {\n artifact {\n artifact {\n }\n type {\n name: \"ModelBlessing\"\n }\n }\n }\n }\n}\nstateful_working_dir: \"test_stateful_working_dir\"\npipeline_info {\n id: \"test_pipeline_id\"\n}\npipeline_node {\n node_info {\n type {\n name: \"infra_validator\"\n }\n }\n}\n\"\"\"\n\n\nclass PlaceholderUtilsTest(tf.test.TestCase):\n\n def setUp(self):\n super().setUp()\n examples = [standard_artifacts.Examples()]\n examples[0].uri = \"/tmp\"\n examples[0].split_names = artifact_utils.encode_split_names(\n [\"train\", \"eval\"])\n examples[0].version = 42\n examples[0].set_string_custom_property(\"custom_key\", \"custom_value\")\n self._serving_spec = infra_validator_pb2.ServingSpec()\n self._serving_spec.tensorflow_serving.tags.extend([\"latest\", \"1.15.0-gpu\"])\n self._resolution_context = placeholder_utils.ResolutionContext(\n exec_info=data_types.ExecutionInfo(\n input_dict={\n \"model\": [standard_artifacts.Model()],\n \"examples\": examples,\n },\n output_dict={\"blessing\": [standard_artifacts.ModelBlessing()]},\n exec_properties={\n \"proto_property\": proto_utils.proto_to_json(self._serving_spec),\n \"list_proto_property\": [self._serving_spec],\n },\n execution_output_uri=\"test_executor_output_uri\",\n stateful_working_dir=\"test_stateful_working_dir\",\n pipeline_node=pipeline_pb2.PipelineNode(\n node_info=pipeline_pb2.NodeInfo(\n type=metadata_store_pb2.ExecutionType(\n name=\"infra_validator\"))),\n pipeline_info=pipeline_pb2.PipelineInfo(id=\"test_pipeline_id\")),\n executor_spec=executable_spec_pb2.PythonClassExecutableSpec(\n class_path=\"test_class_path\"),\n )\n # Resolution context to simulate missing optional values.\n self._none_resolution_context = placeholder_utils.ResolutionContext(\n exec_info=data_types.ExecutionInfo(\n input_dict={},\n output_dict={},\n exec_properties={},\n pipeline_node=pipeline_pb2.PipelineNode(\n node_info=pipeline_pb2.NodeInfo(\n type=metadata_store_pb2.ExecutionType(\n name=\"infra_validator\"))),\n pipeline_info=pipeline_pb2.PipelineInfo(id=\"test_pipeline_id\")),\n executor_spec=None,\n platform_config=None)\n\n def testConcatArtifactUri(self):\n pb = text_format.Parse(_CONCAT_SPLIT_URI_EXPRESSION,\n placeholder_pb2.PlaceholderExpression())\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), \"/tmp/Split-train/1\")\n\n def testArtifactProperty(self):\n placeholder_expression = \"\"\"\n operator {\n artifact_property_op {\n expression {\n operator {\n index_op{\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"examples\"\n }\n }\n index: 0\n }\n }\n }\n key: \"version\"\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), 42)\n\n self.assertEqual(\n placeholder_utils.debug_str(pb),\n 
\"input(\\\"examples\\\")[0].property(\\\"version\\\")\")\n\n def testArtifactCustomProperty(self):\n placeholder_expression = \"\"\"\n operator {\n artifact_property_op {\n expression {\n operator {\n index_op{\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"examples\"\n }\n }\n index: 0\n }\n }\n }\n key: \"custom_key\"\n is_custom_property: True\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), \"custom_value\")\n\n self.assertEqual(\n placeholder_utils.debug_str(pb),\n \"input(\\\"examples\\\")[0].custom_property(\\\"custom_key\\\")\")\n\n def testArtifactUriNoneAccess(self):\n # Access a missing optional channel.\n placeholder_expression = \"\"\"\n operator {\n artifact_uri_op {\n expression {\n operator {\n index_op{\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"examples\"\n }\n }\n index: 0\n }\n }\n }\n split: \"train\"\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n\n self.assertIsNone(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._none_resolution_context))\n\n def testArtifactValueOperator(self):\n test_artifact = standard_artifacts.Integer()\n test_artifact.uri = self.create_tempfile().full_path\n test_artifact.value = 42\n self._resolution_context = placeholder_utils.ResolutionContext(\n exec_info=data_types.ExecutionInfo(\n input_dict={\n \"channel_1\": [test_artifact],\n },\n pipeline_node=pipeline_pb2.PipelineNode(\n node_info=pipeline_pb2.NodeInfo()),\n pipeline_info=pipeline_pb2.PipelineInfo(id=\"test_pipeline_id\")))\n pb = text_format.Parse(\n \"\"\"\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"channel_1\"\n }\n }\n index: 0\n }\n }\n }\n }\n }\n \"\"\", placeholder_pb2.PlaceholderExpression())\n resolved_value = placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context)\n self.assertEqual(resolved_value, 42)\n\n def testProtoExecPropertyPrimitiveField(self):\n # Access a non-message type proto field\n placeholder_expression = \"\"\"\n operator {\n index_op {\n expression {\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"proto_property\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n proto_field_path: \".tensorflow_serving\"\n proto_field_path: \".tags\"\n }\n }\n }\n index: 1\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n\n # Prepare FileDescriptorSet\n fd = descriptor_pb2.FileDescriptorProto()\n infra_validator_pb2.ServingSpec().DESCRIPTOR.file.CopyToProto(fd)\n pb.operator.index_op.expression.operator.proto_op.proto_schema.file_descriptors.file.append(\n fd)\n\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), \"1.15.0-gpu\")\n\n def testListProtoExecPropertyIndex(self):\n placeholder_expression = \"\"\"\n operator {\n proto_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"list_proto_property\"\n }\n }\n index: 0\n }\n }\n }\n serialization_format: JSON\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n expected_json_serialization = \"\"\"\\\n{\n 
\"tensorflow_serving\": {\n \"tags\": [\n \"latest\",\n \"1.15.0-gpu\"\n ]\n }\n}\"\"\"\n\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), expected_json_serialization)\n\n def testListExecPropertySerializationJson(self):\n placeholder_expression = \"\"\"\n operator {\n list_serialization_op {\n expression {\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"proto_property\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n proto_field_path: \".tensorflow_serving\"\n proto_field_path: \".tags\"\n }\n }\n }\n serialization_format: JSON\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n expected_json_serialization = '[\"latest\", \"1.15.0-gpu\"]'\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), expected_json_serialization)\n\n def testListExecPropertySerializationCommaSeparatedString(self):\n placeholder_expression = \"\"\"\n operator {\n list_serialization_op {\n expression {\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"proto_property\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n proto_field_path: \".tensorflow_serving\"\n proto_field_path: \".tags\"\n }\n }\n }\n serialization_format: COMMA_SEPARATED_STR\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n expected_serialization = '\"latest\",\"1.15.0-gpu\"'\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), expected_serialization)\n\n def testListConcat(self):\n placeholder_expression = \"\"\"\n operator {\n list_concat_op {\n expressions {\n operator {\n artifact_property_op {\n expression {\n operator {\n index_op{\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"examples\"\n }\n }\n index: 0\n }\n }\n }\n key: \"version\"\n }\n }\n }\n expressions {\n value {\n string_value: \"random_str\"\n }\n }\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n expected_result = [42, \"random_str\"]\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), expected_result)\n\n def testListConcatAndSerialize(self):\n placeholder_expression = \"\"\"\n operator {\n list_serialization_op {\n expression {\n operator {\n list_concat_op {\n expressions {\n operator {\n artifact_property_op {\n expression {\n operator {\n index_op{\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"examples\"\n }\n }\n index: 0\n }\n }\n }\n key: \"version\"\n }\n }\n }\n expressions {\n value {\n string_value: \"random_str\"\n }\n }\n }\n }\n }\n serialization_format: JSON\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n expected_result = '[42, \"random_str\"]'\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), expected_result)\n\n def testProtoExecPropertyMessageFieldTextFormat(self):\n # Access a message type proto field\n placeholder_expression = \"\"\"\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"proto_property\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n proto_field_path: 
\".tensorflow_serving\"\n serialization_format: TEXT_FORMAT\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n\n fd = descriptor_pb2.FileDescriptorProto()\n infra_validator_pb2.ServingSpec().DESCRIPTOR.file.CopyToProto(fd)\n pb.operator.proto_op.proto_schema.file_descriptors.file.append(fd)\n\n # If proto_field_path points to a message type field, the message will\n # be rendered using text_format.\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context),\n \"tags: \\\"latest\\\"\\ntags: \\\"1.15.0-gpu\\\"\\n\")\n\n def testProtoExecPropertyRepeatedField(self):\n # Access a repeated field.\n placeholder_expression = \"\"\"\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"proto_property\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n proto_field_path: \".tensorflow_serving\"\n proto_field_path: \".tags\"\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n\n # Prepare FileDescriptorSet\n fd = descriptor_pb2.FileDescriptorProto()\n infra_validator_pb2.ServingSpec().DESCRIPTOR.file.CopyToProto(fd)\n pb.operator.proto_op.proto_schema.file_descriptors.file.append(fd)\n\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), [\"latest\", \"1.15.0-gpu\"])\n\n def testProtoExecPropertyInvalidField(self):\n # Access a repeated field.\n placeholder_expression = \"\"\"\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"proto_property\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n proto_field_path: \".some_invalid_field\"\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n\n # Prepare FileDescriptorSet\n fd = descriptor_pb2.FileDescriptorProto()\n infra_validator_pb2.ServingSpec().DESCRIPTOR.file.CopyToProto(fd)\n pb.operator.proto_op.proto_schema.file_descriptors.file.append(fd)\n\n with self.assertRaises(ValueError):\n placeholder_utils.resolve_placeholder_expression(pb,\n self._resolution_context)\n\n def testProtoExecPropertyNoneAccess(self):\n # Access a missing optional exec property.\n placeholder_expression = \"\"\"\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"proto_property\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n proto_field_path: \".tensorflow_serving\"\n proto_field_path: \".tags\"\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n\n # Prepare FileDescriptorSet\n fd = descriptor_pb2.FileDescriptorProto()\n infra_validator_pb2.ServingSpec().DESCRIPTOR.file.CopyToProto(fd)\n pb.operator.proto_op.proto_schema.file_descriptors.file.append(fd)\n\n self.assertIsNone(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._none_resolution_context))\n\n def testSerializeDoubleValue(self):\n # Read a primitive value\n placeholder_expression = \"\"\"\n value {\n double_value: 1.000000009\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), 1.000000009)\n\n def testProtoRuntimeInfoPlaceholderMessageField(self):\n placeholder_expression = \"\"\"\n 
operator {\n proto_op {\n expression {\n placeholder {\n type: RUNTIME_INFO\n key: \"executor_spec\"\n }\n }\n proto_field_path: \".class_path\"\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), \"test_class_path\")\n\n def testProtoRuntimeInfoNoneAccess(self):\n # Access a missing platform config.\n placeholder_expression = \"\"\"\n operator {\n proto_op {\n expression {\n placeholder {\n type: RUNTIME_INFO\n key: \"platform_config\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n proto_field_path: \".tensorflow_serving\"\n proto_field_path: \".tags\"\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n\n # Prepare FileDescriptorSet\n fd = descriptor_pb2.FileDescriptorProto()\n infra_validator_pb2.ServingSpec().DESCRIPTOR.file.CopyToProto(fd)\n pb.operator.proto_op.proto_schema.file_descriptors.file.append(fd)\n\n self.assertIsNone(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._none_resolution_context))\n\n def testProtoSerializationJSON(self):\n placeholder_expression = \"\"\"\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"proto_property\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n serialization_format: JSON\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n\n # Prepare FileDescriptorSet\n fd = descriptor_pb2.FileDescriptorProto()\n infra_validator_pb2.ServingSpec().DESCRIPTOR.file.CopyToProto(fd)\n pb.operator.proto_op.proto_schema.file_descriptors.file.append(fd)\n\n expected_json_serialization = \"\"\"\\\n{\n \"tensorflow_serving\": {\n \"tags\": [\n \"latest\",\n \"1.15.0-gpu\"\n ]\n }\n}\"\"\"\n\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context), expected_json_serialization)\n\n def testProtoWithoutSerializationFormat(self):\n placeholder_expression = \"\"\"\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"proto_property\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n\n # Prepare FileDescriptorSet\n fd = descriptor_pb2.FileDescriptorProto()\n infra_validator_pb2.ServingSpec().DESCRIPTOR.file.CopyToProto(fd)\n pb.operator.proto_op.proto_schema.file_descriptors.file.append(fd)\n\n with self.assertRaises(ValueError):\n placeholder_utils.resolve_placeholder_expression(pb,\n self._resolution_context)\n\n def testExecutionInvocationPlaceholderSimple(self):\n placeholder_expression = \"\"\"\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_INVOCATION\n }\n }\n serialization_format: JSON\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n resolved = placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context)\n got_exec_invocation = json_format.Parse(\n resolved, execution_invocation_pb2.ExecutionInvocation())\n want_exec_invocation = text_format.Parse(\n _WANT_EXEC_INVOCATION, execution_invocation_pb2.ExecutionInvocation())\n fd = descriptor_pb2.FileDescriptorProto()\n 
infra_validator_pb2.ServingSpec().DESCRIPTOR.file.CopyToProto(fd)\n want_exec_invocation.execution_properties_with_schema[\n \"list_proto_property\"].schema.value_type.list_type.proto_type.file_descriptors.file.append(\n fd)\n self.assertProtoEquals(want_exec_invocation, got_exec_invocation)\n\n def testExecutionInvocationPlaceholderAccessProtoField(self):\n placeholder_expression = \"\"\"\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_INVOCATION\n }\n }\n proto_field_path: \".stateful_working_dir\"\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n resolved = placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context)\n self.assertEqual(resolved, \"test_stateful_working_dir\")\n\n def testExecutionInvocationDescriptor(self):\n # Test if ExecutionInvocation proto is in the default descriptor pool\n pool = descriptor_pool.Default()\n message_descriptor = pool.FindMessageTypeByName(\n \"tfx.orchestration.ExecutionInvocation\")\n self.assertEqual(\"tfx.orchestration.ExecutionInvocation\",\n message_descriptor.full_name)\n\n def testBase64EncodeOperator(self):\n placeholder_expression = \"\"\"\n operator {\n base64_encode_op {\n expression {\n operator {\n index_op {\n expression {\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"proto_property\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n proto_field_path: \".tensorflow_serving\"\n proto_field_path: \".tags\"\n }\n }\n }\n index: 0\n }\n }\n }\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context),\n base64.urlsafe_b64encode(b\"latest\").decode(\"ASCII\"))\n\n def _assert_serialized_proto_b64encode_eq(self, serialize_format, expected):\n placeholder_expression = \"\"\"\n operator {\n base64_encode_op {\n expression {\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"proto_property\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n serialization_format: \"\"\" + serialize_format + \"\"\"\n }\n }\n }\n }\n }\n \"\"\"\n pb = text_format.Parse(placeholder_expression,\n placeholder_pb2.PlaceholderExpression())\n resolved_base64_str = placeholder_utils.resolve_placeholder_expression(\n pb, self._resolution_context)\n decoded = base64.urlsafe_b64decode(resolved_base64_str).decode()\n self.assertEqual(decoded, expected)\n\n def testJsonSerializedProtoBase64Encode(self):\n expected_json_str = json_format.MessageToJson(\n message=self._serving_spec,\n sort_keys=True,\n preserving_proto_field_name=True)\n self._assert_serialized_proto_b64encode_eq(\"JSON\", expected_json_str)\n\n def testTextFormatSerializedProtoBase64Encode(self):\n expected_text_format_str = text_format.MessageToString(self._serving_spec)\n self._assert_serialized_proto_b64encode_eq(\"TEXT_FORMAT\",\n expected_text_format_str)\n\n def testBinarySerializedProtoBase64Encode(self):\n expected_binary_str = self._serving_spec.SerializeToString().decode()\n self._assert_serialized_proto_b64encode_eq(\"BINARY\", expected_binary_str)\n\n def testDebugPlaceholder(self):\n pb = text_format.Parse(_CONCAT_SPLIT_URI_EXPRESSION,\n placeholder_pb2.PlaceholderExpression())\n self.assertEqual(\n placeholder_utils.debug_str(pb),\n 
\"(input(\\\"examples\\\")[0].split_uri(\\\"train\\\") + \\\"/\\\" + \\\"1\\\")\")\n\n another_pb_str = \"\"\"\n operator {\n proto_op {\n expression {\n placeholder {\n type: EXEC_PROPERTY\n key: \"serving_spec\"\n }\n }\n proto_schema {\n message_type: \"tfx.components.infra_validator.ServingSpec\"\n }\n proto_field_path: \".tensorflow_serving\"\n serialization_format: TEXT_FORMAT\n }\n }\n \"\"\"\n another_pb = text_format.Parse(another_pb_str,\n placeholder_pb2.PlaceholderExpression())\n self.assertEqual(\n placeholder_utils.debug_str(another_pb),\n \"exec_property(\\\"serving_spec\\\").tensorflow_serving.serialize(TEXT_FORMAT)\"\n )\n\n\nclass PredicateResolutionTest(parameterized.TestCase, tf.test.TestCase):\n\n def _createResolutionContext(self, input_values_dict):\n input_dict = {}\n for channel_name, values in input_values_dict.items():\n input_dict[channel_name] = []\n for value in values:\n artifact = standard_artifacts.Integer()\n artifact.uri = self.create_tempfile().full_path\n artifact.value = value\n input_dict[channel_name].append(artifact)\n\n return placeholder_utils.ResolutionContext(\n exec_info=data_types.ExecutionInfo(\n input_dict=input_dict,\n pipeline_node=pipeline_pb2.PipelineNode(\n node_info=pipeline_pb2.NodeInfo()),\n pipeline_info=pipeline_pb2.PipelineInfo(id=\"test_pipeline_id\")))\n\n @parameterized.named_parameters(\n {\n \"testcase_name\": \"1==1\",\n \"input_values_dict\": {\n \"channel_1\": [1],\n \"channel_2\": [1],\n },\n \"comparison_op\": placeholder_pb2.ComparisonOperator.Operation.EQUAL,\n \"expected_result\": True,\n },\n {\n \"testcase_name\": \"1==2\",\n \"input_values_dict\": {\n \"channel_1\": [1],\n \"channel_2\": [2],\n },\n \"comparison_op\": placeholder_pb2.ComparisonOperator.Operation.EQUAL,\n \"expected_result\": False,\n },\n {\n \"testcase_name\":\n \"1<2\",\n \"input_values_dict\": {\n \"channel_1\": [1],\n \"channel_2\": [2],\n },\n \"comparison_op\":\n placeholder_pb2.ComparisonOperator.Operation.LESS_THAN,\n \"expected_result\":\n True,\n },\n {\n \"testcase_name\":\n \"1<1\",\n \"input_values_dict\": {\n \"channel_1\": [1],\n \"channel_2\": [1],\n },\n \"comparison_op\":\n placeholder_pb2.ComparisonOperator.Operation.LESS_THAN,\n \"expected_result\":\n False,\n },\n {\n \"testcase_name\":\n \"2<1\",\n \"input_values_dict\": {\n \"channel_1\": [2],\n \"channel_2\": [1],\n },\n \"comparison_op\":\n placeholder_pb2.ComparisonOperator.Operation.LESS_THAN,\n \"expected_result\":\n False,\n },\n {\n \"testcase_name\":\n \"2>1\",\n \"input_values_dict\": {\n \"channel_1\": [2],\n \"channel_2\": [1],\n },\n \"comparison_op\":\n placeholder_pb2.ComparisonOperator.Operation.GREATER_THAN,\n \"expected_result\":\n True,\n },\n {\n \"testcase_name\":\n \"1>1\",\n \"input_values_dict\": {\n \"channel_1\": [1],\n \"channel_2\": [1],\n },\n \"comparison_op\":\n placeholder_pb2.ComparisonOperator.Operation.GREATER_THAN,\n \"expected_result\":\n False,\n },\n {\n \"testcase_name\":\n \"1>2\",\n \"input_values_dict\": {\n \"channel_1\": [1],\n \"channel_2\": [2],\n },\n \"comparison_op\":\n placeholder_pb2.ComparisonOperator.Operation.GREATER_THAN,\n \"expected_result\":\n False,\n },\n )\n def testComparisonOperator(self, input_values_dict, comparison_op,\n expected_result):\n resolution_context = self._createResolutionContext(input_values_dict)\n # Similar to:\n # some_channel.future()[0].value <?> other_channel.future()[0].value\n pb = text_format.Parse(\n \"\"\"\n operator {\n compare_op {\n lhs {\n operator {\n artifact_value_op {\n expression 
{\n operator {\n index_op {\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"channel_1\"\n }\n }\n index: 0\n }\n }\n }\n }\n }\n }\n rhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"channel_2\"\n }\n }\n index: 0\n }\n }\n }\n }\n }\n }\n }\n }\n \"\"\", placeholder_pb2.PlaceholderExpression())\n pb.operator.compare_op.op = comparison_op\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, resolution_context), expected_result)\n\n def _createTrueFalsePredsAndResolutionContext(self):\n \"\"\"Outputs predicate expressions that evaluate to some constant boolean.\n\n To test the evaluation of AND, OR, NOT expressions, we want to assert\n that the evaluation code has the same truth table as the corresponding\n operators they are implementing.\n\n This helper method outputs one predicate expression that always evaluates to\n `True` (true_pb), and one predicate expression that always evaluates to\n `False` (false_pb), as well as the resolution context that produces those\n results.\n\n true_pb is effectively `1 == 1`.\n false_pb is effectively `1 < 1`.\n\n These expressions are meant to be used as test inputs for logical\n expressions.\n\n For example, to assert that `not(True) == False`, construct a placeholder\n expression that represents the NOT operator, copy true_pb into the\n NOT operator's sub expression field, then resolve this placeholder\n expression using the code to be tested, and assert that the resolved value\n is equal to `False`.\n\n Returns:\n A tuple with three items:\n - A Placeholder expression that always evaluates to True using the given\n ResolutionContext,\n - A Placeholder expression that always evaluates to False using the given\n ResolutionContext, and\n - The ResolutionContext for evaluating the expression.\n \"\"\"\n\n resolution_context = self._createResolutionContext({\"channel_1\": [1]})\n # Evaluating true_pb using the above resolution context is equivalent to\n # evaluating `1 == 1`.\n # Always evaluates to True.\n true_pb = text_format.Parse(\n \"\"\"\n operator {\n compare_op {\n lhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"channel_1\"\n }\n }\n index: 0\n }\n }\n }\n }\n }\n }\n rhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"channel_1\"\n }\n }\n index: 0\n }\n }\n }\n }\n }\n }\n op: EQUAL\n }\n }\n \"\"\", placeholder_pb2.PlaceholderExpression())\n # This assertion is just to re-assure the reader of this test code that\n # true_pb does evaluate to True, as promised.\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n true_pb, resolution_context), True)\n\n # Evaluating false_pb using the above resolution context is equivalent to\n # evaluating `1 < 1`.\n # Always evaluates to False.\n false_pb = text_format.Parse(\n \"\"\"\n operator {\n compare_op {\n lhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"channel_1\"\n }\n }\n index: 0\n }\n }\n }\n }\n }\n }\n rhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"channel_1\"\n }\n }\n index: 0\n }\n }\n }\n }\n }\n }\n op: LESS_THAN\n }\n }\n \"\"\", 
placeholder_pb2.PlaceholderExpression())\n # This assertion is just to re-assure the reader of this test code that\n # false_pb does evaluate to False, as promised.\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n false_pb, resolution_context), False)\n return true_pb, false_pb, resolution_context\n\n def testNotOperator(self):\n true_pb, false_pb, resolution_context = (\n self._createTrueFalsePredsAndResolutionContext())\n\n # assert not(True) == False\n not_true_pb = placeholder_pb2.PlaceholderExpression()\n not_true_pb.operator.unary_logical_op.op = (\n placeholder_pb2.UnaryLogicalOperator.Operation.NOT)\n not_true_pb.operator.unary_logical_op.expression.CopyFrom(true_pb)\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n not_true_pb, resolution_context), False)\n\n # assert not(False) == True\n not_false_pb = placeholder_pb2.PlaceholderExpression()\n not_false_pb.operator.unary_logical_op.op = (\n placeholder_pb2.UnaryLogicalOperator.Operation.NOT)\n not_false_pb.operator.unary_logical_op.expression.CopyFrom(false_pb)\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n not_false_pb, resolution_context), True)\n\n @parameterized.named_parameters(\n {\n \"testcase_name\": \"true_and_true\",\n \"lhs_evaluates_to_true\": True,\n \"rhs_evaluates_to_true\": True,\n \"op\": placeholder_pb2.BinaryLogicalOperator.Operation.AND,\n \"expected_result\": True,\n },\n {\n \"testcase_name\": \"true_and_false\",\n \"lhs_evaluates_to_true\": True,\n \"rhs_evaluates_to_true\": False,\n \"op\": placeholder_pb2.BinaryLogicalOperator.Operation.AND,\n \"expected_result\": False,\n },\n {\n \"testcase_name\": \"false_and_true\",\n \"lhs_evaluates_to_true\": False,\n \"rhs_evaluates_to_true\": True,\n \"op\": placeholder_pb2.BinaryLogicalOperator.Operation.AND,\n \"expected_result\": False,\n },\n {\n \"testcase_name\": \"false_and_false\",\n \"lhs_evaluates_to_true\": False,\n \"rhs_evaluates_to_true\": False,\n \"op\": placeholder_pb2.BinaryLogicalOperator.Operation.AND,\n \"expected_result\": False,\n },\n {\n \"testcase_name\": \"true_or_true\",\n \"lhs_evaluates_to_true\": True,\n \"rhs_evaluates_to_true\": True,\n \"op\": placeholder_pb2.BinaryLogicalOperator.Operation.OR,\n \"expected_result\": True,\n },\n {\n \"testcase_name\": \"true_or_false\",\n \"lhs_evaluates_to_true\": True,\n \"rhs_evaluates_to_true\": False,\n \"op\": placeholder_pb2.BinaryLogicalOperator.Operation.OR,\n \"expected_result\": True,\n },\n {\n \"testcase_name\": \"false_or_true\",\n \"lhs_evaluates_to_true\": False,\n \"rhs_evaluates_to_true\": True,\n \"op\": placeholder_pb2.BinaryLogicalOperator.Operation.OR,\n \"expected_result\": True,\n },\n {\n \"testcase_name\": \"false_or_false\",\n \"lhs_evaluates_to_true\": False,\n \"rhs_evaluates_to_true\": False,\n \"op\": placeholder_pb2.BinaryLogicalOperator.Operation.OR,\n \"expected_result\": False,\n },\n )\n def testBinaryLogicalOperator(self, lhs_evaluates_to_true,\n rhs_evaluates_to_true, op, expected_result):\n true_pb, false_pb, resolution_context = (\n self._createTrueFalsePredsAndResolutionContext())\n\n pb = placeholder_pb2.PlaceholderExpression()\n pb.operator.binary_logical_op.op = op\n pb.operator.binary_logical_op.lhs.CopyFrom(\n true_pb if lhs_evaluates_to_true else false_pb)\n pb.operator.binary_logical_op.rhs.CopyFrom(\n true_pb if rhs_evaluates_to_true else false_pb)\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n pb, resolution_context), expected_result)\n\n def 
testNestedExpression(self):\n true_pb, false_pb, resolution_context = (\n self._createTrueFalsePredsAndResolutionContext())\n\n true_and_false_pb = placeholder_pb2.PlaceholderExpression()\n true_and_false_pb.operator.binary_logical_op.op = (\n placeholder_pb2.BinaryLogicalOperator.Operation.AND)\n true_and_false_pb.operator.binary_logical_op.lhs.CopyFrom(true_pb)\n true_and_false_pb.operator.binary_logical_op.rhs.CopyFrom(false_pb)\n\n not_false_pb = placeholder_pb2.PlaceholderExpression()\n not_false_pb.operator.unary_logical_op.op = (\n placeholder_pb2.UnaryLogicalOperator.Operation.NOT)\n not_false_pb.operator.unary_logical_op.expression.CopyFrom(false_pb)\n\n # assert (True and False) and not(False) == False\n nested_pb_1 = placeholder_pb2.PlaceholderExpression()\n nested_pb_1.operator.binary_logical_op.op = (\n placeholder_pb2.BinaryLogicalOperator.Operation.AND)\n nested_pb_1.operator.binary_logical_op.lhs.CopyFrom(true_and_false_pb)\n nested_pb_1.operator.binary_logical_op.rhs.CopyFrom(not_false_pb)\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n nested_pb_1, resolution_context), False)\n\n # assert (True and False) or not(False) == True\n nested_pb_2 = placeholder_pb2.PlaceholderExpression()\n nested_pb_2.operator.binary_logical_op.op = (\n placeholder_pb2.BinaryLogicalOperator.Operation.OR)\n nested_pb_2.operator.binary_logical_op.lhs.CopyFrom(true_and_false_pb)\n nested_pb_2.operator.binary_logical_op.rhs.CopyFrom(not_false_pb)\n self.assertEqual(\n placeholder_utils.resolve_placeholder_expression(\n nested_pb_2, resolution_context), True)\n\n def testDebugPlaceholder(self):\n pb = text_format.Parse(\n \"\"\"\n operator {\n compare_op {\n lhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"channel_1\"\n }\n }\n index: 0\n }\n }\n }\n }\n }\n }\n rhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n type: INPUT_ARTIFACT\n key: \"channel_2\"\n }\n }\n index: 0\n }\n }\n }\n }\n }\n }\n op: EQUAL\n }\n }\n \"\"\", placeholder_pb2.PlaceholderExpression())\n self.assertEqual(\n placeholder_utils.debug_str(pb),\n \"(input(\\\"channel_1\\\")[0].value == input(\\\"channel_2\\\")[0].value)\")\n\n another_pb = text_format.Parse(\n \"\"\"\n operator {\n binary_logical_op {\n lhs {\n operator {\n binary_logical_op {\n lhs {\n operator {\n unary_logical_op {\n expression {\n operator {\n compare_op {\n lhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n key: \"channel_11_key\"\n }\n }\n }\n }\n }\n }\n }\n }\n rhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n key: \"channel_12_key\"\n }\n }\n }\n }\n }\n }\n }\n }\n op: LESS_THAN\n }\n }\n }\n op: NOT\n }\n }\n }\n rhs {\n operator {\n compare_op {\n lhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n key: \"channel_21_key\"\n }\n }\n }\n }\n }\n }\n }\n }\n rhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n key: \"channel_22_key\"\n }\n }\n }\n }\n }\n }\n }\n }\n op: GREATER_THAN\n }\n }\n }\n op: AND\n }\n }\n }\n rhs {\n operator {\n unary_logical_op {\n expression {\n operator {\n compare_op {\n lhs {\n operator {\n artifact_value_op {\n expression {\n operator {\n index_op {\n expression {\n placeholder {\n 
key: \"channel_3_key\"\n }\n }\n }\n }\n }\n }\n }\n }\n rhs {\n value {\n string_value: \"foo\"\n }\n }\n op: EQUAL\n }\n }\n }\n op: NOT\n }\n }\n }\n op: OR\n }\n }\n \"\"\", placeholder_pb2.PlaceholderExpression())\n actual_debug_str = placeholder_utils.debug_str(another_pb)\n expected_debug_str_pretty = \"\"\"\n (\n (\n not(\n (\n input(\"channel_11_key\")[0].value\n <\n input(\"channel_12_key\")[0].value\n )\n ) and\n (\n input(\"channel_21_key\")[0].value\n >\n input(\"channel_22_key\")[0].value\n )\n )\n or\n not(\n (\n input(\"channel_3_key\")[0].value == \"foo\"\n )\n )\n )\n \"\"\"\n self.assertEqual(\n re.sub(r\"\\s+\", \"\", actual_debug_str),\n re.sub(r\"\\s+\", \"\", expected_debug_str_pretty))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"End to end tests for Kubeflow-based orchestrator.\"\"\"\n\nimport os\nimport subprocess\nimport time\nfrom typing import List\n\nfrom absl import logging\nfrom grpc import insecure_channel\nimport tensorflow as tf\nfrom tfx.dsl.io import fileio\nfrom tfx.orchestration import test_utils\nfrom tfx.orchestration.kubeflow import test_utils as kubeflow_test_utils\nfrom tfx.orchestration.test_pipelines import download_grep_print_pipeline\nfrom tfx.types import standard_artifacts\n\nfrom ml_metadata.proto import metadata_store_pb2\nfrom ml_metadata.proto import metadata_store_service_pb2\nfrom ml_metadata.proto import metadata_store_service_pb2_grpc\n\n# The range of port-forwarding addresses used by Kubeflow E2E test.\n# If the current specified address is occupied, the test will scan forward until\n# a unused port is met, or stop at _KFP_E2E_TEST_FORWARDING_PORT_END.\n_KFP_E2E_TEST_FORWARDING_PORT_BEGIN = 8081\n_KFP_E2E_TEST_FORWARDING_PORT_END = 8888\n\n# Number of attempts to bind one port.\n_MAX_ATTEMPTS = 5\n\n# Context name of pipeline contexts.\n_CONTEXT_TYPE_PIPELINE = 'pipeline'\n\n\nclass KubeflowEndToEndTest(kubeflow_test_utils.BaseKubeflowTest):\n\n @classmethod\n def setUpClass(cls):\n # Initializes the port-forward process to talk MLMD.\n super().setUpClass()\n cls._port_forwarding_process = cls._setup_mlmd_port_forward()\n\n @classmethod\n def tearDownClass(cls):\n super(KubeflowEndToEndTest, cls).tearDownClass()\n\n # Delete container image used in tests.\n logging.info('Killing the GRPC port-forwarding process.')\n cls._port_forwarding_process.kill()\n\n @classmethod\n def _get_grpc_port(cls) -> str:\n \"\"\"Get the port number used by MLMD gRPC server.\"\"\"\n get_grpc_port_command = [\n 'kubectl', '-n', 'kubeflow', 'get', 'configmap',\n 'metadata-grpc-configmap', '-o',\n 'jsonpath={.data.METADATA_GRPC_SERVICE_PORT}'\n ]\n\n grpc_port = subprocess.check_output(get_grpc_port_command)\n return grpc_port.decode('utf-8')\n\n @classmethod\n def _setup_mlmd_port_forward(cls) -> subprocess.Popen:\n \"\"\"Uses port forward to talk to MLMD gRPC server.\"\"\"\n grpc_port = cls._get_grpc_port()\n\n is_bind = False\n forwarded_port = None\n\n for port in range(_KFP_E2E_TEST_FORWARDING_PORT_BEGIN,\n _KFP_E2E_TEST_FORWARDING_PORT_END):\n grpc_forward_command = [\n 'kubectl', 'port-forward', 'deployment/metadata-grpc-deployment',\n '-n', 'kubeflow', ('%s:%s' % (port, grpc_port))\n ]\n # Begin port forwarding.\n proc = subprocess.Popen(grpc_forward_command)\n try:\n # Wait while port forward to pod is being established\n poll_grpc_port_command = ['lsof', '-i', ':%s' % port]\n result = subprocess.run( # pylint: disable=subprocess-run-check\n poll_grpc_port_command,\n stdout=subprocess.PIPE)\n for _ in range(_MAX_ATTEMPTS):\n if (result.returncode == 0 and\n 'kubectl' in result.stdout.decode('utf-8')):\n is_bind = True\n break\n logging.info(\n 'Waiting while gRPC port-forward is being 
established...')\n time.sleep(5)\n result = subprocess.run( # pylint: disable=subprocess-run-check\n poll_grpc_port_command,\n stdout=subprocess.PIPE)\n\n except: # pylint: disable=bare-except\n # Kill the process in case unexpected error occurred.\n proc.kill()\n\n if is_bind:\n forwarded_port = port\n break\n\n if not is_bind:\n raise RuntimeError('Failed to establish gRPC port-forward to cluster in '\n 'the specified range: port %s to %s' %\n (_KFP_E2E_TEST_FORWARDING_PORT_BEGIN,\n _KFP_E2E_TEST_FORWARDING_PORT_END))\n\n # Establish MLMD gRPC channel.\n forwarding_channel = insecure_channel('localhost:%s' % forwarded_port)\n cls._stub = metadata_store_service_pb2_grpc.MetadataStoreServiceStub(\n forwarding_channel)\n\n return proc\n\n def _get_artifacts_with_type_and_pipeline(\n self, type_name: str,\n pipeline_name: str) -> List[metadata_store_pb2.Artifact]:\n \"\"\"Helper function returns artifacts of specified pipeline and type.\"\"\"\n # 1. Find the pipeline context according to its name.\n request = metadata_store_service_pb2.GetContextByTypeAndNameRequest(\n type_name=_CONTEXT_TYPE_PIPELINE, context_name=pipeline_name)\n pipeline_context = self._stub.GetContextByTypeAndName(request)\n # 2. Find the artifacts associated with the pipeline context.\n request = metadata_store_service_pb2.GetArtifactsByContextRequest(\n context_id=pipeline_context.context.id)\n artifacts_response = self._stub.GetArtifactsByContext(request)\n # 3. Find the specified artifact type id.\n artifact_type_request = metadata_store_service_pb2.GetArtifactTypeRequest(\n type_name=type_name)\n artifact_type = self._stub.GetArtifactType(\n artifact_type_request).artifact_type\n # 4. Filter the returned artifacts according to their types and return.\n return [\n artifact for artifact in artifacts_response.artifacts\n if artifact.type_id == artifact_type.id\n ]\n\n def _get_value_of_string_artifact(\n self, string_artifact: metadata_store_pb2.Artifact) -> str:\n \"\"\"Helper function returns the actual value of a ValueArtifact.\"\"\"\n\n string_artifact_obj = standard_artifacts.String()\n string_artifact_obj.uri = string_artifact.uri\n string_artifact_obj.read()\n return string_artifact_obj.value\n\n def _get_executions_by_pipeline_name(\n self, pipeline_name: str) -> List[metadata_store_pb2.Execution]:\n \"\"\"Helper function returns executions under a given pipeline name.\"\"\"\n # step 1: get context id by context name\n request = metadata_store_service_pb2.GetContextByTypeAndNameRequest(\n type_name='pipeline', context_name=pipeline_name)\n context_id = self._stub.GetContextByTypeAndName(request).context.id\n # step 2: get executions by context id\n request = metadata_store_service_pb2.GetExecutionsByContextRequest(\n context_id=context_id)\n return self._stub.GetExecutionsByContext(request).executions\n\n def _get_executions_by_pipeline_name_and_state(\n self, pipeline_name: str, state: metadata_store_pb2.Execution.State\n ) -> List[metadata_store_pb2.Execution]:\n \"\"\"Helper function returns executions for a given state.\"\"\"\n executions = self._get_executions_by_pipeline_name(pipeline_name)\n result = []\n for e in executions:\n if e.last_known_state == state:\n result.append(e)\n\n return result\n\n def _assert_infra_validator_passed(self, pipeline_name: str):\n artifacts = self._get_artifacts_with_type_and_pipeline(\n type_name='InfraBlessing', pipeline_name=pipeline_name)\n self.assertGreaterEqual(len(artifacts), 1)\n for artifact in artifacts:\n blessed = os.path.join(artifact.uri, 'INFRA_BLESSED')\n 
self.assertTrue(\n fileio.exists(blessed),\n 'Expected InfraBlessing results cannot be found under path %s for '\n 'artifact %s' % (blessed, artifact))\n\n def testSimpleEnd2EndPipeline(self):\n \"\"\"End-to-End test for simple pipeline.\"\"\"\n pipeline_name = 'kubeflow-e2e-test-{}'.format(test_utils.random_id())\n # Test data is copied from the repository(tfx/components/testdata/) to an\n # ephemeral location in GCS bucket(BaseKubeflowTest._BUCKET_NAME).\n # See kubeflow_test_utils.BaseKubeflowTest.setUp() for the detail.\n components = kubeflow_test_utils.create_e2e_components(\n self._pipeline_root(pipeline_name),\n self._data_root,\n self._transform_module,\n self._trainer_module,\n )\n pipeline = self._create_pipeline(pipeline_name, components)\n\n self._compile_and_run_pipeline(pipeline)\n self._assert_infra_validator_passed(pipeline_name)\n\n def testPrimitiveEnd2EndPipeline(self):\n \"\"\"End-to-End test for primitive artifacts passing.\"\"\"\n pipeline_name = 'kubeflow-primitive-e2e-test-{}'.format(\n test_utils.random_id())\n components = kubeflow_test_utils.create_primitive_type_components(\n pipeline_name)\n # Test that the pipeline can be executed successfully.\n pipeline = self._create_pipeline(pipeline_name, components)\n self._compile_and_run_pipeline(\n pipeline=pipeline, workflow_name=pipeline_name + '-run-1')\n # Test if the correct value has been passed.\n str_artifacts = self._get_artifacts_with_type_and_pipeline(\n type_name='String', pipeline_name=pipeline_name)\n # There should be exactly one string artifact.\n self.assertEqual(1, len(str_artifacts))\n self.assertEqual(\n self._get_value_of_string_artifact(str_artifacts[0]),\n 'hello %s\\n' % pipeline_name)\n # Test caching.\n self._compile_and_run_pipeline(\n pipeline=pipeline, workflow_name=pipeline_name + '-run-2')\n cached_execution = self._get_executions_by_pipeline_name_and_state(\n pipeline_name=pipeline_name,\n state=metadata_store_pb2.Execution.State.CACHED)\n self.assertEqual(2, len(cached_execution))\n\n def testCreateContainerComponentEnd2EndPipeline(self):\n \"\"\"End-to-End test for container components.\"\"\"\n pipeline_name = 'kubeflow-container-e2e-test-{}'.format(\n test_utils.random_id())\n text_url = (\n 'https://storage.googleapis.com/ml-pipeline-playground/hamlet.txt')\n pattern = 'art thou'\n component_instances = download_grep_print_pipeline.create_pipeline_component_instances(\n text_url=text_url,\n pattern=pattern,\n )\n # Test that the pipeline can be executed successfully.\n pipeline = self._create_pipeline(pipeline_name, component_instances)\n self._compile_and_run_pipeline(\n pipeline=pipeline, workflow_name=pipeline_name)\n # Test if the correct value has been passed.\n artifacts = self._get_artifacts_with_type_and_pipeline(\n type_name='ExternalArtifact', pipeline_name=pipeline_name)\n # There should be exactly two artifacts.\n self.assertEqual(len(artifacts), 2)\n for artifact in artifacts:\n # TODO(b/150515270) Remove the '/data' suffix when b/150515270 is fixed.\n artifact_value = fileio.open(artifact.uri + '/data', 'r').read()\n self.assertGreater(len(artifact_value), 100)\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.INFO)\n tf.test.main()\n",
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.types.artifact_utils.\"\"\"\n\nimport json\nfrom typing import Dict, List\n\nimport tensorflow as tf\nfrom tfx.dsl.placeholder import placeholder\nfrom tfx.proto import example_gen_pb2\nfrom tfx.types import artifact\nfrom tfx.types import channel\nfrom tfx.types import component_spec\nfrom tfx.types.component_spec import ChannelParameter\nfrom tfx.types.component_spec import ComponentSpec\nfrom tfx.types.component_spec import ExecutionParameter\nfrom tfx.types.standard_artifacts import Examples\nfrom tfx.utils import proto_utils\n\nfrom google.protobuf import json_format\nfrom google.protobuf import text_format\n\n\nclass _InputArtifact(artifact.Artifact):\n TYPE_NAME = 'InputArtifact'\n\n\nclass _OutputArtifact(artifact.Artifact):\n TYPE_NAME = 'OutputArtifact'\n\n\nclass _X(artifact.Artifact):\n TYPE_NAME = 'X'\n\n\nclass _Z(artifact.Artifact):\n TYPE_NAME = 'Z'\n\n\nclass _BasicComponentSpec(ComponentSpec):\n\n PARAMETERS = {\n 'folds': ExecutionParameter(type=int),\n 'proto': ExecutionParameter(type=example_gen_pb2.Input, optional=True),\n }\n INPUTS = {\n 'input': ChannelParameter(type=_InputArtifact),\n }\n OUTPUTS = {\n 'output': ChannelParameter(type=_OutputArtifact),\n }\n\n\nclass ComponentSpecTest(tf.test.TestCase):\n # pylint: disable=unused-variable\n\n def testComponentSpec_Empty(self):\n\n class EmptyComponentSpec(ComponentSpec):\n PARAMETERS = {}\n INPUTS = {}\n OUTPUTS = {}\n\n EmptyComponentSpec()\n\n def testComponentSpec_Basic(self):\n proto = example_gen_pb2.Input()\n proto.splits.extend([\n example_gen_pb2.Input.Split(name='name1', pattern='pattern1'),\n example_gen_pb2.Input.Split(name='name2', pattern='pattern2'),\n example_gen_pb2.Input.Split(name='name3', pattern='pattern3'),\n ])\n input_channel = channel.Channel(type=_InputArtifact)\n output_channel = channel.Channel(type=_OutputArtifact)\n spec = _BasicComponentSpec(\n folds=10, proto=proto, input=input_channel, output=output_channel)\n # Verify proto property.\n self.assertIsInstance(spec.exec_properties['proto'], str)\n decoded_proto = json.loads(spec.exec_properties['proto'])\n self.assertCountEqual(['splits'], decoded_proto.keys())\n self.assertLen(decoded_proto['splits'], 3)\n self.assertCountEqual(['name1', 'name2', 'name3'],\n list(s['name'] for s in decoded_proto['splits']))\n self.assertCountEqual(['pattern1', 'pattern2', 'pattern3'],\n list(s['pattern'] for s in decoded_proto['splits']))\n\n # Verify other properties.\n self.assertEqual(10, spec.exec_properties['folds'])\n self.assertIs(spec.inputs['input'], input_channel)\n self.assertIs(spec.outputs['output'], output_channel)\n\n with self.assertRaisesRegex(\n TypeError,\n \"Expected type <(class|type) 'int'> for parameter u?'folds' but got \"\n 'string.'):\n spec = _BasicComponentSpec(\n folds='string', input=input_channel, output=output_channel)\n\n with self.assertRaisesRegex(\n TypeError,\n '.*should be a Channel of 
.*InputArtifact.*got (.|\\\\s)*Examples.*'):\n spec = _BasicComponentSpec(\n folds=10, input=channel.Channel(type=Examples), output=output_channel)\n\n with self.assertRaisesRegex(\n TypeError,\n '.*should be a Channel of .*OutputArtifact.*got (.|\\\\s)*Examples.*'):\n spec = _BasicComponentSpec(\n folds=10, input=input_channel, output=channel.Channel(type=Examples))\n\n def testComponentSpec_JsonProto(self):\n proto_str = '{\"splits\": [{\"name\": \"name1\", \"pattern\": \"pattern1\"}]}'\n spec = _BasicComponentSpec(\n folds=10,\n proto=proto_str,\n input=channel.Channel(type=_InputArtifact),\n output=channel.Channel(type=_OutputArtifact))\n self.assertIsInstance(spec.exec_properties['proto'], str)\n self.assertEqual(spec.exec_properties['proto'], proto_str)\n\n def testComponentSpec_WithUnionChannel(self):\n input_channel_1 = channel.Channel(type=_InputArtifact)\n input_channel_2 = channel.Channel(type=_InputArtifact)\n output_channel = channel.Channel(type=_OutputArtifact)\n spec = _BasicComponentSpec(\n folds=10,\n input=channel.union([input_channel_1, input_channel_2]),\n output=output_channel)\n\n # Verify properties.\n self.assertEqual(10, spec.exec_properties['folds'])\n self.assertEqual(spec.inputs['input'].type, _InputArtifact)\n self.assertEqual(spec.inputs['input'].channels,\n [input_channel_1, input_channel_2])\n self.assertIs(spec.outputs['output'], output_channel)\n\n def testInvalidComponentSpec_MissingProperties(self):\n\n with self.assertRaisesRegex(TypeError, 'does not have PARAMETERS'):\n\n class InvalidComponentSpecA(ComponentSpec):\n # Missing PARAMETERS.\n INPUTS = {}\n OUTPUTS = {}\n\n with self.assertRaisesRegex(TypeError, 'does not have INPUTS'):\n\n class InvalidComponentSpecB(ComponentSpec):\n PARAMETERS = {}\n # Missing INPUTS.\n OUTPUTS = {}\n\n with self.assertRaisesRegex(TypeError, 'does not have OUTPUTS'):\n\n class InvalidComponentSpecC(ComponentSpec):\n PARAMETERS = {}\n INPUTS = {}\n # Missing OUTPUTS.\n\n def testInvalidComponentSpec_WrongProperties(self):\n\n with self.assertRaisesRegex(TypeError, 'PARAMETERS should be a dict'):\n\n class InvalidComponentSpecA(ComponentSpec):\n PARAMETERS = object()\n INPUTS = {}\n OUTPUTS = {}\n\n with self.assertRaisesRegex(TypeError, 'INPUTS should be a dict'):\n\n class InvalidComponentSpecB(ComponentSpec):\n PARAMETERS = {}\n INPUTS = object()\n OUTPUTS = {}\n\n with self.assertRaisesRegex(TypeError, 'OUTPUTS should be a dict'):\n\n class InvalidComponentSpecC(ComponentSpec):\n PARAMETERS = {}\n INPUTS = {}\n OUTPUTS = object()\n\n def testInvalidComponentSpec_WrongType(self):\n\n with self.assertRaisesRegex(TypeError,\n 'expects values of type ExecutionParameter'):\n\n class WrongTypeComponentSpecA(ComponentSpec):\n PARAMETERS = {'x': object()}\n INPUTS = {}\n OUTPUTS = {}\n\n with self.assertRaisesRegex(TypeError,\n 'expects values of type ExecutionParameter'):\n\n class WrongTypeComponentSpecB(ComponentSpec):\n PARAMETERS = {'x': ChannelParameter(type=_X)}\n INPUTS = {}\n OUTPUTS = {}\n\n with self.assertRaisesRegex(TypeError,\n 'expect values of type ChannelParameter'):\n\n class WrongTypeComponentSpecC(ComponentSpec):\n PARAMETERS = {}\n INPUTS = {'x': ExecutionParameter(type=int)}\n OUTPUTS = {}\n\n with self.assertRaisesRegex(TypeError,\n 'expect values of type ChannelParameter'):\n\n class WrongTypeComponentSpecD(ComponentSpec):\n PARAMETERS = {}\n INPUTS = {}\n OUTPUTS = {'x': ExecutionParameter(type=int)}\n\n def testInvalidComponentSpec_DuplicateProperty(self):\n\n with 
self.assertRaisesRegex(ValueError, 'has a duplicate argument'):\n\n class DuplicatePropertyComponentSpec(ComponentSpec):\n PARAMETERS = {'x': ExecutionParameter(type=int)}\n INPUTS = {'x': ChannelParameter(type=_X)}\n OUTPUTS = {}\n\n def testComponentSpec_MissingArguments(self):\n\n class SimpleComponentSpec(ComponentSpec):\n PARAMETERS = {\n 'x': ExecutionParameter(type=int),\n 'y': ExecutionParameter(type=int, optional=True),\n }\n INPUTS = {'z': ChannelParameter(type=_Z)}\n OUTPUTS = {}\n\n with self.assertRaisesRegex(ValueError, 'Missing argument'):\n SimpleComponentSpec(x=10)\n\n with self.assertRaisesRegex(ValueError, 'Missing argument'):\n SimpleComponentSpec(z=channel.Channel(type=_Z))\n\n # Okay since y is optional.\n SimpleComponentSpec(x=10, z=channel.Channel(type=_Z))\n\n def testOptionalInputs(self):\n\n class SpecWithOptionalInput(ComponentSpec):\n PARAMETERS = {}\n INPUTS = {'x': ChannelParameter(type=_Z, optional=True)}\n OUTPUTS = {}\n\n optional_not_specified = SpecWithOptionalInput()\n self.assertNotIn('x', optional_not_specified.inputs.keys())\n self.assertTrue(optional_not_specified.is_optional_input('x'))\n optional_specified = SpecWithOptionalInput(x=channel.Channel(type=_Z))\n self.assertIn('x', optional_specified.inputs.keys())\n\n def testOptionalOutputs(self):\n\n class SpecWithOptionalOutput(ComponentSpec):\n PARAMETERS = {}\n INPUTS = {}\n OUTPUTS = {'x': ChannelParameter(type=_Z, optional=True)}\n\n optional_not_specified = SpecWithOptionalOutput()\n self.assertNotIn('x', optional_not_specified.outputs.keys())\n self.assertTrue(optional_not_specified.is_optional_output('x'))\n optional_specified = SpecWithOptionalOutput(x=channel.Channel(type=_Z))\n self.assertIn('x', optional_specified.outputs.keys())\n\n def testChannelParameterType(self):\n arg_name = 'foo'\n\n class _FooArtifact(artifact.Artifact):\n TYPE_NAME = 'FooArtifact'\n\n class _BarArtifact(artifact.Artifact):\n TYPE_NAME = 'BarArtifact'\n\n channel_parameter = ChannelParameter(type=_FooArtifact)\n # Following should pass.\n channel_parameter.type_check(arg_name, channel.Channel(type=_FooArtifact))\n\n with self.assertRaisesRegex(TypeError, arg_name):\n channel_parameter.type_check(arg_name, 42) # Wrong value.\n\n with self.assertRaisesRegex(TypeError, arg_name):\n channel_parameter.type_check(arg_name, channel.Channel(type=_BarArtifact))\n\n setattr(_FooArtifact, component_spec.COMPATIBLE_TYPES_KEY, {_BarArtifact})\n channel_parameter.type_check(arg_name, channel.Channel(type=_BarArtifact))\n\n def testExecutionParameterTypeCheck(self):\n int_parameter = ExecutionParameter(type=int)\n int_parameter.type_check('int_parameter', 8)\n with self.assertRaisesRegex(TypeError, \"Expected type <(class|type) 'int'>\"\n \" for parameter u?'int_parameter'\"):\n int_parameter.type_check('int_parameter', 'string')\n\n list_parameter = ExecutionParameter(type=List[int])\n list_parameter.type_check('list_parameter', [])\n list_parameter.type_check('list_parameter', [42])\n with self.assertRaisesRegex(TypeError, 'Expecting a list for parameter'):\n list_parameter.type_check('list_parameter', 42)\n\n with self.assertRaisesRegex(TypeError, \"Expecting item type <(class|type) \"\n \"'int'> for parameter u?'list_parameter'\"):\n list_parameter.type_check('list_parameter', [42, 'wrong item'])\n\n dict_parameter = ExecutionParameter(type=Dict[str, int])\n dict_parameter.type_check('dict_parameter', {})\n dict_parameter.type_check('dict_parameter', {'key1': 1, 'key2': 2})\n with self.assertRaisesRegex(TypeError, 
'Expecting a dict for parameter'):\n dict_parameter.type_check('dict_parameter', 'simple string')\n\n with self.assertRaisesRegex(TypeError, \"Expecting value type \"\n \"<(class|type) 'int'>\"):\n dict_parameter.type_check('dict_parameter', {'key1': '1'})\n\n proto_parameter = ExecutionParameter(type=example_gen_pb2.Input)\n proto_parameter.type_check('proto_parameter', example_gen_pb2.Input())\n proto_parameter.type_check(\n 'proto_parameter', proto_utils.proto_to_json(example_gen_pb2.Input()))\n proto_parameter.type_check('proto_parameter',\n {'splits': [{\n 'name': 'hello'\n }]})\n proto_parameter.type_check('proto_parameter', {'wrong_field': 42})\n with self.assertRaisesRegex(\n TypeError, \"Expected type <class 'tfx.proto.example_gen_pb2.Input'>\"):\n proto_parameter.type_check('proto_parameter', 42)\n with self.assertRaises(json_format.ParseError):\n proto_parameter.type_check('proto_parameter', {'splits': 42})\n\n output_channel = channel.Channel(type=_OutputArtifact)\n\n placeholder_parameter = ExecutionParameter(type=str)\n placeholder_parameter.type_check(\n 'wrapped_channel_placeholder_parameter',\n output_channel.future()[0].value)\n placeholder_parameter.type_check(\n 'placeholder_parameter',\n placeholder.runtime_info('platform_config').base_dir)\n with self.assertRaisesRegex(\n TypeError, 'Only simple RuntimeInfoPlaceholders are supported'):\n placeholder_parameter.type_check(\n 'placeholder_parameter',\n placeholder.runtime_info('platform_config').base_dir +\n placeholder.exec_property('version'))\n\n def testExecutionParameterUseProto(self):\n\n class SpecWithNonPrimitiveTypes(ComponentSpec):\n PARAMETERS = {\n 'config_proto':\n ExecutionParameter(type=example_gen_pb2.Input, use_proto=True),\n 'boolean':\n ExecutionParameter(type=bool, use_proto=True),\n 'list_config_proto':\n ExecutionParameter(\n type=List[example_gen_pb2.Input], use_proto=True),\n 'list_boolean':\n ExecutionParameter(type=List[bool], use_proto=True),\n }\n INPUTS = {\n 'input': ChannelParameter(type=_InputArtifact),\n }\n OUTPUTS = {\n 'output': ChannelParameter(type=_OutputArtifact),\n }\n\n spec = SpecWithNonPrimitiveTypes(\n config_proto='{\"splits\": [{\"name\": \"name\", \"pattern\": \"pattern\"}]}',\n boolean=True,\n list_config_proto=[\n example_gen_pb2.Input(splits=[\n example_gen_pb2.Input.Split(\n name='trainer', pattern='train.data')\n ]),\n example_gen_pb2.Input(splits=[\n example_gen_pb2.Input.Split(name='eval', pattern='*eval.data')\n ])\n ],\n list_boolean=[False, True],\n input=channel.Channel(type=_InputArtifact),\n output=channel.Channel(type=_OutputArtifact))\n\n # Verify exec_properties store parsed value when use_proto set to True.\n expected_proto = text_format.Parse(\n \"\"\"\n splits {\n name: \"name\"\n pattern: \"pattern\"\n }\n \"\"\", example_gen_pb2.Input())\n self.assertProtoEquals(expected_proto, spec.exec_properties['config_proto'])\n self.assertEqual(True, spec.exec_properties['boolean'])\n self.assertIsInstance(spec.exec_properties['list_config_proto'], list)\n self.assertEqual(spec.exec_properties['list_boolean'], [False, True])\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.components.tuner.component.\"\"\"\n\nimport tensorflow as tf\nfrom tfx.extensions.google_cloud_ai_platform.tuner import component\nfrom tfx.proto import trainer_pb2\nfrom tfx.proto import tuner_pb2\nfrom tfx.types import channel_utils\nfrom tfx.types import standard_artifacts\n\n\nclass TunerTest(tf.test.TestCase):\n\n def setUp(self):\n super().setUp()\n self.examples = channel_utils.as_channel([standard_artifacts.Examples()])\n self.schema = channel_utils.as_channel([standard_artifacts.Schema()])\n self.transform_graph = channel_utils.as_channel(\n [standard_artifacts.TransformGraph()])\n self.train_args = trainer_pb2.TrainArgs(num_steps=100)\n self.eval_args = trainer_pb2.EvalArgs(num_steps=50)\n self.tune_args = tuner_pb2.TuneArgs(num_parallel_trials=3)\n self.custom_config = {'key': 'value'}\n\n def _verify_output(self, tuner):\n self.assertEqual(standard_artifacts.HyperParameters.TYPE_NAME,\n tuner.outputs['best_hyperparameters'].type_name)\n\n def testConstructWithCustomConfig(self):\n tuner = component.Tuner(\n examples=self.examples,\n schema=self.schema,\n train_args=self.train_args,\n eval_args=self.eval_args,\n tune_args=self.tune_args,\n module_file='/path/to/module/file',\n custom_config=self.custom_config,\n )\n self._verify_output(tuner)\n\n def testConstructWithoutCustomConfig(self):\n tuner = component.Tuner(\n examples=self.examples,\n schema=self.schema,\n train_args=self.train_args,\n eval_args=self.eval_args,\n tune_args=self.tune_args,\n module_file='/path/to/module/file',\n )\n self._verify_output(tuner)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for Cloud AI Platform Tuner Executor.\"\"\"\n\nimport copy\nimport os\nfrom typing import Any, Dict\nfrom unittest import mock\n\nimport tensorflow as tf\nfrom tfx.extensions.google_cloud_ai_platform import constants\nfrom tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor\nfrom tfx.extensions.google_cloud_ai_platform.tuner import executor as ai_platform_tuner_executor\nfrom tfx.proto import tuner_pb2\nfrom tfx.types import standard_component_specs\nfrom tfx.utils import json_utils\nfrom tfx.utils import proto_utils\n\n\nclass ExecutorTest(tf.test.TestCase):\n\n def setUp(self):\n super().setUp()\n\n self._output_data_dir = os.path.join(\n os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),\n self._testMethodName)\n self._job_dir = os.path.join(self._output_data_dir, 'jobDir')\n self._project_id = '12345'\n self._job_id = 'fake_job_id'\n self._inputs = {}\n self._outputs = {}\n # Dict format of exec_properties. custom_config needs to be serialized\n # before being passed into Do function.\n self._exec_properties = {\n 'custom_config': {\n ai_platform_trainer_executor.JOB_ID_KEY: self._job_id,\n ai_platform_tuner_executor.TUNING_ARGS_KEY: {\n 'project': self._project_id,\n 'jobDir': self._job_dir,\n },\n },\n }\n self._executor_class_path = '%s.%s' % (\n ai_platform_tuner_executor._WorkerExecutor.__module__,\n ai_platform_tuner_executor._WorkerExecutor.__name__)\n\n self.addCleanup(mock.patch.stopall)\n self.mock_runner = mock.patch(\n 'tfx.extensions.google_cloud_ai_platform.tuner.executor.runner').start(\n )\n\n def _serialize_custom_config_under_test(self) -> Dict[str, Any]:\n \"\"\"Converts self._exec_properties['custom_config'] to string.\"\"\"\n result = copy.deepcopy(self._exec_properties)\n result['custom_config'] = json_utils.dumps(result['custom_config'])\n return result\n\n def testDo(self):\n executor = ai_platform_tuner_executor.Executor()\n executor.Do(self._inputs, self._outputs,\n self._serialize_custom_config_under_test())\n\n def testDoWithTuneArgs(self):\n executor = ai_platform_tuner_executor.Executor()\n self._exec_properties['tune_args'] = proto_utils.proto_to_json(\n tuner_pb2.TuneArgs(num_parallel_trials=3))\n\n executor.Do(self._inputs, self._outputs,\n self._serialize_custom_config_under_test())\n\n self.mock_runner.start_cloud_training.assert_called_with(\n self._inputs, self._outputs, self._serialize_custom_config_under_test(),\n self._executor_class_path, {\n 'project': self._project_id,\n 'jobDir': self._job_dir,\n 'scaleTier': 'CUSTOM',\n 'masterType': 'standard',\n 'workerType': 'standard',\n 'workerCount': 2,\n }, self._job_id, False, None)\n\n def testDoWithTuneArgsAndTrainingInputOverride(self):\n executor = ai_platform_tuner_executor.Executor()\n self._exec_properties['tune_args'] = proto_utils.proto_to_json(\n tuner_pb2.TuneArgs(num_parallel_trials=6))\n\n 
self._exec_properties['custom_config'][\n ai_platform_tuner_executor.TUNING_ARGS_KEY].update({\n 'scaleTier': 'CUSTOM',\n 'masterType': 'n1-highmem-16',\n 'workerType': 'n1-highmem-16',\n 'workerCount': 2,\n })\n\n executor.Do(self._inputs, self._outputs,\n self._serialize_custom_config_under_test())\n\n self.mock_runner.start_cloud_training.assert_called_with(\n self._inputs,\n self._outputs,\n self._serialize_custom_config_under_test(),\n self._executor_class_path,\n {\n 'project': self._project_id,\n 'jobDir': self._job_dir,\n # Confirm scale tier and machine types are not overritten.\n 'scaleTier': 'CUSTOM',\n 'masterType': 'n1-highmem-16',\n 'workerType': 'n1-highmem-16',\n # Confirm workerCount has been adjusted to num_parallel_trials.\n 'workerCount': 5,\n },\n self._job_id, False, None)\n\n def testDoWithoutCustomCaipTuneArgs(self):\n executor = ai_platform_tuner_executor.Executor()\n self._exec_properties = {'custom_config': {}}\n with self.assertRaises(ValueError):\n executor.Do(self._inputs, self._outputs,\n self._serialize_custom_config_under_test())\n\n def testDoWithEnableVertexOverride(self):\n executor = ai_platform_tuner_executor.Executor()\n enable_vertex = True\n vertex_region = 'us-central2'\n self._exec_properties[standard_component_specs.CUSTOM_CONFIG_KEY][\n constants.ENABLE_VERTEX_KEY] = enable_vertex\n self._exec_properties[standard_component_specs.CUSTOM_CONFIG_KEY][\n constants.VERTEX_REGION_KEY] = vertex_region\n executor.Do(self._inputs, self._outputs,\n self._serialize_custom_config_under_test())\n self.mock_runner.start_cloud_training.assert_called_with(\n self._inputs, self._outputs, self._serialize_custom_config_under_test(),\n self._executor_class_path, {\n 'project': self._project_id,\n 'jobDir': self._job_dir,\n }, self._job_id, enable_vertex, vertex_region)\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for third_party.tfx.components.trainer.rewriting.tfjs_rewriter.\"\"\"\n\nimport unittest\nfrom unittest import mock\n\nimport tensorflow as tf\n\nfrom tfx.components.trainer.rewriting import rewriter\n\n\ntry:\n from tfx.components.trainer.rewriting import tfjs_rewriter # pylint: disable=g-import-not-at-top\nexcept ImportError as err:\n tfjs_rewriter = None\n\n\[email protected](tf.__version__ < '2',\n 'TFJS requires TF2 which is not satisfied for TF1 environment,'\n ' thus skip any TFJS related tests.')\[email protected](tfjs_rewriter is None,\n 'Cannot import tfjs_rewriter. This can happen when tfjs is not'\n ' available.')\nclass TFJSRewriterTest(tf.test.TestCase):\n\n @mock.patch('tfx.components.trainer.rewriting.'\n 'tfjs_rewriter._convert_tfjs_model')\n def testInvokeTFJSRewriter(self, converter):\n src_model_path = '/path/to/src/model'\n dst_model_path = '/path/to/dst/model'\n\n src_model = rewriter.ModelDescription(rewriter.ModelType.SAVED_MODEL,\n src_model_path)\n dst_model = rewriter.ModelDescription(rewriter.ModelType.TFJS_MODEL,\n dst_model_path)\n\n tfrw = tfjs_rewriter.TFJSRewriter(name='myrw')\n tfrw.perform_rewrite(src_model, dst_model)\n\n converter.assert_called_once_with(src_model_path, dst_model_path)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.python.platform.test.main"
],
[
"tensorflow.test.main"
],
[
"tensorflow.test.main"
],
[
"tensorflow.test.main"
],
[
"tensorflow.test.main"
],
[
"tensorflow.test.main"
],
[
"tensorflow.test.main"
],
[
"tensorflow.test.main"
],
[
"tensorflow.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
parvex/residual-continual-learning-benchmark | [
"8eeb2e57ecf0711e075eb02e8ed06fc8e7b9f20d"
] | [
"dataloaders/wrapper.py"
] | [
"from os import path\nimport torch\nimport torch.utils.data as data\n\n\nclass CacheClassLabel(data.Dataset):\n \"\"\"\n A dataset wrapper that has a quick access to all labels of data.\n \"\"\"\n def __init__(self, dataset):\n super(CacheClassLabel, self).__init__()\n self.dataset = dataset\n self.labels = torch.LongTensor(len(dataset)).fill_(-1)\n label_cache_filename = path.join(dataset.root, dataset.__module__+'_'+str(len(dataset))+'.pth')\n if path.exists(label_cache_filename):\n self.labels = torch.load(label_cache_filename)\n else:\n for i, data in enumerate(dataset):\n self.labels[i] = data[1]\n torch.save(self.labels, label_cache_filename)\n self.number_classes = len(torch.unique(self.labels))\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n img,target = self.dataset[index]\n return img, target\n\n\nclass AppendName(data.Dataset):\n \"\"\"\n A dataset wrapper that also return the name of the dataset/task\n \"\"\"\n def __init__(self, dataset, name, first_class_ind=0):\n super(AppendName,self).__init__()\n self.dataset = dataset\n self.name = name\n self.first_class_ind = first_class_ind # For remapping the class index\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n img,target = self.dataset[index]\n target = target + self.first_class_ind\n return img, target, self.name\n\n\nclass Subclass(data.Dataset):\n \"\"\"\n A dataset wrapper that return the task name and remove the offset of labels (Let the labels start from 0)\n \"\"\"\n def __init__(self, dataset, class_list, remap=True):\n '''\n :param dataset: (CacheClassLabel)\n :param class_list: (list) A list of integers\n :param remap: (bool) Ex: remap class [2,4,6 ...] to [0,1,2 ...]\n '''\n super(Subclass,self).__init__()\n assert isinstance(dataset, CacheClassLabel), 'dataset must be wrapped by CacheClassLabel'\n self.dataset = dataset\n self.class_list = class_list\n self.remap = remap\n self.indices = []\n for c in class_list:\n self.indices.extend((dataset.labels==c).nonzero().flatten().tolist())\n if remap:\n self.class_mapping = {c: i for i, c in enumerate(class_list)}\n\n def __len__(self):\n return len(self.indices)\n\n def __getitem__(self, index):\n img,target = self.dataset[self.indices[index]]\n if self.remap:\n raw_target = target.item() if isinstance(target,torch.Tensor) else target\n target = self.class_mapping[raw_target]\n return img, target\n\n\nclass Permutation(data.Dataset):\n \"\"\"\n A dataset wrapper that permute the position of features\n \"\"\"\n def __init__(self, dataset, permute_idx):\n super(Permutation,self).__init__()\n self.dataset = dataset\n self.permute_idx = permute_idx\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n img,target = self.dataset[index]\n shape = img.size()\n img = img.view(-1)[self.permute_idx].view(shape)\n return img, target\n\n\nclass Storage(data.Subset):\n\n def reduce(self, m):\n self.indices = self.indices[:m]\n"
] | [
[
"torch.unique",
"torch.save",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Davjes15/ardas_platform | [
"d962e7280ac4477fc3ee71280e90aeab58f74bd6"
] | [
"hs_logisticregression.py"
] | [
"# Implementation of Random Forest model to classify failures in a hydraulic process\n# Hydraulic system can be found : https://archive.ics.uci.edu/ml/datasets/Condition+monitoring+of+hydraulic+systems\n# The data set contains raw process sensor data (i.e. without feature extraction) which are structured as matrices (tab-delimited) \n# with rows representing the cycles and the columns the data points within a cycle. \n# The sensors involved are: \n\n# Sensor Physical quantity Unit Sampling rate\n# PS1 Pressure bar 100 Hz \n# PS2 Pressure bar 100 Hz \n# PS3 Pressure bar 100 Hz \n# PS4 Pressure bar 100 Hz \n# PS5 Pressure bar 100 Hz \n# PS6 Pressure bar 100 Hz \n# EPS1 Motor power W 100 Hz \n# FS1 Volume flow l/min 10 Hz \n# FS2 Volume flow l/min 10 Hz \n# TS1 Temperature °C 1 Hz \n# TS2 Temperature °C 1 Hz \n# TS3 Temperature °C 1 Hz \n# TS4 Temperature °C 1 Hz \n# VS1 Vibration mm/s 1 Hz \n# CE Cooling efficiency (virtual) % 1 Hz \n# CP Cooling power (virtual) kW 1 Hz \n# SE Efficiency factor % 1 Hz \n\n\n#**************** Python Version : Python 3.7.3 ************\n# Package Version\n# --------------- --------\n# aniso8601 7.0.0\n# certifi 2019.3.9\n# chardet 3.0.4\n# Click 7.0\n# configparser 3.7.4\n# cycler 0.10.0\n# databricks-cli 0.8.7\n# Flask 1.1.1\n# Flask-Cors 3.0.8\n# Flask-RESTful 0.3.7\n# idna 2.8\n# itsdangerous 1.1.0\n# Jinja2 2.10.1\n# joblib 0.13.2\n# jsonify 0.5\n# kiwisolver 1.1.0\n# MarkupSafe 1.1.1\n# matplotlib 3.1.1\n# numpy 1.17.0\n# pandas 0.25.1\n# pathlib 1.0.1\n# pip 19.2.3\n# pyparsing 2.4.2\n# python-dateutil 2.8.0\n# pytz 2019.1\n# requests 2.22.0\n# scikit-learn 0.21.3\n# scipy 1.3.1\n# seaborn 0.9.0\n# setuptools 40.8.0\n# six 1.12.0\n# sklearn 0.0\n# tabulate 0.8.3\n# urllib3 1.25.3\n# virtualenv 16.6.1\n# Werkzeug 0.15.4\n\n\n# Import libraries\n\nimport pandas as pd\nimport numpy as np\nimport os, sys\nimport pickle\nimport sklearn as sk\nfrom pathlib import Path\nfrom sklearn import preprocessing\nfrom sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\n\n# Define directories for reading and saving files\npath = os.path.abspath(os.path.dirname(sys.argv[0]))\npath_file= Path(path)\nml_model_dir = path_file / \"ml_model\"\nos.chdir(ml_model_dir) # os.chdir changes the directory so we can import the data from a different directory depending on the computer\nprint (os.getcwd())\n\n# Import feature extracted\n\ndf_features = pd.read_csv('feature_hs.csv')\nprint (df_features.head(10))\nprint (\"Features imported 100%\")\n# Import target conditions\nnames = ['cooler_condition', 'valve_condition', 'pump_leakage', 'hydraulic_accumulator', 'stable_flag']\nconditions = pd.read_csv('profile.txt',names = names, sep=\"\\t\")\nprint (conditions.head(10))\nprint (\"Target Conditions imported 100%\")\n\n\n# Define features\nX = df_features # features file\n\n\n# Save feature importance as csv file\ndef save_fi (data, path):\n df = pd.DataFrame(data,\n index = X_train.columns,\n columns=['Importance']).sort_values('Importance',ascending=False)\n data=df.T\n data = data.iloc[:,0:6]\n export_fi = data.to_csv (path, index = None, header=True)\n return (export_fi)\n\n# Trainnig a random forest algorithm \ndef train_lr (X_train, X_test, y_train, y_test, element):\n\t# Initialize model \n\tlr = LogisticRegression(multi_class = 'ovr', solver = 'liblinear')\n #Train the model on training data\n\tmodel_lr= lr.fit(X_train, y_train);\n\tprint 
(element + \" Model Training Ready\")\n\t# Use the LR's predict method on the test data\n\tpredictions = lr.predict(X_test)\n\tprint(element + ' Accuracy Condition: %.2f%%' % (accuracy_score(predictions,y_test)*100))\n\treturn (model_lr)\n\ndef save_model_object(model_object,model_name,model_params):\n file_name=model_name+\"_\"+str(model_params).replace('[',\"\").replace(']',\"\").replace(',',\"_\").replace(' ',\"_\")+\".obj\"\n with open(file_name,'wb') as handle:\n try:\n pickle.dump(model_object,handle)\n except:\n print(\"ERROR\")\n print(file_name,\" saved successfully\")\n# ---------------------------------------------------------------\n\n# Train model for cooler condition classification\nY = conditions[\"cooler_condition\"] # define target value\n# split the data into training and testing setssplit_data (X,Y)\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.3, random_state = 42)\nlr_cooler = train_lr (X_train, X_test, y_train, y_test, \"Cooler\")\n# X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.30,random_state = 42)\n# rf = RandomForestClassifier(n_estimators= 1000, random_state=42)\n# rf.fit(X_train, y_train);\n# print ('Model training 100%')\n# predictions = rf.predict(X_test)\n# print('Accuracy Cooler Condition: %.2f%%' % (accuracy_score(predictions,y_test)*100))\n\n#Save machine learning model\nsave_model_object(lr_cooler,\"logistic_regression\",\"c\")\n\n# Create a dataframe with feature importance\n#fic_path = path_file / 'static' / 'hs_database'/ 'lr_feature_cooler.csv'\n#fi_c=rf_cooler.feature_importances_\n#save_fi (fi_c, fic_path)\n\n#-----------------------------------------------------------------\n\n# Train model for valve condition classification\nYval = conditions[\"valve_condition\"]\nX_train, X_test, y_train, y_test = train_test_split(X, Yval, test_size = 0.3, random_state = 42)\nlr_valve = train_lr (X_train, X_test, y_train, y_test, \"Valve\")\nsave_model_object(lr_valve,\"logistic_regression\",\"v\")\n#fiv_path = path_file / 'static' / 'hs_database'/ 'feature_valve.csv'\n#fi_v=rf_valve.feature_importances_\n#save_fi (fi_v, fiv_path)\n#-----------------------------------------------------------------\n\n# Train model for pump condition classification\nYpum = conditions[\"pump_leakage\"]\nX_train, X_test, y_train, y_test = train_test_split(X, Ypum, test_size = 0.3, random_state = 42)\nlr_pump = train_lr (X_train, X_test, y_train, y_test, \"Pump\")\nsave_model_object(lr_pump,\"logistic_regression\",\"p\")\n#fip_path = path_file / 'static' / 'hs_database'/ 'feature_pump.csv'\n#fi_p=rf_pump.feature_importances_\n#save_fi (fi_p, fip_path)\n\n#-----------------------------------------------------------------\n\n# Train model for accumulator condition classification\n\nYacc = conditions[\"hydraulic_accumulator\"]\nX_train, X_test, y_train, y_test = train_test_split(X, Yacc, test_size = 0.3, random_state = 42)\nlr_acc = train_lr (X_train, X_test, y_train, y_test, \"Accumulator\")\nsave_model_object(lr_acc ,\"logistic_regression\",\"a\")\n#fia_path = path_file / 'static' / 'hs_database'/ 'feature_acc.csv'\n#fi_a=rf_acc.feature_importances_\n#save_fi (fi_a, fia_path)\n"
] | [
[
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
NCIA-Diffusion/ScoreSDE | [
"b5a562908daf66e6dcf0b791beb83f1fcb61174b"
] | [
"run_lib.py"
] | [
"import os\nimport logging\nimport copy\nfrom tqdm import trange\nfrom datetime import datetime\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision.utils import save_image\n\nfrom utils import ema\nfrom lib.dataset import DataLooper \nfrom lib.sde import VPSDE\nfrom lib.model.ddpm import DDPM\nfrom lib.trainer import DiffusionTrainer\nfrom lib.sampler import DiffusionSampler\n\n\ndef train(config, logdir, resume=True):\n \"\"\"Running a training pipeline\"\"\"\n # Dataset setup\n datalooper = DataLooper(\n config,\n batch_size=config.train.batch_size,\n )\n\n # Model setup\n if config.model.name.lower() == 'ddpm':\n net_model = DDPM(\n config.dataset.ch,\n config.model.ch,\n config.model.ch_mult,\n config.model.attn,\n config.model.num_res_blocks,\n config.model.dropout,\n )\n else:\n raise ValueError\n\n ema_model = copy.deepcopy(net_model)\n\n if config.parallel:\n net_model = torch.nn.DataParallel(net_model)\n ema_model = torch.nn.DataParallel(ema_model)\n\n # SDE setup\n if config.sde.name == 'VPSDE':\n sde = VPSDE(\n config.sde.beta_min,\n config.sde.beta_max,\n config.sde.N,\n )\n else:\n raise ValueError\n\n # Trainer setup\n trainer = DiffusionTrainer(\n sde,\n net_model,\n config.model.pred_type,\n ).to(config.device)\n trainer.train()\n\n # Optimizer setup\n optim = torch.optim.Adam(\n net_model.parameters(),\n lr=config.train.lr,\n )\n warmup = config.train.warmup\n sched = torch.optim.lr_scheduler.LambdaLR(\n optim,\n lr_lambda=lambda step: min(step, warmup) / warmup,\n )\n\n # Sampler setup\n sampler = DiffusionSampler(\n sde,\n ema_model,\n config.model.pred_type,\n ).to(config.device)\n sampler.eval()\n \n # Log setup \n sample_dir = os.path.join(logdir, 'samples')\n os.makedirs(sample_dir, exist_ok=True)\n writer = SummaryWriter(logdir)\n\n # Show model size\n model_size = sum(p.numel() for p in net_model.parameters())\n logging.info(f'Model Params : {model_size / 1024 / 1024:.2f}M')\n\n # Load checkpoint (if exists)\n try:\n assert resume\n ckpt = torch.load(os.path.join(logdir, f'ckpt_latest.pt'))\n net_model.load_state_dict(ckpt['net_model'])\n ema_model.load_state_dict(ckpt['ema_model'])\n optim.load_state_dict(ckpt['optimizer'])\n sched.load_state_dict(ckpt['scheduler'])\n init_step = ckpt['step'] + 1\n logging.info(f'Checkpoint loaded! Re-start from step {init_step}.')\n except:\n init_step = 0\n logging.info(f'No checkpoint found. Start from step {init_step}.')\n\n # Start training\n with trange(init_step, config.train.total_steps, dynamic_ncols=True) as pbar:\n for step in pbar:\n # Train\n optim.zero_grad()\n x_0 = next(datalooper)\n x_0 = x_0.to(config.device)\n loss = trainer(x_0)\n loss = loss.mean()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(\n net_model.parameters(),\n config.train.grad_clip,\n )\n optim.step()\n sched.step()\n ema(net_model, ema_model, config.train.ema_decay)\n\n # Log\n writer.add_scalar('loss', loss, step)\n pbar.set_postfix(loss=f'{loss:.3f}')\n\n # Sample\n if config.train.sample_step > 0 and step % config.train.sample_step == 0:\n xs = []\n total_steps = config.eval.sample_size // config.eval.batch_size\n for i in range(0, config.eval.sample_size, config.eval.batch_size):\n x_T = torch.randn_like(x_0)\n with torch.no_grad():\n x = sampler(\n x_T,\n pbar,\n corrector_n_steps=1,\n corrector_langevin_snr=0.16,\n )\n xs.append((x.detach().cpu() + 1.) 
/ 2)\n pbar.set_postfix(option=f'({i+1}/{total_steps})')\n xs = torch.cat(xs, dim=0)\n save_image(\n xs[:64],\n os.path.join(sample_dir, f'sample_{step}.png'),\n nrow=8,\n )\n\n # Save\n if config.train.save_step > 0 and step % config.train.save_step == 0:\n ckpt = {\n 'net_model': net_model.state_dict(),\n 'ema_model': ema_model.state_dict(),\n 'optimizer': optim.state_dict(),\n 'scheduler': sched.state_dict(),\n 'step': step,\n }\n torch.save(ckpt, os.path.join(logdir, f'ckpt_latest.pt'))\n\n # Archive\n if config.train.archive_step > 0 and step % config.train.archive_step == 0:\n ckpt = {\n 'net_model': net_model.state_dict(),\n 'ema_model': ema_model.state_dict(),\n 'optimizer': optim.state_dict(),\n 'scheduler': sched.state_dict(),\n 'step': step,\n }\n torch.save(ckpt, os.path.join(logdir, f'ckpt_{step}.pt'))\n\n writer.close()\n\n\ndef eval(config, logdir):\n \"\"\"Running an evaluation pipeline\"\"\"\n # Datalooper setup\n eval_datalooper = DataLooper(\n config,\n batch_size=config.eval.batch_size,\n )\n sample_size = config.eval.sample_size\n batch_size = config.eval.batch_size\n\n # Model setup\n if config.model.name.lower() == 'ddpm':\n model = DDPM(\n config.dataset.ch,\n config.model.ch,\n config.model.ch_mult,\n config.model.attn,\n config.model.num_res_blocks,\n config.model.dropout,\n )\n else:\n raise ValueError\n\n if config.parallel:\n model = torch.nn.DataParallel(model)\n \n # SDE setup\n if config.sde.name == 'VPSDE':\n sde = VPSDE(\n config.sde.beta_min,\n config.sde.beta_max,\n config.sde.N,\n )\n else:\n raise ValueError\n\n # Sampler setup\n sampler = DiffusionSampler(\n sde,\n model,\n config.model.pred_type,\n ).to(config.device)\n sampler.eval()\n\n # Show model size\n model_size = sum(p.numel() for p in model.parameters())\n logging.info(f'Model Params : {model_size / 1024 / 1024:.2f}M')\n\n # Load checkpoint\n ckpt = torch.load(\n os.path.join(logdir, f'ckpt_latest.pt'),\n map_location=config.device\n )\n logging.info(f'Checkpoint step : {ckpt[\"step\"]}')\n model.load_state_dict(ckpt['ema_model'])\n\n # Directory setup\n eval_dir = os.path.join(logdir, 'eval')\n sample_dir = os.path.join(eval_dir, 'samples')\n os.makedirs(eval_dir, exist_ok=True)\n os.makedirs(sample_dir, exist_ok=True)\n\n xs = []\n x_0 = next(eval_datalooper).to(config.device)\n with trange(0, sample_size, batch_size, dynamic_ncols=True) as pbar:\n for _ in pbar:\n x_T = torch.randn_like(x_0)\n with torch.no_grad():\n x = sampler(\n x_T,\n pbar,\n corrector_n_steps=3,\n corrector_langevin_snr=0.16,\n )\n xs.append((x.detach().cpu() + 1.) / 2)\n xs = torch.cat(xs, dim=0)\n now = datetime.now()\n save_image(\n xs[:64],\n os.path.join(sample_dir, f'samples_{now}.png'),\n nrow=8,\n )"
] | [
[
"torch.randn_like",
"torch.cat",
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.DataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
joshfp/fastai | [
"794365cd7f734b5e1027d7e19c99e648fbb9a12b"
] | [
"tests/test_core.py"
] | [
"import pytest, torch\nimport numpy as np\nfrom fastai import *\nfrom tempfile import TemporaryDirectory\n\ndef test_cpus(): assert num_cpus() >= 1\n\[email protected](\"p, q, expected\", [\n (5 , 1 , [5]),\n (5 , [1,1], [5, 5]),\n ([5], 1 , [5]),\n ([5], [1,1], [5, 5]),\n (\"ab\" , \"cd\" , [\"a\", \"b\"]),\n (\"ab\" , [\"cd\", \"ef\"], [\"a\", \"b\"]),\n ([\"ab\"], \"cd\" , [\"ab\", \"ab\"]),\n ([\"ab\"], [\"cd\", \"ef\"], [\"ab\", \"ab\"]),\n])\ndef test_listify(p, q, expected):\n assert listify(p, q) == expected\n\ndef test_ifnone():\n assert ifnone(None, 5) == 5\n assert ifnone(5, None) == 5\n assert ifnone(1, 5) == 1\n assert ifnone(0, 5) == 0\n\ndef test_uniqueify():\n assert uniqueify([1,1,3,3,5]) == [1,3,5]\n assert uniqueify([1,3,5]) == [1,3,5]\n assert uniqueify([1,1,1,3,5]) == [1,3,5]\n\ndef test_listy():\n assert is_listy([1,1,3,3,5]) == True\n assert is_listy((1,1,3,3,5)) == True\n assert is_listy([1,\"2\",3,3,5]) == True\n assert is_listy((1,\"2\",3,3,5)) == True\n assert is_listy(1) == False\n assert is_listy(\"2\") == False\n assert is_listy({1, 2}) == False\n assert is_listy(set([1,1,3,3,5])) == False\n\ndef test_tuple():\n assert is_tuple((1,1,3,3,5)) == True\n assert is_tuple([1]) == False\n assert is_tuple(1) == False\n\ndef test_noop():\n assert noop(1) is 1\n\ndef test_to_int():\n assert to_int((\"1\",\"1\",\"3\",\"3\",\"5\")) == [1,1,3,3,5]\n assert to_int([1,\"2\",3.3,3,5]) == [1,2,3,3,5]\n assert to_int(1) == 1\n assert to_int(1.2) == 1\n assert to_int(\"1\") == 1\n\ndef test_partition_functionality():\n\n def test_partition(a, sz, ex):\n result = partition(a, sz)\n assert len(result) == len(ex)\n assert all([a == b for a, b in zip(result, ex)])\n\n a = [1,2,3,4,5]\n\n sz = 2\n ex = [[1,2],[3,4],[5]]\n test_partition(a, sz, ex)\n\n sz = 3\n ex = [[1,2,3],[4,5]]\n test_partition(a, sz, ex)\n\n sz = 1\n ex = [[1],[2],[3],[4],[5]]\n test_partition(a, sz, ex)\n\n sz = 6\n ex = [[1,2,3,4,5]]\n test_partition(a, sz, ex)\n\n sz = 3\n a = []\n result = partition(a, sz)\n assert len(result) == 0\n\ndef test_idx_dict():\n assert idx_dict(np.array([1,2,3]))=={1: 0, 2: 1, 3: 2}\n assert idx_dict([1, 2, 3])=={1: 0, 2: 1, 3: 2}\n assert idx_dict((1, 2, 3))=={1: 0, 2: 1, 3: 2}\n\ndef test_find_classes():\n path = Path('./classes_test').resolve()\n os.mkdir(path)\n classes = ['class_0', 'class_1', 'class_2']\n for class_num in classes:\n os.mkdir(path/class_num)\n try:\n assert find_classes(path)==[Path('./classes_test/class_0').resolve(),Path('./classes_test/class_1').resolve(),Path('./classes_test/class_2').resolve()]\n finally:\n shutil.rmtree(path)\n\ndef test_arrays_split():\n a = arrays_split([0,3],[1, 2, 3, 4, 5], ['a', 'b', 'c', 'd', 'e'])\n b = [(array([1, 4]),array(['a', 'd'])), (array([5, 2]),(array(['e','b'])))]\n np.testing.assert_array_equal(a,b)\n\n c = arrays_split([0,3],[1, 2, 3, 4, 5])\n d = [(array([1, 4]),), (array([5, 2]),)]\n np.testing.assert_array_equal(c,d)\n\n with pytest.raises(Exception): arrays_split([0,5],[1, 2, 3, 4, 5])\n with pytest.raises(Exception): arrays_split([0,3],[1, 2, 3, 4, 5], [1, 2, 3, 4])\n\ndef test_random_split():\n valid_pct = 0.4\n a = [len(arr) for arr in random_split(valid_pct, [1,2,3,4,5], ['a', 'b', 'c', 'd', 'e'])]\n b = [2, 2]\n assert a == b\n\n with pytest.raises(Exception): random_split(1.1, [1,2,3])\n with pytest.raises(Exception): random_split(0.1, [1,2,3], [1,2,3,4])\n\ndef test_camel2snake():\n a = camel2snake('someString')\n b = 'some_string'\n assert a == b\n\n c = camel2snake('some2String')\n d = 'some2_string'\n 
assert c == d\n\n e = camel2snake('longStringExmpl')\n f = 'long_string_exmpl'\n assert e == f\n\ndef test_even_mults():\n a = even_mults(start=1, stop=8, n=4)\n b = array([1.,2.,4.,8.])\n np.testing.assert_array_equal(a,b)\n\ndef test_series2cat():\n df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], 'col3':[5, 6]})\n cols = 'col1','col2'\n series2cat(df,*cols)\n for col in cols:\n assert (df[col].dtypes == 'category')\n assert (df['col3'].dtypes == 'int64')\n\ndef _write_file(path): f = open(path, 'w'); f.write(str(path.name)); f.close()\nclass TestMaybeCopy(object):\n def test_copies_if_does_not_exist(self):\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n _write_file(tmpdir/'src')\n maybe_copy([str(tmpdir/'src')], [str(tmpdir/'dst')]) # works with strings\n assert os.path.exists(tmpdir/'dst')\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n _write_file(tmpdir/'src')\n maybe_copy([tmpdir/'src'], [tmpdir/'dst']) # works with Paths\n assert os.path.exists(tmpdir/'dst')\n\n def test_copies_if_older(self):\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n _write_file(tmpdir/'first')\n _write_file(tmpdir/'second')\n os.utime(tmpdir/'first', (1,1))\n os.utime(tmpdir/'second', (2,2))\n maybe_copy([tmpdir/'second'], [tmpdir/'first'])\n assert open(tmpdir/'first').read() == 'second'\n\n def test_does_not_copy_if_newer(self):\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n _write_file(tmpdir/'first')\n _write_file(tmpdir/'second')\n os.utime(tmpdir/'first', (1,1))\n os.utime(tmpdir/'second', (2,2))\n maybe_copy([tmpdir/'first'], [tmpdir/'second'])\n assert open(tmpdir/'second').read() == 'second'\n\n def test_creates_dst_dir_if_does_not_exist(self):\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n _write_file(tmpdir/'file')\n maybe_copy([tmpdir/'file'], [tmpdir/'dir'/'file'])\n assert os.path.exists(tmpdir/'dir'/'file')\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bratao/-PySeqLab | [
"fea1c4bd4d43565b1bb20a789d78946e1022d0ff",
"fea1c4bd4d43565b1bb20a789d78946e1022d0ff"
] | [
"pyseqlab/utilities.py",
"pyseqlab/linear_chain_crf.py"
] | [
"\"\"\"\n@author: ahmed allam <[email protected]>\n\"\"\"\nimport os\nimport pickle\nimport shutil\nfrom datetime import datetime\nfrom copy import deepcopy\nfrom itertools import combinations\nimport heapq\nimport numpy\n\n\nclass SequenceStruct(object):\n r\"\"\"class for representing each sequence/segment\n \n Args:\n Y: list containing the sequence of states/labels (i.e. ['P','O','O','L','L'])\n X: list containing dictionary elements of observation sequences and/or features of the input\n seg_other_symbol: string or None (default), if specified then the task is a segmentation problem \n where it represents the non-entity symbol else (None) then it is considered \n as sequence labeling problem\n \n Attributes:\n Y: list containing the sequence of states/labels (i.e. ['P','O','O','L','L'])\n X: list containing dictionary elements of observation sequences and/or features of the input\n seg_other_symbol: string or None(default), if specified then the task is a segmentation problem \n where it represents the non-entity symbol else (None) then it is considered \n as sequence labeling problem\n T: int, length of a sequence (i.e. len(X))\n seg_attr: dictionary comprising the extracted attributes per each boundary of a sequence\n L: int, longest length of an identified segment in the sequence\n flat_y: list of labels/tags \n y_sboundaries: sorted list of boundaries of the :attr:`Y` of the sequence\n y_range: range of the sequence\n \n \"\"\"\n\n def __init__(self, X, Y, seg_other_symbol=None):\n self.seg_attr = {}\n self.X = X\n self.Y = (Y, seg_other_symbol)\n\n @property\n def X(self):\n return self._X\n\n @X.setter\n def X(self, l):\n \"\"\"setup the observation sequence \n \n Args:\n l: a list of elements (i.e. ``X = [{'w':'Michael'}, {'w':'is'}, {'w':'in'}, {'w':'New'}, {'w':'Haven'}]``)\n \n \n Example::\n \n the output X becomes:\n {1:{'w':'Michael'},\n 2:{'w':'is'}, \n 3:{'w':'in'}, \n 4:{'w':'New'},\n 5:{'w':'Haven'}\n }\n \"\"\"\n self._X = {}\n T = len(l)\n for i in range(T):\n self._X[i + 1] = l[i]\n\n # new assignment clear seg_attr\n if self.seg_attr:\n self.seg_attr.clear()\n self.T = T\n\n @property\n def Y(self):\n return self._Y\n\n @Y.setter\n def Y(self, elmtup):\n \"\"\"setup the label sequence\n \n Args:\n elmtup: tuple consisting of:\n - **Y** a list of elements (i.e. ``Y = ['P','O','O','L','L']``) \n representing the labels of the elements in X\n - **non_entity_symbol** which represents the Other category (i.e. 
non entity element which is 'O' in above example)\n \n Example:\n \n Y after the transformation becomes ``{(1, 1): 'P', (2,2): 'O', (3, 3): 'O', (4, 5): 'L'}``\n \"\"\"\n try:\n Y_ref, non_entity_symb = elmtup\n except ValueError:\n raise ValueError(\"tuple containing Y and non-entity symbol must be passed\")\n else:\n self._Y = {}\n # length of longest entity in a segment\n L = 1\n if non_entity_symb:\n label_indices = {}\n for i in range(len(Y_ref)):\n label = Y_ref[i]\n if label in label_indices:\n label_indices[label].append(i + 1)\n else:\n label_indices[label] = [i + 1]\n\n for label, indices_list in label_indices.items():\n if label == non_entity_symb or len(indices_list) == 1:\n for indx in indices_list:\n boundary = (indx, indx)\n self._Y[boundary] = label\n\n else:\n indx_stack = []\n for indx in indices_list:\n if not indx_stack:\n indx_stack.append(indx)\n else:\n diff = indx - indx_stack[-1]\n if diff > 1:\n boundary = (indx_stack[0], indx_stack[-1])\n self._Y[boundary] = label\n l = indx_stack[-1] - indx_stack[0] + 1\n if l > L:\n L = l\n indx_stack = [indx]\n else:\n indx_stack.append(indx)\n if indx_stack:\n boundary = (indx_stack[0], indx_stack[-1])\n self._Y[boundary] = label\n l = indx_stack[-1] - indx_stack[0] + 1\n if l > L:\n L = l\n indx_stack = [indx]\n\n else:\n for i in range(len(Y_ref)):\n label = Y_ref[i]\n boundary = (i + 1, i + 1)\n self._Y[boundary] = label\n\n # store the length of longest entity\n self.L = L\n # keep a copy of Y in as flat list (i.e. ['P','O','O','L','L'])\n self.flat_y = Y_ref\n\n # construct a map from the yboundaries to the pos in the list\n y_sboundaries = self.get_y_boundaries()\n self.y_sboundaries = y_sboundaries\n\n self.y_boundpos_map = {}\n pos = 0\n for boundary in y_sboundaries:\n self.y_boundpos_map[boundary] = pos\n pos += 1\n self.y_range = set(range(0, pos))\n\n # def update_boundaries(self):\n # self.y_boundaries = self.get_y_boundaries()\n # self.x_boundaries = self.get_x_boundaries()\n\n def flatten_y(self, Y):\n r\"\"\"flatten the :attr:`Y` attribute \n \n Args:\n Y: dictionary of this form ``{(1, 1): 'P', (2,2): 'O', (3, 3): 'O', (4, 5): 'L'}``\n \n Example:\n \n flattened y becomes ``['P','O','O','L','L']``\n \"\"\"\n s_boundaries = sorted(Y)\n flat_y = []\n for u, v in s_boundaries:\n for _ in range(u, v + 1):\n flat_y.append(Y[(u, v)])\n return flat_y\n\n def get_y_boundaries(self):\n \"\"\"return the sorted boundaries of the labels of the sequence\"\"\"\n return sorted(self.Y.keys())\n\n def get_x_boundaries(self):\n \"\"\"return the boundaries of the observation sequence\"\"\"\n boundaries = []\n for u in self.X:\n boundaries.append((u, u))\n return boundaries\n\n def __str__(self):\n \"\"\"return string representation of the parsed sequence\"\"\"\n out_str = \"Y sequence:\\n {}\\nX sequence:\\n {}\\n{}\".format(\n self.flat_y, self.X, \"-\" * 40\n )\n return out_str\n\n\nclass DataFileParser(object):\n \"\"\"class to parse a data file comprising the training/testing data\n \n Attributes:\n seqs: list comprising of sequences that are instances of :class:`SequenceStruct` class\n header: list of attribute names read from the file\n\n \"\"\"\n\n def __init__(self):\n self.header = []\n\n def read_file(\n self, file_path, header, y_ref=True, seg_other_symbol=None, column_sep=\" \"\n ):\n r\"\"\"read and parse a file the contains the sequences following a predefined format\n \n the file should contain label and observation tracks each separated in a column \n \n .. 
note::\n \n label column is the **LAST** column in the file (i.e. X_a X_b Y)\n \n Args:\n file_path: string representing the file path to the data file\n header: specifies how the header is reported in the file containing the sequences\n options include:\n - 'main' -> one header in the beginning of the file\n - 'per_sequence' -> a header for every sequence\n - list of keywords as header (i.e. ['w', 'part_of_speech'])\n \n Keyword Arguments:\n y_ref: boolean specifying if the reference label column in the data file\n seg_other_sybmol: string or None(default), if specified then the task is a segmentation problem \n where `seg_other_symbol` represents the non-entity symbol. In this case semi-CRF models\n are used. Else (i.e. `seg_other_symbol` is not None) then it is considered \n as sequence labeling problem.\n column_sep: string, separator used between the columns in the file\n\n \"\"\"\n if y_ref:\n update_seq = self.update_XY\n else:\n update_seq = self.update_X\n\n with open(file_path) as file_obj:\n counter = 0\n X = []\n Y = []\n for line in file_obj:\n counter += 1\n line = line.rstrip()\n # print(line)\n if line:\n # print(line)\n if y_ref:\n *x_arg, y = line.split(column_sep)\n self._xarg = x_arg\n self._y = y\n else:\n x_arg = line.split(column_sep)\n self._xarg = x_arg\n\n # print(x_arg)\n # first line of a sequence\n if counter == 1:\n if header == \"main\":\n if self.header:\n update_seq(X, Y)\n # X.append(self.parse_line(x_arg))\n # Y.append(y)\n else:\n self.parse_header(x_arg)\n\n elif header == \"per_sequence\":\n if not self.header:\n self.parse_header(x_arg)\n else:\n if self.header:\n update_seq(X, Y)\n # X.append(self.parse_line(x_arg))\n # Y.append(y)\n else:\n self.parse_header(header)\n update_seq(X, Y)\n # X.append(self.parse_line(x_arg))\n # Y.append(y)\n else:\n update_seq(X, Y)\n # X.append(self.parse_line(x_arg))\n # Y.append(y)\n\n else:\n seq = SequenceStruct(X, Y, seg_other_symbol)\n # reset counter for filling new sequence\n counter = 0\n X = []\n Y = []\n self._xarg = None\n self._y = None\n yield seq\n\n if X and Y:\n seq = SequenceStruct(X, Y, seg_other_symbol)\n # reset counter for filling new sequence\n counter = 0\n X = []\n Y = []\n self._xarg = None\n self._y = None\n yield seq\n\n def update_XY(self, X, Y):\n \"\"\"update sequence observations and corresponding labels\"\"\"\n X.append(self.parse_line(self._xarg))\n Y.append(self._y)\n\n def update_X(self, X, Y):\n \"\"\"update sequence observations\"\"\"\n X.append(self.parse_line(self._xarg))\n\n def parse_line(self, x_arg):\n \"\"\"parse the read line\n \n Args:\n x_arg: tuple of observation columns\n \"\"\"\n # fill the sequences X and Y with observations and tags respectively\n header = self.header\n x = {}\n for i in range(len(x_arg)):\n x[header[i]] = x_arg[i]\n return x\n\n def parse_header(self, x_arg):\n \"\"\"parse header\n \n Args:\n x_arg: tuple of attribute/observation names \n \"\"\"\n seq_header = [input_src for input_src in x_arg]\n self.header = seq_header\n\n\nclass ReaderWriter(object):\n \"\"\"class for dumping, reading and logging data\"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def dump_data(data, file_name, mode=\"wb\"):\n \"\"\"dump data by pickling \n \n Args:\n data: data to be pickled\n file_name: file path where data will be dumped\n mode: specify writing options i.e. 
binary or unicode\n \"\"\"\n with open(file_name, mode) as f:\n pickle.dump(data, f, protocol=4)\n\n @staticmethod\n def read_data(file_name, mode=\"rb\"):\n \"\"\"read dumped/pickled data\n \n Args:\n file_name: file path where data will be dumped\n mode: specify writing options i.e. binary or unicode\n \"\"\"\n with open(file_name, mode) as f:\n data = pickle.load(f)\n return data\n\n @staticmethod\n def log_progress(line, outfile, mode=\"a\"):\n \"\"\"write data to a file\n \n Args:\n line: string representing data to be written out\n outfile: file path where data will be written/logged\n mode: specify writing options i.e. append, write\n \"\"\"\n with open(outfile, mode) as f:\n f.write(line)\n\n\nclass AStarNode(object):\n \"\"\"class representing A* node to be used with A* searcher and viterbi for generating k-decoded list\n \n Args:\n cost: float representing the score/unnormalized probability of a sequence up to given position\n position: integer representing the current position in the sequence\n pi_c: prefix or state code of the label\n label: label of the current position in a sequence\n frwdlink: a link to :class:`AStarNode` node\n \n Attributes:\n cost: float representing the score/unnormalized probability of a sequence up to given position\n position: integer representing the current position in the sequence\n pi_c: prefix or state code of the label\n label: label of the current position in a sequence\n frwdlink: a link to :class:`AStarNode` node\n \n \"\"\"\n\n def __init__(self, cost, position, pi_c, label, frwdlink):\n self.cost = cost\n self.position = position\n self.pi_c = pi_c\n self.label = label\n self.frwdlink = frwdlink\n\n def print_node(self):\n \"\"\"print the info about a node\"\"\"\n statement = \"cost: {}, position: {}, pi_code: {}, label: {}, \".format(\n self.cost, self.position, self.pi_c, self.label\n )\n if self.frwdlink:\n statement += \"forward_link: {}\".format(self.frwdlink)\n else:\n statement += \"forward_link: None\"\n print(statement)\n\n\nclass AStarAgenda(object):\n \"\"\"class containing a heap where instances of :class:`AStarNode` class will be pushed \n \n the push operation will use the score matrix (built using viterbi algorithm)\n representing the unnormalized probability of the sequences ending at every position \n with the different available prefixes/states\n \n Attributes:\n qagenda: queue where instances of :class:`AStarNode` are pushed\n entry_count: counter that keeps track of the entries and associate each entry(node)\n with a unique number. 
It is useful for resolving nodes with equal costs\n \n \"\"\"\n\n def __init__(self):\n self.qagenda = []\n self.entry_count = 0\n\n def push(self, astar_node, cost):\n \"\"\"push instance of :class:`AStarNode` with its associated cost to the heap\n \n Args:\n astar_node: instance of :class:`AStarNode` class\n cost: float representing the score/unnormalized probability of a sequence up to given position\n \"\"\"\n heapq.heappush(self.qagenda, (-cost, self.entry_count, astar_node))\n self.entry_count += 1\n\n def pop(self):\n \"\"\"pop nodes with highest score from the heap\n \"\"\"\n astar_node = heapq.heappop(self.qagenda)[-1]\n return astar_node\n\n\nclass FO_AStarSearcher(object):\n \"\"\"A* star searcher associated with first-order CRF model such as :class:`FirstOrderCRF`\n \n Args:\n Y_codebook_rev: a reversed version of dictionary comprising the set of states each assigned a unique code\n \n Attributes:\n Y_codebook_rev: a reversed version of dictionary comprising the set of states each assigned a unique code\n \"\"\"\n\n def __init__(self, Y_codebook_rev):\n self.Y_codebook_rev = Y_codebook_rev\n\n def infer_labels(self, top_node, back_track):\n \"\"\"decode sequence by inferring labels\n \n Args:\n top_node: instance of :class:`AStarNode` class\n back_track: dictionary containing back pointers built using dynamic programming algorithm\n \"\"\"\n Y_codebook_rev = self.Y_codebook_rev\n # decoding the sequence\n # print(\"we are decoding\")\n # top_node.print_node()\n y_c = top_node.pi_c\n pos = top_node.position\n Y_decoded = []\n Y_decoded.append(y_c)\n t = pos - 1\n while t > 0:\n y_c_tplus1 = Y_decoded[-1]\n y_c_t = back_track[t + 1, y_c_tplus1]\n Y_decoded.append(y_c_t)\n t -= 1\n Y_decoded.reverse()\n Y_decoded = [Y_codebook_rev[y_code] for y_code in Y_decoded]\n\n while top_node.frwdlink:\n y = top_node.frwdlink.label\n Y_decoded.append(y)\n top_node = top_node.frwdlink\n # print(Y_decoded)\n return Y_decoded\n\n def search(self, alpha, back_track, T, K):\n \"\"\"A* star searcher uses the score matrix (built using viterbi algorithm) to decode top-K list of sequences\n \n Args:\n alpha: score matrix build using the viterbi algorithm\n back_track: back_pointers dictionary tracking the best paths to every state\n T: last decoded position of a sequence (in this context, it is the alpha.shape[0])\n K: number of top decoded sequences to be returned\n \n Returns:\n topk_list: top-K list of decoded sequences\n \n \n \"\"\"\n # push the best astar nodes to the queue (i.e. the states at time T)\n q = AStarAgenda()\n r = set()\n c = 0\n Y_codebook_rev = self.Y_codebook_rev\n # create nodes from the states at time T\n for y_c in Y_codebook_rev:\n cost = alpha[T, y_c]\n pos = T\n frwdlink = None\n label = Y_codebook_rev[y_c]\n node = AStarNode(cost, pos, y_c, label, frwdlink)\n # node.print_node()\n q.push(node, cost)\n\n track = []\n topk_list = []\n try:\n while c < K:\n # print(\"heap size \", len(q.qagenda))\n top_node = q.pop()\n track.append(top_node)\n\n for i in reversed(range(2, top_node.position + 1)):\n # best previous state at pos = i-1\n curr_y_c = top_node.pi_c\n bestprev_y_c = back_track[i, curr_y_c]\n pos = i - 1\n for prev_y_c in Y_codebook_rev:\n # create a new astar node\n if prev_y_c != bestprev_y_c:\n label = Y_codebook_rev[prev_y_c]\n cost = alpha[pos, prev_y_c]\n s = AStarNode(cost, pos, prev_y_c, label, top_node)\n q.push(s, cost)\n\n # create the backlink of the previous top_node (i.e. 
create a node from the best_y_c)\n cost = alpha[pos, bestprev_y_c]\n label = Y_codebook_rev[bestprev_y_c]\n top_node = AStarNode(cost, pos, y_c, label, top_node)\n\n # decode and check if it is not saved already in topk list\n y_labels = self.infer_labels(track[-1], back_track)\n # print(y_labels)\n signature = \"\".join(y_labels)\n if signature not in r:\n r.add(signature)\n topk_list.append(y_labels)\n c += 1\n track.pop()\n except (KeyError, IndexError) as e:\n # consider logging the error\n print(e)\n\n finally:\n # print('r ', r)\n # print('topk ', topk_list)\n return topk_list\n\n\nclass HO_AStarSearcher(object):\n \"\"\"A* star searcher associated with higher-order CRF model such as :class:`HOCRFAD`\n \n Args:\n P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set\n e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``\n P_elems: dictionary comprising the composing elements of every prefix in the `P` set\n e.g. ``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``\n\n Attributes:\n P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set\n e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``\n P_elems: dictionary comprising the composing elements of every prefix in the `P` set\n e.g. ``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``\n \"\"\"\n\n def __init__(self, P_codebook_rev, P_elems):\n self.P_codebook_rev = P_codebook_rev\n self.P_elems = P_elems\n\n def get_node_label(self, pi_code):\n \"\"\"get the the label/state given a prefix code\n \n Args:\n pi_code: prefix code which is an element of :attr:`P_codebook_rev`\n \"\"\"\n\n pi = self.P_codebook_rev[pi_code]\n y = self.P_elems[pi][-1]\n return y\n\n def infer_labels(self, top_node, back_track):\n \"\"\"decode sequence by inferring labels\n \n Args:\n top_node: instance of :class:`AStarNode` class\n back_track: dictionary containing back pointers tracking the best paths to every state\n \"\"\"\n # decoding the sequence\n # print(\"we are decoding\")\n # top_node.print_node()\n y = top_node.label\n pi_c = top_node.pi_c\n pos = top_node.position\n Y_decoded = []\n Y_decoded.append((pi_c, y))\n # print(\"t={}, p_T_code={}, p_T={}, y_T ={}\".format(T, p_T_code, p_T, y_T))\n t = pos - 1\n while t > 0:\n p_tplus1_c = Y_decoded[-1][0]\n p_t_c, y_t = back_track[t + 1, p_tplus1_c]\n # print(\"t={}, (t+1, p_t_code)=({}, {})->({},{})\".format(t, t+1, P_codebook[p_tplus1], p_t, y_t))\n Y_decoded.append((p_t_c, y_t))\n t -= 1\n Y_decoded.reverse()\n Y_decoded = [y for (__, y) in Y_decoded]\n\n while top_node.frwdlink:\n y = top_node.frwdlink.label\n Y_decoded.append(y)\n top_node = top_node.frwdlink\n # print(Y_decoded)\n return Y_decoded\n\n def search(self, alpha, back_track, T, K):\n \"\"\"A* star searcher uses the score matrix (built using viterbi algorithm) to decode top-K list of sequences\n \n Args:\n alpha: score matrix build using the viterbi algorithm\n back_track: back_pointers dictionary tracking the best paths to every state\n T: last decoded position of a sequence (in this context, it is the alpha.shape[0])\n K: number of top decoded sequences to be returned\n \n Returns:\n topk_list: top-K list of decoded sequences\n \n \n \"\"\"\n # push the best astar nodes to the queue (i.e. 
the pi's at time T)\n q = AStarAgenda()\n r = set()\n c = 0\n P_codebook_rev = self.P_codebook_rev\n # create nodes from the pi's at time T\n for pi_c in P_codebook_rev:\n cost = alpha[T, pi_c]\n pos = T\n frwdlink = None\n label = self.get_node_label(pi_c)\n node = AStarNode(cost, pos, pi_c, label, frwdlink)\n # node.print_node()\n q.push(node, cost)\n\n track = []\n topk_list = []\n try:\n while c < K:\n # print(\"heap size \", len(q.qagenda))\n top_node = q.pop()\n track.append(top_node)\n\n for i in reversed(range(2, top_node.position + 1)):\n best_prev_pi_c, best_y = back_track[i, top_node.pi_c]\n pos = i - 1\n for prev_pi_c in P_codebook_rev:\n # create a new astar node\n if prev_pi_c != best_prev_pi_c:\n label = self.get_node_label(prev_pi_c)\n cost = alpha[pos, prev_pi_c]\n s = AStarNode(cost, pos, prev_pi_c, label, top_node)\n q.push(s, cost)\n\n # create the backlink of the top_node\n cost = alpha[pos, best_prev_pi_c]\n top_node = AStarNode(cost, pos, best_prev_pi_c, best_y, top_node)\n\n # decode and check if it is not saved already in topk list\n y_labels = self.infer_labels(track[-1], back_track)\n # print(y_labels)\n sig = \"\".join(y_labels)\n if sig not in r:\n r.add(sig)\n topk_list.append(y_labels)\n c += 1\n track.pop()\n except (KeyError, IndexError) as e:\n # consider logging the error\n print(e)\n\n finally:\n # print('r ', r)\n # print('topk ', topk_list)\n return topk_list\n\n\nclass HOSemi_AStarSearcher(object):\n \"\"\"A* star searcher associated with higher-order CRF model such as :class:`HOSemiCRFAD`\n \n Args:\n P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set\n e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``\n P_elems: dictionary comprising the composing elements of every prefix in the `P` set\n e.g. ``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``\n\n Attributes:\n P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set\n e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``\n P_elems: dictionary comprising the composing elements of every prefix in the `P` set\n e.g. 
``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``\n \"\"\"\n\n def __init__(self, P_codebook_rev, pi_elems):\n self.P_codebook_rev = P_codebook_rev\n self.pi_elems = pi_elems\n\n def get_node_label(self, pi_code):\n \"\"\"get the the label/state given a prefix code\n \n Args:\n pi_code: prefix code which is an element of :attr:`P_codebook_rev`\n \"\"\"\n pi = self.P_codebook_rev[pi_code]\n y = self.pi_elems[pi][-1]\n return y\n\n def infer_labels(self, top_node, back_track):\n \"\"\"decode sequence by inferring labels\n \n Args:\n top_node: instance of :class:`AStarNode` class\n back_track: dictionary containing back pointers tracking the best paths to every state\n \"\"\"\n # decoding the sequence\n # print(\"we are decoding\")\n # top_node.print_node()\n y = top_node.label\n pi_c = top_node.pi_c\n pos = top_node.position\n Y_decoded = []\n\n d, pt_c, yt = back_track[pos, pi_c]\n for _ in range(d + 1):\n Y_decoded.append(y)\n\n t = pos - d - 1\n while t > 0:\n new_d, new_pt_c, new_yt = back_track[t, pt_c]\n for _ in range(new_d + 1):\n Y_decoded.append(yt)\n t = t - new_d - 1\n pt_c = new_pt_c\n yt = new_yt\n Y_decoded.reverse()\n\n while top_node.frwdlink:\n y = top_node.frwdlink.label\n Y_decoded.append(y)\n top_node = top_node.frwdlink\n # print(Y_decoded)\n return Y_decoded\n\n def search(self, alpha, back_track, T, K):\n \"\"\"A* star searcher uses the score matrix (built using viterbi algorithm) to decode top-K list of sequences\n \n Args:\n alpha: score matrix build using the viterbi algorithm\n back_track: back_pointers dictionary tracking the best paths to every state\n T: last decoded position of a sequence (in this context, it is the alpha.shape[0])\n K: number of top decoded sequences to be returned\n \n Returns:\n topk_list: top-K list of decoded sequences\n \n \n \"\"\"\n # push the best astar nodes to the queue (i.e. 
the pi's at time T)\n q = AStarAgenda()\n r = set()\n c = 0\n P_codebook_rev = self.P_codebook_rev\n\n # create nodes from the pi's at time T\n for pi_c in P_codebook_rev:\n cost = alpha[T, pi_c]\n pos = T\n frwdlink = None\n label = self.get_node_label(pi_c)\n node = AStarNode(cost, pos, pi_c, label, frwdlink)\n # node.print_node()\n q.push(node, cost)\n\n track = []\n topk_list = []\n try:\n while c < K:\n # print(\"heap size \", len(q.qagenda))\n top_node = q.pop()\n track.append(top_node)\n while True:\n curr_pos = top_node.position\n if curr_pos == 1:\n break\n d, best_prev_pi_c, best_prev_y = back_track[curr_pos, top_node.pi_c]\n prev_pos = curr_pos - d - 1\n for prev_pi_c in P_codebook_rev:\n # create a new astar node\n if prev_pi_c != best_prev_pi_c:\n label = self.get_node_label(prev_pi_c)\n cost = alpha[prev_pos, prev_pi_c]\n s = AStarNode(cost, prev_pos, prev_pi_c, label, top_node)\n q.push(s, cost)\n\n # create the backlink of the top_node\n cost = alpha[prev_pos, best_prev_pi_c]\n top_node = AStarNode(\n cost, prev_pos, best_prev_pi_c, best_prev_y, top_node\n )\n\n # decode and check if it is not saved already in topk list\n y_labels = self.infer_labels(track[-1], back_track)\n # print(y_labels)\n sig = \"\".join(y_labels)\n if sig not in r:\n r.add(sig)\n topk_list.append(y_labels)\n c += 1\n track.pop()\n except (KeyError, IndexError) as e:\n # consider logging the error\n print(e)\n\n finally:\n # print('r ', r)\n # print('topk ', topk_list)\n return topk_list\n\n\nclass TemplateGenerator(object):\n \"\"\"template generator class for feature/function template generation\n \"\"\"\n\n def __init__(self):\n pass\n\n def generate_template_XY(self, attr_name, x_spec, y_spec, template):\n r\"\"\"generate template XY for the feature extraction\n \n Args:\n attr_name: string representing the attribute name of the atomic observations/tokens\n x_spec: tuple of the form (n-gram, range)\n that is we can specify the n-gram features required in a specific range/window\n for an observation token ``attr_name``\n y_spec: string specifying how to join/combine the features on the X observation level\n with labels on the Y level. \n \n Example of passed options would be:\n - one state (i.e. current state) by passing ``1-state`` or \n - two states (i.e. current and previous state) by passing ``2-states`` or\n - one and two states (i.e. mix/combine observation features with one state model and two states models)\n by passing ``1-state:2-states``. Higher order models support models with states > 2 such as ``3-states`` and above. \n template: dictionary that accumulates the generated feature template for all attributes\n \n Example:\n \n suppose we have `word` attribute referenced by 'w' and we need to use the current word\n with the current label (i.e. unigram of words with the current label) in a range of (0,1)\n \n ::\n \n templateXY = {}\n generate_template_XY('w', ('1-gram', range(0, 1)), '1-state', templateXY)\n \n we can also specify a two states/labels features at the Y level\n \n ::\n \n generate_template_XY('w', ('1-gram', range(0, 1)), '1-state:2-states', templateXY)\n \n .. 
note ::\n this can be applied for every attribute name and accumulated in the `template` dictionary\n \"\"\"\n ngram_options, wsize = x_spec\n templateX = self._traverse_x(attr_name, ngram_options, wsize)\n templateY = self.generate_template_Y(y_spec)\n templateXY = self._mix_template_XY(templateX, templateY)\n # update the template we are building\n self._update_template(template, templateXY)\n\n def _update_template(self, template, templateXY):\n \"\"\"update the accumulated template with the current generated templateXY\n \n Args:\n template: dictionary of the accumulated template for the different offsets\n and attribute names\n templateXY: dictionary of the form ``{attr_name:{x_offset:(y_offsets)}}``\n \"\"\"\n for attr_name in templateXY:\n if attr_name in template:\n for x_offset in templateXY[attr_name]:\n template[attr_name][x_offset] = templateXY[attr_name][x_offset]\n else:\n template[attr_name] = templateXY[attr_name]\n\n def _traverse_x(self, attr_name, ngram_options, wsize):\n \"\"\"generate template on the X observation level only\n \n Args:\n attr_name: string representing the attribute name of the atomic observations/tokens\n ngram_options: string specifying the n-grams (i.e. ``1-gram``) it also supports multiple\n specification such as ``1-gram:2-gram`` where each is separated by a colon\n wsize: a range specifying the window size where the template operates\n \n \"\"\"\n options = ngram_options.split(\":\")\n l = list(wsize)\n template = {attr_name: {}}\n for option in options:\n n = int(option.split(\"-\")[0])\n ngram_list = self.generate_ngram(l, n)\n for offset in ngram_list:\n template[attr_name][offset] = None\n return template\n\n def generate_template_Y(self, ngram_options):\n \"\"\"generate template on the Y labels level\n \n Args:\n ngram_options: string specifying the number of states to be use (i.e. ``1-state``).\n It also supports multiple specification such as ``1-state:2-states`` \n where each is separated by a colon\n \n \"\"\"\n template = {\"Y\": []}\n options = ngram_options.split(\":\")\n for option in options:\n max_order = int(option.split(\"-\")[0])\n template[\"Y\"] += self._traverse_y(max_order, accumulative=False)[\"Y\"]\n return template\n\n @staticmethod\n def _traverse_y(max_order, accumulative=True):\n \"\"\"generate the y template\"\"\"\n attr_name = \"Y\"\n template = {attr_name: []}\n if accumulative:\n for j in range(max_order):\n offsets_y = [-i for i in range(j + 1)]\n offsets_y = tuple(reversed(offsets_y))\n template[attr_name].append(offsets_y)\n else:\n offsets_y = [-i for i in range(max_order)]\n offsets_y = tuple(reversed(offsets_y))\n template[attr_name].append(offsets_y)\n\n return template\n\n @staticmethod\n def _mix_template_XY(templateX, templateY):\n \"\"\"mix and join the template on the X observation level with the Y level\n \n Args:\n templateX: dictionary of the form ``{attr_name:{x_offset:None}}``\n e.g. ``{'w': {(0,): None}}``\n templateY: dictionary of the form ``{'Y':[y_offset]}``\n e.g. ``{'Y': [(0,), (-1, 0)]}``\n .. 
note::\n \n - x_offset is a tuple of offsets representing the ngram options needed \n such as (0,) for unigram and (-1,0) for bigram\n \n - y_offset is a tuple of offsets representing the number of states options needed \n such as (0,) for 1-state and (-1,0) for 2-states and (-2,-1,0) for 3-states\n \"\"\"\n template_XY = deepcopy(templateX)\n for attr_name in template_XY:\n for offset_x in template_XY[attr_name]:\n template_XY[attr_name][offset_x] = tuple(templateY[\"Y\"])\n return template_XY\n\n @staticmethod\n def generate_ngram(l, n):\n \"\"\"n-gram generator based on the length of the window and the ngram option\n \n Args:\n l: list of positions of the range representing the window size (i.e. list(wsize))\n n: integer representing the n-gram option (i.e. 1 for unigram, 2 for bigram, etc..)\n \"\"\"\n ngram_list = []\n for i in range(0, len(l)):\n elem = tuple(l[i : i + n])\n if len(elem) != n:\n break\n ngram_list.append(elem)\n\n return ngram_list\n\n @staticmethod\n def generate_combinations(n):\n \"\"\"generates all possible combinations based on the maximum number of ngrams n\n \n Args:\n n: integer specifying the maximum/greatest ngram option\n \n \"\"\"\n option_names = []\n start = 1\n for i in range(start, n + 1):\n option_names.append(\"{}-gram\".format(i))\n\n config = {}\n for i in range(start, n + 1):\n config[i] = list(combinations(option_names, i))\n\n config_combinations = {}\n for c_list in config.values():\n for c_tup in c_list:\n key_name = \":\".join(c_tup)\n config_combinations[key_name] = set()\n elemkeys = config_combinations.keys()\n for option_i in config_combinations:\n s = config_combinations[option_i]\n for option_j in elemkeys:\n s.add(option_j)\n config_combinations[option_i] = s\n return config_combinations\n\n\nclass BoundNode(object):\n \"\"\"boundary entity class used when generating all possible partitions within specified constraint\n \n Args:\n parent: instance of :class:`BoundNode` \n boundary: tuple (u,v) representing the current boundary\n \"\"\"\n\n def __init__(self, parent, boundary):\n self.parent = parent\n self.boundary = boundary\n self.children = []\n\n def add_child(self, child):\n \"\"\"add link to the child nodes\"\"\"\n self.children.append(child)\n\n def get_child(self):\n \"\"\"retrieve child nodes\"\"\"\n return self.children.pop()\n\n def get_signature(self):\n \"\"\"retrieve the id of the node\"\"\"\n return id(self)\n\n\ndef generate_partitions(\n boundary, L, patt_len, bound_node_map, depth_node_map, parent_node, depth=1\n):\n \"\"\"generate all possible partitions within the range of segment length and model order\n \n it transforms the partitions into a tree of nodes starting from the root node\n that uses `boundary` argument in its construction\n \n Args:\n boundary: tuple (u,v) representing the current boundary in a sequence\n L: integer representing the maximum length a segment could be constructed\n patt_len: integer representing the maximum model order\n bound_node_map: dictionary that keeps track of all possible partitions represented as\n instances of :class:`BoundNode`\n depth_node_map: dictionary that arranges the generated nodes by their depth in the tree\n parent_node: instance of :class:`BoundNode` or None in case of the root node\n depth: integer representing the maximum depth of the tree to be reached before stopping \n \"\"\"\n if depth >= patt_len:\n return\n\n if parent_node:\n if boundary in bound_node_map:\n curr_node = bound_node_map[boundary]\n else:\n curr_node = BoundNode(parent_node, boundary)\n 
bound_node_map[boundary] = curr_node\n if depth in depth_node_map:\n depth_node_map[depth].append(curr_node)\n else:\n depth_node_map[depth] = [curr_node]\n else:\n # setup root node\n curr_node = BoundNode(None, boundary)\n bound_node_map[boundary] = curr_node\n depth_node_map[depth] = [curr_node]\n\n u = boundary[0] - 1\n v = u\n depth += 1\n\n for d in range(L):\n if u - d < 1:\n break\n upd_boundary = (u - d, v)\n if upd_boundary in bound_node_map:\n child = bound_node_map[upd_boundary]\n else:\n child = BoundNode(curr_node, upd_boundary)\n bound_node_map[upd_boundary] = child\n if depth in depth_node_map:\n depth_node_map[depth].append(child)\n else:\n depth_node_map[depth] = [child]\n curr_node.add_child(child)\n generate_partitions(\n upd_boundary, L, patt_len, bound_node_map, depth_node_map, child, depth\n )\n\n\ndef generate_partition_boundaries(depth_node_map):\n \"\"\"generate partitions of the boundaries generated in :func:`generate_partitions` function\n \n Args:\n depth_node_map: dictionary that arranges the generated nodes by their depth in the tree\n it is constructed using :func:`generate_partitions` function\n \"\"\"\n g = {}\n depths = sorted(depth_node_map, reverse=True)\n\n for depth in depths:\n g[depth] = []\n nodes = depth_node_map[depth]\n for curr_node in nodes:\n l = []\n l.append(curr_node.boundary)\n while True:\n curr_node = curr_node.parent\n if curr_node:\n l.append(curr_node.boundary)\n else:\n g[depth].append(l)\n break\n\n return g\n\n\ndef delete_directory(directory):\n if os.path.isdir(directory):\n shutil.rmtree(directory)\n\n\ndef delete_file(filepath):\n check = os.path.isfile(filepath)\n if check:\n os.remove(filepath)\n\n\ndef create_directory(folder_name, directory=\"current\"):\n \"\"\"create directory/folder (if it does not exist) and returns the path of the directory\n \n Args:\n folder_name: string representing the name of the folder to be created\n \n Keyword Arguments:\n directory: string representing the directory where to create the folder\n if `current` then the folder will be created in the current directory\n \"\"\"\n if directory == \"current\":\n path_current_dir = os.path.dirname(__file__)\n else:\n path_current_dir = directory\n path_new_dir = os.path.join(path_current_dir, folder_name)\n if not os.path.exists(path_new_dir):\n os.makedirs(path_new_dir)\n return path_new_dir\n\n\ndef generate_datetime_str():\n \"\"\"generate string composed of the date and time\"\"\"\n datetime_now = datetime.now()\n datetime_str = \"{}_{}_{}-{}_{}_{}_{}\".format(\n datetime_now.year,\n datetime_now.month,\n datetime_now.day,\n datetime_now.hour,\n datetime_now.minute,\n datetime_now.second,\n datetime_now.microsecond,\n )\n return datetime_str\n\n\n# def vectorized_logsumexp(vec):\n# \"\"\"vectorized version of log sum exponential operation\n#\n# Args:\n# vec: numpy vector where entries are in the log domain\n# \"\"\"\n# with numpy.errstate(invalid='warn'):\n# max_a = numpy.max(vec)\n# try:\n# res = max_a + numpy.log(numpy.sum(numpy.exp(vec - max_a)))\n# except Warning:\n# res = max_a\n# return(res)\n\n\ndef vectorized_logsumexp(vec):\n \"\"\"vectorized version of log sum exponential operation\n \n Args:\n vec: numpy vector where entries are in the log domain\n \"\"\"\n max_a = numpy.max(vec)\n if max_a != -numpy.inf:\n return max_a + numpy.log(numpy.sum(numpy.exp(vec - max_a)))\n # case where max_a == -numpy.inf\n return max_a\n\n\ndef generate_updated_model(\n modelparts_dir,\n modelrepr_class,\n model_class,\n aextractor_obj,\n 
fextractor_class,\n seqrepresenter_class,\n ascaler_class=None,\n):\n \"\"\"update/regenerate CRF models using the saved parts/components\n \n Args:\n modelparts_dir: string representing the directory where model parts are saved\n modelrepr_class: name of the model representation class to be used which has \n suffix `ModelRepresentation` such as :class:`HOCRFADModelRepresentation`\n model_class: name of the CRF model class such as :class:`HOCRFAD`\n aextractor_class: name of the attribute extractor class such as :class:`NERSegmentAttributeExtractor`\n fextractor_class: name of the feature extractor class used such as :class:`HOFeatureExtractor`\n seqrepresenter_class: name of the sequence representer class such as :class:`SeqsRepresenter`\n ascaler_class: name of the attribute scaler class such as :class:`AttributeScaler`\n \n .. note::\n \n This function is equivalent to :func:`generate_trained_model` function. However, this function\n uses explicit specification of the arguments (i.e. specifying explicitly the classes to be used)\n \n \n \"\"\"\n from pyseqlab.attributes_extraction import GenericAttributeExtractor\n\n ycodebook = ReaderWriter.read_data(os.path.join(modelparts_dir, \"MR_Ycodebook\"))\n mfeatures = ReaderWriter.read_data(os.path.join(modelparts_dir, \"MR_modelfeatures\"))\n mfeatures_codebook = ReaderWriter.read_data(\n os.path.join(modelparts_dir, \"MR_modelfeaturescodebook\")\n )\n L = ReaderWriter.read_data(os.path.join(modelparts_dir, \"MR_L\"))\n\n # generate model representation\n new_mrepr = modelrepr_class()\n new_mrepr.modelfeatures = mfeatures\n new_mrepr.modelfeatures_codebook = mfeatures_codebook\n new_mrepr.Y_codebook = ycodebook\n new_mrepr.L = L\n new_mrepr.generate_instance_properties()\n\n # generate attribute extractor\n if type(aextractor_obj) == type(GenericAttributeExtractor): # case it is a class\n new_attrextractor = aextractor_obj()\n else: # case it is an instance of a class\n new_attrextractor = aextractor_obj\n\n # generate feature extractor\n templateX = ReaderWriter.read_data(os.path.join(modelparts_dir, \"FE_templateX\"))\n templateY = ReaderWriter.read_data(os.path.join(modelparts_dir, \"FE_templateY\"))\n new_fextractor = fextractor_class(templateX, templateY, new_attrextractor.attr_desc)\n\n # generate sequence representer\n new_seqrepr = seqrepresenter_class(new_attrextractor, new_fextractor)\n\n # generate attribute scaler if applicable\n if ascaler_class:\n scaling_info = ReaderWriter.read_data(\n os.path.join(modelparts_dir, \"AS_scalinginfo\")\n )\n method = ReaderWriter.read_data(os.path.join(modelparts_dir, \"AS_method\"))\n new_attrscaler = ascaler_class(scaling_info, method)\n new_seqrepr.attr_scaler = new_attrscaler\n\n # generate crf instance\n new_crfmodel = model_class(new_mrepr, new_seqrepr, {})\n new_crfmodel.weights = ReaderWriter.read_data(\n os.path.join(modelparts_dir, \"weights\")\n )\n return new_crfmodel\n\n\ndef generate_trained_model(modelparts_dir, aextractor_obj):\n \"\"\"regenerate trained CRF models using the saved trained model parts/components\n \n Args:\n modelparts_dir: string representing the directory where model parts are saved\n aextractor_class: name of the attribute extractor class such as :class:`NERSegmentAttributeExtractor`\n\n \"\"\"\n # parse the class description file\n class_desc = []\n with open(os.path.join(modelparts_dir, \"class_desc.txt\"), \"r\") as f:\n for line in f:\n class_desc.append(line.strip())\n\n from pyseqlab.features_extraction import (\n HOFeatureExtractor,\n 
FOFeatureExtractor,\n SeqsRepresenter,\n )\n\n seqrepresenter_class = SeqsRepresenter\n if class_desc[1] == \"HOCRFAD\":\n from pyseqlab.ho_crf_ad import HOCRFAD, HOCRFADModelRepresentation\n\n modelrepr_class = HOCRFADModelRepresentation\n model_class = HOCRFAD\n fextractor_class = HOFeatureExtractor\n elif class_desc[1] == \"HOCRF\":\n from pyseqlab.ho_crf import HOCRF, HOCRFModelRepresentation\n\n modelrepr_class = HOCRFModelRepresentation\n model_class = HOCRF\n fextractor_class = HOFeatureExtractor\n elif class_desc[1] == \"HOSemiCRFAD\":\n from pyseqlab.hosemi_crf_ad import HOSemiCRFAD, HOSemiCRFADModelRepresentation\n\n modelrepr_class = HOSemiCRFADModelRepresentation\n model_class = HOSemiCRFAD\n fextractor_class = HOFeatureExtractor\n elif class_desc[1] == \"HOSemiCRF\":\n from pyseqlab.hosemi_crf import HOSemiCRF, HOSemiCRFModelRepresentation\n\n modelrepr_class = HOSemiCRFModelRepresentation\n model_class = HOSemiCRF\n fextractor_class = HOFeatureExtractor\n elif class_desc[1] == \"FirstOrderCRF\":\n from pyseqlab.fo_crf import FirstOrderCRF, FirstOrderCRFModelRepresentation\n\n modelrepr_class = FirstOrderCRFModelRepresentation\n model_class = FirstOrderCRF\n fextractor_class = FOFeatureExtractor\n\n # generate attribute scaler if applicable\n if class_desc[-1] != \"None\":\n from pyseqlab.attributes_extraction import AttributeScaler\n\n ascaler_class = AttributeScaler\n else:\n ascaler_class = None\n\n trained_model = generate_updated_model(\n modelparts_dir,\n modelrepr_class,\n model_class,\n aextractor_obj,\n fextractor_class,\n seqrepresenter_class,\n ascaler_class,\n )\n\n return trained_model\n\n\ndef split_data(seqs_id, options):\n r\"\"\"utility function for splitting dataset (i.e. training/testing and cross validation)\n \n Args:\n seqs_id: list of processed sequence ids\n options: dictionary comprising of the options on how to split data\n \n Example:\n To perform cross validation, we need to specify\n - cross-validation for the `method`\n - the number of folds for the `k_fold`\n \n ::\n \n options = {'method':'cross_validation',\n 'k_fold':number\n }\n \n To perform random splitting, we need to specify\n - random for the `method`\n - number of splits for the `num_splits`\n - size of the training set in percentage for the `trainset_size`\n \n ::\n \n options = {'method':'random',\n 'num_splits':number,\n 'trainset_size':percentage\n }\n \"\"\"\n N = len(seqs_id)\n data_split = {}\n method = options.get(\"method\")\n if method == None:\n method = \"cross_validation\"\n if method == \"cross_validation\":\n k_fold = options.get(\"k_fold\")\n if type(k_fold) != int:\n # use 10 fold cross validation\n k_fold = 10\n elif k_fold <= 0:\n k_fold = 10\n batch_size = int(numpy.ceil(N / k_fold))\n test_seqs = seqs_id.copy()\n seqs_len = len(test_seqs)\n # numpy.random.shuffle(test_seqs)\n indx = numpy.arange(0, seqs_len + 1, batch_size)\n if indx[-1] < seqs_len:\n indx = numpy.append(indx, [seqs_len])\n\n for i in range(len(indx) - 1):\n data_split[i] = {}\n current_test_seqs = test_seqs[indx[i] : indx[i + 1]]\n data_split[i][\"test\"] = current_test_seqs\n data_split[i][\"train\"] = list(set(seqs_id) - set(current_test_seqs))\n\n elif method == \"random\":\n num_splits = options.get(\"num_splits\")\n if type(num_splits) != int:\n num_splits = 5\n trainset_size = options.get(\"trainset_size\")\n if type(trainset_size) != int:\n # 80% of the data set is training and 20% for testing\n trainset_size = 80\n elif trainset_size <= 0 or trainset_size >= 100:\n trainset_size = 80\n for 
i in range(num_splits):\n data_split[i] = {}\n current_train_seqs = numpy.random.choice(\n seqs_id, int(N * trainset_size / 100), replace=False\n )\n data_split[i][\"train\"] = list(current_train_seqs)\n data_split[i][\"test\"] = list(set(seqs_id) - set(current_train_seqs))\n\n return data_split\n\n\n\"\"\"split data based on sequences length\n we need to execute the three functions in order:\n (1) :func:`group_seqs_by_length`\n (2) :func:`weighted_sample`\n (3) :func:`aggregate_weightedsample`\n\"\"\"\n\n\ndef group_seqs_by_length(seqs_info):\n \"\"\"group sequences by their length\n \n Args:\n seqs_info: dictionary comprsing info about the sequences\n it has this form {seq_id:{T:length of sequence}}\n \n .. note::\n \n sequences that are with unique sequence length are grouped together as singeltons\n \"\"\"\n grouped_seqs = {}\n for seq_id, seq_info in seqs_info.items():\n T = seq_info[\"T\"]\n if T in grouped_seqs:\n grouped_seqs[T].append(seq_id)\n else:\n grouped_seqs[T] = [seq_id]\n # loop to regroup single sequences\n singelton = [T for T, seqs_id in grouped_seqs.items() if len(seqs_id) == 1]\n singelton_seqs = []\n for T in singelton:\n singelton_seqs += grouped_seqs[T]\n del grouped_seqs[T]\n\n grouped_seqs[\"singleton\"] = singelton_seqs\n return grouped_seqs\n\n\ndef weighted_sample(grouped_seqs, trainset_size):\n \"\"\"get a random split of the grouped sequences\n \n Args:\n grouped_seqs: dictionary of the grouped sequences based on their length\n it is obtained using :func:`group_seqs_by_length` function\n trainset_size: integer representing the size of the training set in percentage\n \n \"\"\"\n options = {\"method\": \"random\", \"num_splits\": 1, \"trainset_size\": trainset_size}\n wsample = {}\n for group_var, seqs_id in grouped_seqs.items():\n # quota = trainset_size*count_seqs[group_var]/total\n data_split = split_data(seqs_id, options)\n wsample[group_var] = data_split[0]\n return wsample\n\n\ndef aggregate_weightedsample(w_sample):\n \"\"\"represent the random picked sample for training/testing\n \n Args:\n w_sample: dictionary representing a random split of the grouped sequences\n by their length. 
it is obtained using :func:`weighted_sample` function\n \"\"\"\n wdata_split = {\"train\": [], \"test\": []}\n for grouping_var in w_sample:\n for data_cat in w_sample[grouping_var]:\n wdata_split[data_cat] += w_sample[grouping_var][data_cat]\n return {0: wdata_split}\n\n\n##################################\n\n\ndef nested_cv(seqs_id, outer_kfold, inner_kfold):\n \"\"\"generate nested cross-validation division of sequence ids\n \"\"\"\n outer_split = split_data(\n seqs_id, {\"method\": \"cross_validation\", \"k_fold\": outer_kfold}\n )\n cv_hierarchy = {}\n for outerfold, outer_datasplit in outer_split.items():\n cv_hierarchy[\"{}_{}\".format(\"outer\", outerfold)] = outer_datasplit\n curr_train_seqs = outer_datasplit[\"train\"]\n inner_split = split_data(\n curr_train_seqs, {\"method\": \"cross_validation\", \"k_fold\": inner_kfold}\n )\n for innerfold, inner_datasplit in inner_split.items():\n cv_hierarchy[\n \"{}_{}_{}_{}\".format(\"outer\", outerfold, \"inner\", innerfold)\n ] = inner_datasplit\n return cv_hierarchy\n\n\ndef get_conll00():\n current_dir = os.path.dirname(os.path.realpath(__file__))\n root_dir = os.path.dirname(current_dir)\n files_info = {\n \"train_short_main.txt\": (\"main\", True, \" \"),\n \"train_short_none.txt\": ((\"w\", \"pos\"), True, \" \"),\n \"train_short_per_sequence.txt\": (\"per_sequence\", True, \" \"),\n }\n for file_name in files_info:\n parser = DataFileParser()\n print(file_name)\n file_path = os.path.join(root_dir, \"tests\", \"dataset\", \"conll00\", file_name)\n for seq in parser.read_file(\n file_path,\n header=files_info[file_name][0],\n y_ref=files_info[file_name][1],\n column_sep=files_info[file_name][2],\n ):\n print(seq)\n\n\nif __name__ == \"__main__\":\n pass\n # get_conll00()\n",
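The utilities module above is mostly driven through `TemplateGenerator.generate_template_XY` and `split_data`. The sketch below simply replays the two docstring examples end to end; it is illustrative only and assumes the module is importable as `pyseqlab.utilities` (matching the package's own relative import `from .utilities import ...`).

```python
# Hypothetical usage sketch for the utilities shown above (pyseqlab.utilities assumed importable).
from pyseqlab.utilities import TemplateGenerator, split_data

# Feature template: unigram of the word attribute 'w' in a window of size 1,
# joined with one-state and two-state label patterns (same call as in the docstring example).
template_XY = {}
TemplateGenerator().generate_template_XY(
    "w", ("1-gram", range(0, 1)), "1-state:2-states", template_XY
)
# Expected shape: {'w': {(0,): ((0,), (-1, 0))}}
print(template_XY)

# Data splitting on a toy list of sequence ids.
seqs_id = list(range(1, 21))
cv_split = split_data(seqs_id, {"method": "cross_validation", "k_fold": 5})
rand_split = split_data(seqs_id, {"method": "random", "num_splits": 1, "trainset_size": 80})
print(sorted(cv_split))             # fold indices: [0, 1, 2, 3, 4]
print(len(rand_split[0]["train"]))  # 16 training ids (80% of 20)
```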
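Similarly, a small numerical check of `vectorized_logsumexp` illustrates why the max-shift trick is used for the log-domain sums in the CRF code that follows; this is a sketch under the same import assumption.

```python
# Hypothetical numerical check of vectorized_logsumexp (pyseqlab.utilities assumed importable).
import numpy
from pyseqlab.utilities import vectorized_logsumexp

vec = numpy.array([-1000.0, -1001.0, -999.5])
naive = numpy.log(numpy.sum(numpy.exp(vec)))   # exp underflows to 0 -> -inf (numpy warns)
stable = vectorized_logsumexp(vec)             # approx. -998.90, no underflow
print(naive, stable)

# All -inf entries are passed through unchanged (the max_a == -inf branch).
print(vectorized_logsumexp(numpy.full(3, -numpy.inf)))  # -inf
```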
"\"\"\"\n@author: ahmed allam <[email protected]>\n\n\"\"\"\n\nimport os\nfrom copy import deepcopy\nfrom collections import OrderedDict\nimport numpy\nfrom .utilities import ReaderWriter, create_directory, vectorized_logsumexp\n\n\nclass LCRFModelRepresentation(object):\n \"\"\"Model representation that will hold data structures to be used in :class:`LCRF` class\n \n Attributes:\n modelfeatures: set of features defining the model\n modelfeatures_codebook: dictionary mapping each features in :attr:`modelfeatures` to a unique code\n Y_codebook: dictionary mapping the set of states (i.e. tags) to a unique code each\n L: length of longest segment\n Z_codebook: dictionary for the set Z, mapping each element to unique number/code \n Z_len: dictionary comprising the length of each element in :attr:`Z_codebook`\n Z_elems: dictionary comprising the composing elements of each member in the Z set (:attr:`Z_codebook`)\n Z_numchar: dictionary comprising the number of characters of each member in the Z set (:attr:`Z_codebook`)\n patts_len: set of lengths extracted from :attr:`Z_len` (i.e. set(Z_len.values()))\n max_patts_len: maximum pattern length used in the model\n modelfeatures_inverted: inverted model features (i.e inverting the :attr:`modelfeatures` dictionary)\n ypatt_features: state features (i.e. y pattern features) that depend only on the states\n ypatt_activestates: possible/potential activated y patterns/features using the observation features\n num_features: total number of features in the model\n num_states: total number of states in the model\n \"\"\"\n\n def __init__(self):\n self.modelfeatures = None\n self.modelfeatures_codebook = None\n self.Y_codebook = None\n self.L = None\n self.Z_codebook = None\n self.Z_len = None\n self.Z_elems = None\n self.Z_numchar = None\n self.patts_len = None\n self.max_patt_len = None\n self.modelfeatures_inverted = None\n self.ypatt_features = None\n self.ypatt_activestates = None\n self.num_features = None\n self.num_states = None\n\n def setup_model(self, modelfeatures, states, L):\n \"\"\"setup and create the model representation\n \n Creates all maps and codebooks needed by the :class:`LCRF` class\n \n Args:\n modelfeatures: set of features defining the model\n states: set of states (i.e. 
tags)\n L: length of longest segment\n \"\"\"\n self.modelfeatures = modelfeatures\n self.modelfeatures_codebook = self.get_modelfeatures_codebook()\n self.Y_codebook = self.get_modelstates_codebook(states)\n self.L = L\n self.generate_instance_properties()\n\n def generate_instance_properties(self):\n \"\"\"generate instance properties that will be later used by :class:`LCRF` class\n \"\"\"\n self.Z_codebook = self.get_Z_pattern()\n self.Z_len, self.Z_elems, self.Z_numchar = self.get_Z_info()\n self.patts_len = set(self.Z_len.values())\n self.max_patt_len = max(self.patts_len)\n\n self.modelfeatures_inverted, self.ypatt_features = (\n self.get_inverted_modelfeatures()\n )\n self.ypatt_activestates = self.find_activated_states(\n self.ypatt_features, self.patts_len\n )\n\n self.num_features = self.get_num_features()\n self.num_states = self.get_num_states()\n\n def get_modelfeatures_codebook(self):\n r\"\"\"setup model features codebook\n \n it flatten :attr:`modelfeatures` and map each element to a unique code\n :attr:`modelfeatures` are represented in a dictionary with this form::\n \n {y_patt_1:{featureA:value, featureB:value, ...}\n y_patt_2:{featureA:value, featureC:value, ...}}\n \n Example::\n \n modelfeatures:\n {'B-PP': Counter({'w[0]=at': 1,\n 'w[0]=by': 1,\n 'w[0]=for': 4,\n ...\n }),\n 'B-PP|B-NP': Counter({'w[0]=16': 1,\n 'w[0]=July': 1,\n 'w[0]=Nomura': 1,\n ...\n }),\n ...\n }\n modelfeatures_codebook:\n {('B-PP','w[0]=at'): 1,\n ('B-PP','w[0]=by'): 2,\n ('B-PP','w[0]=for'): 3,\n ...\n }\n \n \"\"\"\n modelfeatures = self.modelfeatures\n codebook = {}\n code = 0\n for y_patt, featuresum in modelfeatures.items():\n for feature in featuresum:\n # fkey = y_patt + \"&&\" + feature\n codebook[(y_patt, feature)] = code\n code += 1\n return codebook\n\n def get_modelstates_codebook(self, states):\n \"\"\"create states codebook by mapping each state to a unique code/number\n \n Args:\n states: set of tags identified in training sequences\n \n Example::\n \n states = {'B-PP', 'B-NP', ...}\n states_codebook = {'B-PP':1, 'B-NP':2 ...}\n \"\"\"\n return {s: i for (i, s) in enumerate(states)}\n\n def get_Z_pattern(self):\n \"\"\"create a codebook from set Z by mapping each element to unique number/code\n \n Z is set of y patterns used in the model features\n \n Example::\n \n Z = {'O|B-VP|B-NP', 'O|B-VP', 'O', 'B-VP', 'B-NP', ...}\n Z_codebook = {'O|B-VP|B-NP':1, 'O|B-VP':2, 'O':3, 'B-VP':5, 'B-NP':4, ...}\n \"\"\"\n modelfeatures = self.modelfeatures\n Z_codebook = {y_patt: index for index, y_patt in enumerate(modelfeatures)}\n return Z_codebook\n\n def get_Z_info(self):\n \"\"\"get the properties of Z set\n \"\"\"\n Z_codebook = self.Z_codebook\n Z_len = {}\n Z_elems = {}\n Z_numchar = {}\n for z in Z_codebook:\n elems = z.split(\"|\")\n Z_len[z] = len(elems)\n Z_elems[z] = elems\n Z_numchar[z] = len(z)\n return (Z_len, Z_elems, Z_numchar)\n\n def get_inverted_modelfeatures(self):\n r\"\"\"invert :attr:`modelfeatures` instance variable\n \n Example::\n \n modelfeatures_inverted = \n {'w[0]=take': {1: {'I-VP'}, 2: {'I-VP|I-VP'}, 3: {'I-VP|I-VP|I-VP'}},\n 'w[0]=the': {1: {'B-NP'},\n 2: {'B-PP|B-NP', 'I-VP|B-NP'},\n 3: {'B-NP|B-PP|B-NP', B-VP|I-VP|B-NP', ...}\n },\n ...\n }\n \n ypatt_features = {'B-NP', 'B-PP|B-NP', ..}\n \"\"\"\n modelfeatures = self.modelfeatures\n Z_len = self.Z_len\n inverted_features = {}\n ypatt_features = set()\n\n for y_patt, featuredict in modelfeatures.items():\n z_len = Z_len[y_patt]\n # get features that are based only on y_patts\n if y_patt in featuredict:\n 
ypatt_features.add(y_patt)\n for feature in featuredict:\n if feature in inverted_features:\n if z_len in inverted_features[feature]:\n inverted_features[feature][z_len].add(y_patt)\n else:\n s = set()\n s.add(y_patt)\n inverted_features[feature][z_len] = s\n else:\n s = set()\n s.add(y_patt)\n inverted_features[feature] = {z_len: s}\n return (inverted_features, ypatt_features)\n\n def keep_longest_elems(self, s):\n \"\"\"used to figure out longest suffix and prefix on sets \n \"\"\"\n longest_elems = {}\n for tup, l in s.items():\n longest_elems[tup] = max(l, key=len)\n return longest_elems\n\n def check_suffix(self, token, ref_str):\n # check if ref_str ends with the token\n # return(ref_str[len(ref_str)-len(token):] == token)\n return ref_str.endswith(token)\n\n def check_prefix(self, token, ref_str):\n # check if ref_str starts with a token\n # return(ref_str[:len(token)] == token)\n return ref_str.startswith(token)\n\n def get_num_features(self):\n \"\"\"return total number of features in the model\n \"\"\"\n return len(self.modelfeatures_codebook)\n\n def get_num_states(self):\n \"\"\"return total number of states identified by the model in the training set\n \"\"\"\n return len(self.Y_codebook)\n\n def represent_globalfeatures(self, seq_featuresum):\n \"\"\"represent features extracted from sequences using :attr:`modelfeatures_codebook`\n \n Args:\n seq_featuresum: dictionary of sequence global features representing F(X,Y)\n \"\"\"\n modelfeatures_codebook = self.modelfeatures_codebook\n windx_fval = {}\n for y_patt, seg_features in seq_featuresum.items():\n for featurename in seg_features:\n # fkey = y_patt + \"&&\" + featurename\n fkey = (y_patt, featurename)\n if fkey in modelfeatures_codebook:\n windx_fval[modelfeatures_codebook[fkey]] = seg_features[featurename]\n count = len(windx_fval)\n return (\n numpy.fromiter(windx_fval.keys(), numpy.uint32, count),\n numpy.fromiter(windx_fval.values(), numpy.float64, count),\n )\n\n def represent_activefeatures(self, activefeatures):\n windx_fval = {}\n for z_patt in activefeatures:\n count = len(activefeatures[z_patt])\n windx_fval[z_patt] = (\n numpy.fromiter(activefeatures[z_patt].keys(), numpy.uint32, count),\n numpy.fromiter(activefeatures[z_patt].values(), numpy.float64, count),\n )\n return windx_fval\n\n def accumulate_activefeatures(self, activefeatures, accumfeatures):\n for z_patt in activefeatures:\n if z_patt in accumfeatures:\n accumfeatures[z_patt].update(activefeatures[z_patt])\n else:\n accumfeatures[z_patt] = activefeatures[z_patt]\n\n def join_segfeatures_filteredstates(self, seg_features, filtered_states):\n \"\"\"represent detected active features while parsing sequences\n \n Args:\n activestates: dictionary of the form {'patt_len':{patt_1, patt_2, ...}}\n seg_features: dictionary of the observation features. 
It has the form \n {featureA_name:value, featureB_name:value, ...} \n \"\"\"\n modelfeatures_codebook = self.modelfeatures_codebook\n activefeatures = {}\n for z_len in filtered_states:\n z_patt_set = filtered_states[z_len]\n for z_patt in z_patt_set:\n windx_fval = {}\n for seg_featurename in seg_features:\n fkey = (z_patt, seg_featurename)\n # print(\"filtering ...\")\n # print(\"zpatt \", z_patt)\n # print(\"fkey \", fkey)\n if fkey in modelfeatures_codebook:\n windx_fval[modelfeatures_codebook[fkey]] = seg_features[\n seg_featurename\n ]\n if windx_fval:\n activefeatures[z_patt] = windx_fval\n return activefeatures\n\n def represent_ypatt_filteredstates(self, filtered_states):\n \"\"\"represent detected active features while parsing sequences\n \n Args:\n activestates: dictionary of the form {'patt_len':{patt_1, patt_2, ...}}\n seg_features: dictionary of the observation features. It has the form \n {featureA_name:value, featureB_name:value, ...} \n \"\"\"\n modelfeatures = self.modelfeatures\n modelfeatures_codebook = self.modelfeatures_codebook\n activefeatures = {}\n for z_len in filtered_states:\n z_patt_set = filtered_states[z_len]\n for z_patt in z_patt_set:\n windx_fval = {}\n if z_patt in modelfeatures[z_patt]:\n fkey = (z_patt, z_patt)\n # print(\"filtering ...\")\n # print(\"zpatt \", z_patt)\n # print(\"fkey \", fkey)\n windx_fval[modelfeatures_codebook[fkey]] = 1\n if windx_fval:\n activefeatures[z_patt] = windx_fval\n return activefeatures\n\n def find_seg_activefeatures(self, seg_features, allowed_z_len):\n \"\"\"finds active features based on the observation/segment features\n \n Args:\n seg_features:\n allowed_z_len:\n \"\"\"\n modelfeatures_codebook = self.modelfeatures_codebook\n modelfeatures_inverted = self.modelfeatures_inverted\n activefeatures = {}\n # use segment features plus the activated states\n for seg_featurename in seg_features:\n if seg_featurename in modelfeatures_inverted:\n for z_len in allowed_z_len:\n if z_len in modelfeatures_inverted[seg_featurename]:\n for zpatt in modelfeatures_inverted[seg_featurename][z_len]:\n fkey = (zpatt, seg_featurename)\n # print(\"zpatt \", zpatt)\n # print(\"fkey \", fkey)\n if zpatt in activefeatures:\n activefeatures[zpatt][\n modelfeatures_codebook[fkey]\n ] = seg_features[seg_featurename]\n else:\n activefeatures[zpatt] = {\n modelfeatures_codebook[fkey]: seg_features[\n seg_featurename\n ]\n }\n return activefeatures\n\n def find_ypatt_activefeatures(self, allowed_z_len):\n \"\"\"finds the label and state transition features (if applicable -- in case it is modeled)\n \n Args:\n allowed_z_len:\n \"\"\"\n modelfeatures_codebook = self.modelfeatures_codebook\n ypatt_activestates = self.ypatt_activestates\n activefeatures = {}\n # check if ypattern features are modeled\n for z_len in allowed_z_len:\n if z_len in ypatt_activestates:\n for zpatt in ypatt_activestates[z_len]:\n fkey = (zpatt, zpatt)\n # print(\"zpatt \", zpatt)\n # print(\"fkey \", fkey)\n if zpatt in activefeatures:\n activefeatures[zpatt][modelfeatures_codebook[fkey]] = 1\n else:\n activefeatures[zpatt] = {modelfeatures_codebook[fkey]: 1}\n\n return activefeatures\n\n def find_activated_states(self, seg_features, allowed_z_len):\n \"\"\"identify possible activated y patterns/features using the observation features\n \n Args:\n seg_features: dictionary of the observation features. 
It has the form \n {featureA_name:value, featureB_name:value, ...} \n allowed_z_len: set of permissible order/length of y features\n {1,2,3} -> means up to third order y features are allowed\n \"\"\"\n modelfeatures_inverted = self.modelfeatures_inverted\n active_states = {}\n for feature in seg_features:\n if feature in modelfeatures_inverted:\n factivestates = modelfeatures_inverted[feature]\n for z_len in factivestates:\n if z_len in allowed_z_len:\n if z_len in active_states:\n active_states[z_len].update(factivestates[z_len])\n else:\n active_states[z_len] = set(factivestates[z_len])\n # print(\"active_states from func \", active_states)\n return active_states\n\n def filter_activated_states(self, activated_states, accum_active_states, boundary):\n \"\"\"filter/prune states and y features \n \n Args:\n activaed_states: dictionary containing possible active states/y features\n it has the form {patt_len:{patt_1, patt_2, ...}}\n accum_active_states: dictionary of only possible active states by position\n it has the form {pos_1:{state_1, state_2, ...}}\n boundary: tuple (u,v) representing the current boundary in the sequence\n \"\"\"\n\n Z_elems = self.Z_elems\n filtered_activestates = {}\n __, pos = boundary\n\n for z_len in activated_states:\n if z_len == 1:\n continue\n start_pos = pos - z_len + 1\n if (start_pos, start_pos) in accum_active_states:\n filtered_activestates[z_len] = set()\n for z_patt in activated_states[z_len]:\n check = True\n zelems = Z_elems[z_patt]\n for i in range(z_len):\n pos_bound = (start_pos + i, start_pos + i)\n if pos_bound not in accum_active_states:\n check = False\n break\n if zelems[i] not in accum_active_states[pos_bound]:\n check = False\n break\n if check:\n filtered_activestates[z_len].add(z_patt)\n return filtered_activestates\n\n def save(self, folder_dir):\n \"\"\"save main model data structures\n \"\"\"\n model_info = {\n \"MR_modelfeatures\": self.modelfeatures,\n \"MR_modelfeaturescodebook\": self.modelfeatures_codebook,\n \"MR_Ycodebook\": self.Y_codebook,\n \"MR_L\": self.L,\n }\n for name in model_info:\n ReaderWriter.dump_data(model_info[name], os.path.join(folder_dir, name))\n\n\nclass LCRF(object):\n \"\"\"linear chain CRF model \n \n Args:\n model: an instance of :class:`LCRFModelRepresentation` class\n seqs_representer: an instance of :class:`SeqsRepresenter` class\n seqs_info: dictionary holding sequences info\n \n Keyword Args:\n load_info_fromdisk: integer from 0 to 5 specifying number of cached data \n to be kept in memory. 
0 means keep everything while\n 5 means load everything from disk\n \n Attributes:\n model: an instance of :class:`LCRFModelRepresentation` class\n weights: a numpy vector representing feature weights\n seqs_representer: an instance of :class:`SeqsRepresenter` class\n seqs_info: dictionary holding sequences info\n beam_size: determines the size of the beam for state pruning\n fun_dict: a function map\n def_cached_entities: a list of the names of cached entities sorted (descending)\n based on estimated space required in memory \n \n \"\"\"\n\n def __init__(self, model, seqs_representer, seqs_info, load_info_fromdisk=5):\n\n self.model = model\n self.weights = numpy.zeros(model.num_features, dtype=\"longdouble\")\n self.seqs_representer = seqs_representer\n self.seqs_info = seqs_info\n self.func_dict = {\n \"alpha\": self._load_alpha,\n \"beta\": self._load_beta,\n \"activated_states\": self.load_activatedstates,\n \"seg_features\": self.load_segfeatures,\n \"globalfeatures\": self.load_globalfeatures,\n \"globalfeatures_per_boundary\": self.load_globalfeatures,\n \"activefeatures\": self.load_activefeatures,\n \"Y\": self._load_Y,\n }\n\n self.def_cached_entities = self.cached_entitites(load_info_fromdisk)\n # default beam size covers all available states\n self.beam_size = len(self.model.Y_codebook)\n\n def cached_entitites(self, load_info_fromdisk):\n \"\"\"construct list of names of cached entities in memory\n \"\"\"\n ondisk_info = [\n \"activefeatures\",\n \"seg_features\",\n \"activated_states\",\n \"globalfeatures_per_boundary\",\n \"globalfeatures\",\n \"Y\",\n ]\n def_cached_entities = ondisk_info[:load_info_fromdisk]\n return def_cached_entities\n\n @property\n def seqs_info(self):\n return self._seqs_info\n\n @seqs_info.setter\n def seqs_info(self, info_dict):\n # make a copy of the passed seqs_info dictionary\n self._seqs_info = deepcopy(info_dict)\n\n def identify_activefeatures(\n self, seq_id, boundary, accum_activestates, apply_filter=True\n ):\n \"\"\"determine model active features for a given sequence at defined boundary\n \n Main task:\n - determine model active features in a given boundary\n - update the accum_activestates dictionary \n \n Args:\n seq_id: integer representing unique id assigned to the sequence\n boundary: tuple (u,v) defining the boundary under consideration\n accum_activestates: dictionary of the form {(u,v):{state_1, state_2, ...}}\n it keeps track of the active states in each boundary\n \"\"\"\n\n model = self.model\n max_patt_len = model.max_patt_len\n patts_len = model.patts_len\n ypatt_features = model.ypatt_features\n # default length of a state/tag\n state_len = 1\n seg_features = self.seqs_info[seq_id][\"seg_features\"][boundary]\n\n start_state_flag = False\n if (\n \"__START__\" in model.Y_codebook\n ): # first order model is used with max_patt_len = 2\n start_state_flag = True\n apply_filter = True\n\n u, __ = boundary\n if u == 1 and start_state_flag:\n accum_activestates[0, 0] = {\"__START__\"}\n # print(\"boundary \", boundary)\n # print('seg_features ', seg_features)\n # print(\"accum_activestates \", accum_activestates)\n if u < max_patt_len:\n # case when we use first-order CRF model -- max_patt_len = 2\n if start_state_flag:\n max_len = max_patt_len\n else:\n max_len = u\n else:\n max_len = max_patt_len\n # determine allowed z patterns length (i.e. 
pattern order)\n allowed_z_len = {z_len for z_len in patts_len if z_len <= max_len}\n\n # print(\"apply filter \", apply_filter)\n if not apply_filter: # case of no filtering\n seg_activefeatures = model.find_seg_activefeatures(\n seg_features, allowed_z_len\n )\n ypatt_activefeatures = model.find_ypatt_activefeatures(allowed_z_len)\n # combine both\n accumfeatures = seg_activefeatures\n model.accumulate_activefeatures(ypatt_activefeatures, accumfeatures)\n\n else: # case of filtering\n seg_activefeatures = model.find_seg_activefeatures(\n seg_features, {state_len}\n )\n ypatt_activefeatures = model.find_ypatt_activefeatures({state_len})\n # determine activate states with order 0 (i.e. length =1)\n zero_order_activatedstates = set(seg_activefeatures.keys())\n zero_order_activatedstates.update(set(ypatt_activefeatures.keys()))\n accum_activestates[boundary] = zero_order_activatedstates\n # remove states with zero order (i.e. length = 1)\n allowed_z_len.remove(state_len)\n seg_activated_states = model.find_activated_states(\n seg_features, allowed_z_len\n )\n seg_filtered_states = model.filter_activated_states(\n seg_activated_states, accum_activestates, boundary\n )\n seg_activefeatures_addendum = model.join_segfeatures_filteredstates(\n seg_features, seg_filtered_states\n )\n\n ypatt_activated_states = model.find_activated_states(\n ypatt_features, allowed_z_len\n )\n ypatt_filtered_states = model.filter_activated_states(\n ypatt_activated_states, accum_activestates, boundary\n )\n ypatt_activefeatures_addendum = model.represent_ypatt_filteredstates(\n ypatt_filtered_states\n )\n\n # join all the active features\n accumfeatures = seg_activefeatures\n model.accumulate_activefeatures(ypatt_activefeatures, accumfeatures)\n model.accumulate_activefeatures(seg_activefeatures_addendum, accumfeatures)\n model.accumulate_activefeatures(\n ypatt_activefeatures_addendum, accumfeatures\n )\n\n activefeatures = model.represent_activefeatures(accumfeatures)\n\n return activefeatures\n\n def generate_activefeatures(self, seq_id):\n \"\"\"construct a dictionary of model active features identified given a sequence\n \n Main task:\n - generate active features for every boundary of the sequence \n\n Args:\n seq_id: integer representing unique id assigned to the sequence\n \n \"\"\"\n # to be used when using gradient-based methods for learning\n T = self.seqs_info[seq_id][\"T\"]\n L = self.model.L\n accum_activestates = {}\n activefeatures_perboundary = {}\n ypatt_activestates = self.model.ypatt_activestates\n # zero-order state/tag has state_len = 1 (i.e. using only one state)\n state_len = 1\n apply_filter = True\n # check if we are modeling label bias terms or having categorical features\n if state_len in ypatt_activestates or self.seqs_representer.attr_scaler:\n apply_filter = False\n\n for j in range(1, T + 1):\n for d in range(L):\n u = j - d\n if u <= 0:\n break\n v = j\n boundary = (u, v)\n # identify active features\n active_features = self.identify_activefeatures(\n seq_id, boundary, accum_activestates, apply_filter=apply_filter\n )\n activefeatures_perboundary[boundary] = active_features\n return activefeatures_perboundary\n\n def compute_forward_vec(self, w, seq_id):\n \"\"\"compute the forward matrix (alpha matrix)\n \n Args:\n w: weight vector (numpy vector)\n seq_id: integer representing unique id assigned to the sequence\n \n .. 
warning::\n \n implementation of this method is in the child class\n \"\"\"\n # to be implemented in the child class\n pass\n\n def compute_backward_vec(self, w, seq_id):\n \"\"\"compute the backward matrix (beta matrix)\n \n Args:\n w: weight vector (numpy vector)\n seq_id: integer representing unique id assigned to the sequence\n \n .. warning::\n \n implementation of this method is in the child class\n \"\"\"\n # to be implemented in the child class\n pass\n\n def compute_marginals(self, seq_id):\n \"\"\"compute the marginal (i.e. probability of each y pattern at each position)\n \n Args:\n seq_id: integer representing unique id assigned to the sequence\n \n .. warning::\n \n implementation of this method is in the child class\n \"\"\"\n # to be implemented in the child class\n pass\n\n def compute_feature_expectation(self, seq_id, P_marginals):\n \"\"\"compute the features expectations (i.e. expected count of the feature based on learned model)\n \n Args:\n seq_id: integer representing unique id assigned to the sequence\n P_marginals: probability matrix for y patterns at each position in time\n \n .. warning::\n \n implementation of this method is in the child class\n \"\"\"\n # to be implemented in the child class\n pass\n\n def compute_seq_loglikelihood(self, w, seq_id):\n \"\"\"computes the conditional log-likelihood of a sequence (i.e. :math:`p(Y|X;w)`) \n \n it is used as a cost function for the single sequence when trying to estimate parameters w\n \n Args:\n w: weight vector (numpy vector)\n seq_id: integer representing unique id assigned to the sequence\n \n \"\"\"\n # print(\"-\"*40)\n # print(\"... Evaluating compute_seq_loglikelihood() ...\")\n\n # we need global features and alpha matrix to be ready -- order is important\n l = OrderedDict()\n l[\"globalfeatures\"] = (seq_id, False)\n l[\"activefeatures\"] = (seq_id,)\n l[\"alpha\"] = (w, seq_id)\n\n self.check_cached_info(seq_id, l)\n # get the p(X;w) -- probability of the sequence under parameter w\n Z = self.seqs_info[seq_id][\"Z\"]\n w_indx, f_val = self.seqs_info[seq_id][\"globalfeatures\"]\n # log(p(Y|X;w))\n loglikelihood = numpy.dot(w[w_indx], f_val) - Z\n self.seqs_info[seq_id][\"loglikelihood\"] = loglikelihood\n\n return loglikelihood\n\n def compute_seq_gradient(self, w, seq_id, grad):\n r\"\"\"compute the gradient of conditional log-likelihood with respect to the parameters vector w (:math:`\\frac{\\partial p(Y|X;w)}{\\partial w}`)\n \n Args:\n w: weight vector (numpy vector)\n seq_id: integer representing unique id assigned to the sequence\n \"\"\"\n # print(\"-\"*40)\n # print(\"... 
Evaluating compute_seq_gradient() ...\")\n\n # we need alpha, beta, global features and active features to be ready\n l = OrderedDict()\n l[\"globalfeatures\"] = (seq_id, False)\n l[\"activefeatures\"] = (seq_id,)\n l[\"alpha\"] = (w, seq_id)\n l[\"beta\"] = (w, seq_id)\n self.check_cached_info(seq_id, l)\n # compute marginal probability of y patterns at every position\n P_marginal = self.compute_marginals(seq_id)\n # compute features expectation\n self.compute_feature_expectation(seq_id, P_marginal, grad)\n target_indx = numpy.where(grad != 0)[0]\n # get global features count of the reference sequence\n gwindx, gfval = self.seqs_info[seq_id][\"globalfeatures\"]\n grad[target_indx] *= -1\n grad[gwindx] += gfval\n # update target_indx\n target_indx = numpy.unique(numpy.concatenate((target_indx, gwindx)))\n # target_indx = numpy.where(grad!=0)[0]\n return target_indx\n\n def compute_seqs_loglikelihood(self, w, seqs_id):\n \"\"\"computes the conditional log-likelihood of training sequences \n \n it is used as a cost/objective function for the whole training sequences when trying to estimate parameters w\n \n Args:\n w: weight vector (numpy vector)\n seqs_id: list of integer representing unique ids of sequences used for training\n \n \"\"\"\n seqs_loglikelihood = 0\n for seq_id in seqs_id:\n seqs_loglikelihood += self.compute_seq_loglikelihood(w, seq_id)\n return seqs_loglikelihood\n\n def compute_seqs_gradient(self, w, seqs_id):\n \"\"\"compute the gradient of conditional log-likelihood with respect to the parameters vector w\n \n Args:\n w: weight vector (numpy vector)\n seqs_id: list of integer representing unique ids of sequences used for training\n \n \"\"\"\n seqs_grad = numpy.zeros(len(w))\n seq_grad = numpy.zeros(len(w))\n for seq_id in seqs_id:\n target_indx = self.compute_seq_gradient(w, seq_id, seq_grad)\n seqs_grad[target_indx] += seq_grad[target_indx]\n seq_grad.fill(0)\n return seqs_grad\n\n def _load_alpha(self, w, seq_id):\n \"\"\"compute and load the alpha matrix in :attr:`seqs_info`\n \n Args:\n w: weight vector (numpy vector)\n seq_id: integer representing unique id assigned to the sequence\n \n .. note::\n \n - seg_features (per boundary) dictionary should be available in :attr:`seqs.info`\n - activated_states (per boundary) dictionary should be available in :attr:`seqs.info`\n\n \"\"\"\n seq_info = self.seqs_info[seq_id]\n seq_info[\"alpha\"] = self.compute_forward_vec(w, seq_id)\n seq_info[\"Z\"] = vectorized_logsumexp(seq_info[\"alpha\"][-1, :])\n # print(\"... Computing alpha probability ...\")\n\n def _load_beta(self, w, seq_id):\n \"\"\"compute and load the beta matrix in :attr:`seqs_info`\n \n Args:\n w: weight vector (numpy vector)\n seq_id: integer representing unique id assigned to the sequence\n \n .. note:: \n \n - fpotential per boundary dictionary should be available in :attr:`seqs.info`\n \"\"\"\n seq_info = self.seqs_info[seq_id]\n seq_info[\"beta\"] = self.compute_backward_vec(w, seq_id)\n # print(\"... Computing beta probability ...\")\n\n def _load_Y(self, seq_id):\n \"\"\"load the Y sequence and the boundaries in :attr:`seqs_info`\n \n Args:\n seq_id: integer representing unique id assigned to the sequence\n \n \"\"\"\n seq = self._load_seq(seq_id, target=\"seq\")\n self.seqs_info[seq_id][\"Y\"] = {\n \"flat_y\": seq.flat_y,\n \"boundaries\": seq.y_sboundaries,\n }\n # print(\"... 
loading Y ...\")\n\n def load_activatedstates(self, seq_id):\n \"\"\"load sequence activated states in :attr:`seqs_info`\n\n Args:\n seq_id: integer representing unique id assigned to the sequence\n \n \"\"\"\n seqs_info = self.seqs_info\n seqs_representer = self.seqs_representer\n activated_states = seqs_representer.get_seq_activatedstates(seq_id, seqs_info)\n seqs_info[seq_id][\"activated_states\"] = activated_states\n # print(\"... loading activated states ...\")\n\n def load_segfeatures(self, seq_id):\n \"\"\"load sequence observation features in :attr:`seqs_info`\n \n Args:\n seq_id: integer representing unique id assigned to the sequence\n \n \"\"\"\n seqs_info = self.seqs_info\n seqs_representer = self.seqs_representer\n seg_features = seqs_representer.get_seq_segfeatures(seq_id, seqs_info)\n self.seqs_info[seq_id][\"seg_features\"] = seg_features\n # print(\"... loading segment features ...\")\n\n def load_activefeatures(self, seq_id):\n \"\"\"load sequence model identified active features in :attr:`seqs_info`\n\n Args:\n seq_id: integer representing unique id assigned to the sequence\n \n \"\"\"\n seqs_representer = self.seqs_representer\n activefeatures = seqs_representer.get_seq_activefeatures(seq_id, self.seqs_info)\n if not activefeatures:\n # check if activated_states and seg_features are loaded\n l = {}\n # l['activated_states'] = (seq_id, )\n l[\"seg_features\"] = (seq_id,)\n self.check_cached_info(seq_id, l)\n activefeatures = self.generate_activefeatures(seq_id)\n seq_dir = self.seqs_info[seq_id][\"activefeatures_dir\"]\n ReaderWriter.dump_data(\n activefeatures, os.path.join(seq_dir, \"activefeatures\")\n )\n self.seqs_info[seq_id][\"activefeatures\"] = activefeatures\n\n def load_globalfeatures(self, seq_id, per_boundary=True):\n \"\"\"load sequence global features in :attr:`seqs_info`\n \n Args:\n seq_id: integer representing unique id assigned to the sequence\n \n Keyword Args:\n per_boundary: boolean representing if the required global features dictionary \n is represented by boundary (i.e. True) or aggregated (i.e. 
False)\n \n \"\"\"\n seqs_representer = self.seqs_representer\n gfeatures, exception_fired = seqs_representer.get_seq_globalfeatures(\n seq_id, self.seqs_info, per_boundary=per_boundary\n )\n # print(\"per_boundary \", per_boundary)\n # print(gfeatures_perboundary)\n if per_boundary:\n fname = \"globalfeatures_per_boundary\"\n else:\n fname = \"globalfeatures\"\n if exception_fired:\n gfeatures = self.model.represent_globalfeatures(gfeatures)\n seq_dir = self.seqs_info[seq_id][\"globalfeatures_dir\"]\n ReaderWriter.dump_data(\n gfeatures, os.path.join(seq_dir, \"globalfeatures_repr\")\n )\n self.seqs_info[seq_id][fname] = gfeatures\n\n # print(self.seqs_info[seq_id][fname])\n # print(\"loading globalfeatures\")\n\n def load_imposter_globalfeatures(self, seq_id, y_imposter, seg_other_symbol):\n \"\"\"load imposter sequence global features in :attr:`seqs_info`\n\n Args:\n seq_id: integer representing unique id assigned to the sequence\n y_imposter: the imposter sequence generated using viterbi decoder\n seg_other_sybmol: If it is specified, then the task is a segmentation problem \n (in this case we need to specify the non-entity/other element)\n else if it is None (default), then it is considered as sequence labeling problem\n\n \"\"\"\n seqs_representer = self.seqs_representer\n imposter_gfeatures_perboundary, y_imposter_boundaries = seqs_representer.get_imposterseq_globalfeatures(\n seq_id, self.seqs_info, y_imposter, seg_other_symbol\n )\n return (imposter_gfeatures_perboundary, y_imposter_boundaries)\n\n def represent_globalfeature(self, gfeatures, boundaries):\n \"\"\"represent extracted sequence global features \n \n two representation could be applied:\n - (1) features identified by boundary (i.e. f(X,Y))\n - (2) features identified and aggregated across all positions in the sequence (i.e. F(X, Y))\n \n\n Args:\n gfeatures: dictionary representing the extracted sequence features (i.e F(X, Y))\n boundaries: if specified (i.e. list of boundaries), then the required representation\n is global features per boundary (i.e. option (1))\n else (i.e. None or empty list), then the required representation is the\n aggregated global features (option(2))\n \"\"\"\n seqs_representer = self.seqs_representer\n windx_fval = seqs_representer.represent_gfeatures(\n gfeatures, self.model, boundaries=boundaries\n )\n return windx_fval\n\n def _load_seq(self, seq_id, target=\"seq\"):\n \"\"\"load/return components of the sequence which is an instance of :class:`SequenceStruct`\n \n Args:\n seq_id: integer representing unique id assigned to the sequence\n \n Keyword Args:\n target: string from {'seq', 'Y', 'X'}\n \n \"\"\"\n seqs_representer = self.seqs_representer\n seq = seqs_representer.load_seq(seq_id, self.seqs_info)\n if target == \"seq\":\n return seq\n elif target == \"Y\":\n return seq.Y\n elif target == \"X\":\n return seq.X\n\n def check_cached_info(self, seq_id, entity_names):\n \"\"\"check and load required data elements/entities for every computation step\n\n Args:\n seq_id: integer representing unique id assigned to the sequence\n entity_name: list of names of the data elements need to be loaded in :attr:`seqs.info` dictionary\n needed while performing computation\n \n .. 
note::\n \n order of elements in the entity_names list is **important**\n \n \"\"\"\n seq_info = self.seqs_info[seq_id]\n func_dict = self.func_dict\n none_type = type(None)\n for varname, args in entity_names.items():\n if type(seq_info.get(varname)) == none_type:\n func_dict[varname](*args)\n\n def clear_cached_info(self, seqs_id, cached_entities=[]):\n \"\"\"clear/clean loaded data elements/entities in :attr:`seqs.info` dictionary\n\n Args:\n seqs_id: list of integers representing the unique ids of the training sequences\n\n Keyword Args:\n cached_entities: list of data entities to be cleared for the :attr:`seqs.info` dictionary\n \n .. note::\n \n order of elements in the entity_names list is **important**\n \n \"\"\"\n args = self.def_cached_entities + cached_entities\n for seq_id in seqs_id:\n seq_info = self.seqs_info[seq_id]\n for varname in args:\n if varname in seq_info:\n seq_info[varname] = None\n\n def save_model(self, folder_dir):\n \"\"\"save model data structures\n \n Args:\n folder_dir: string representing directory where files are pickled/dumped\n \"\"\"\n # to clean things before pickling the model\n # print(self.seqs_info)\n self.seqs_info.clear()\n self.seqs_representer.save(folder_dir)\n self.model.save(folder_dir)\n # save weights\n ReaderWriter.dump_data(self.weights, os.path.join(folder_dir, \"weights\"))\n # write classes used into a file\n class_desc = []\n class_desc.append(str(self.model.__class__).split(\".\")[-1].split(\"'\")[0])\n class_desc.append(str(self.__class__).split(\".\")[-1].split(\"'\")[0])\n class_desc.append(\n str(self.seqs_representer.__class__).split(\".\")[-1].split(\"'\")[0]\n )\n class_desc.append(\n str(self.seqs_representer.feature_extractor.__class__)\n .split(\".\")[-1]\n .split(\"'\")[0]\n )\n class_desc.append(\n str(self.seqs_representer.attr_extractor.__class__)\n .split(\".\")[-1]\n .split(\"'\")[0]\n )\n if self.seqs_representer.attr_scaler:\n class_desc.append(\n str(self.seqs_representer.attr_scaler.__class__)\n .split(\".\")[-1]\n .split(\"'\")[0]\n )\n else:\n class_desc.append(\"None\")\n with open(os.path.join(folder_dir, \"class_desc.txt\"), \"a\") as f:\n f.write(\"\\n\".join(class_desc))\n\n # print('seqs_info from LCRF ', self.seqs_info)\n\n def decode_seqs(self, decoding_method, out_dir, **kwargs):\n r\"\"\"decode sequences (i.e. infer labels of sequence of observations)\n \n Args:\n decoding_method: a string referring to type of decoding {viterbi, per_state_decoding}\n out_dir: string representing the working directory (path) where sequence processing will take place\n \n Keyword Arguments:\n file_name: the name of the file in case decoded sequences are required to be written\n sep: separator (default '\\t') between the columns when writing decoded sequences to file\n procseqs_foldername: string representing the folder name where intermediary data and parsing would take place\n beam_size: integer determining the size of the beam while decoding\n seqs: a list comprising of sequences that are instances of :class:`SequenceStruct` class to be decoded\n (used for decoding test data or any new/unseen data -- sequences)\n seqs_info: dictionary containing the info about the sequences to decode \n (used for decoding training sequences)\n seqs_dict: a dictionary comprising of sequence ids as keys and corresponding sequences that are instances of :class:`SequenceStruct` class to be decoded\n as values\n .. 
note:: \n \n for keyword arguments only one of {``seqs`` , ``seqs_info``, ``seqs_dict``} option need to be specified\n \n \"\"\"\n\n w = self.weights\n\n if decoding_method == \"perstate_decoding\":\n decoder = self.perstate_posterior_decoding\n else:\n decoder = self.viterbi\n\n file_name = kwargs.get(\"file_name\")\n if file_name:\n # file to write the sequences with their predicted labels\n corpus_fname = \"decoding_seqs\"\n out_file = os.path.join(create_directory(corpus_fname, out_dir), file_name)\n if kwargs.get(\"sep\"):\n sep = kwargs[\"sep\"]\n else:\n # default separator is tab\n sep = \"\\t\"\n\n beam_size = kwargs.get(\"beam_size\")\n if not beam_size:\n beam_size = self.beam_size\n\n unique_id = False\n procseqs_foldername = kwargs.get(\"procseqs_foldername\")\n if not procseqs_foldername:\n unique_id = True\n procseqs_foldername = \"processed_seqs\"\n\n if kwargs.get(\"seqs_info\"):\n self.seqs_info = kwargs[\"seqs_info\"]\n N = len(self.seqs_info)\n else:\n if kwargs.get(\"seqs\"):\n seqs = kwargs[\"seqs\"]\n seqs_dict = {i + 1: seqs[i] for i in range(len(seqs))}\n elif kwargs.get(\"seqs_dict\"):\n seqs_dict = kwargs[\"seqs_dict\"]\n else:\n raise (\n \"You need to specify one of the following keyword arguments {``seqs`` , ``seqs_info``, ``seqs_dict``} \"\n )\n seqs_id = list(seqs_dict.keys())\n N = len(seqs_id)\n seqs_info = self.seqs_representer.prepare_seqs(\n seqs_dict, procseqs_foldername, out_dir, unique_id=unique_id\n )\n self.seqs_representer.scale_attributes(seqs_id, seqs_info)\n self.seqs_representer.extract_seqs_modelactivefeatures(\n seqs_id, seqs_info, self.model, \"processed_seqs\", learning=False\n )\n self.seqs_info = seqs_info\n\n seqs_pred = {}\n seqs_info = self.seqs_info\n counter = 0\n for seq_id in seqs_info:\n Y_pred, __ = decoder(w, seq_id, beam_size)\n seq = ReaderWriter.read_data(\n os.path.join(seqs_info[seq_id][\"globalfeatures_dir\"], \"sequence\")\n )\n if file_name:\n self.write_decoded_seqs([seq], [Y_pred], out_file, sep)\n seqs_pred[seq_id] = {\"seq\": seq, \"Y_pred\": Y_pred}\n # clear added info per sequence\n self.clear_cached_info([seq_id])\n counter += 1\n print(\"sequence decoded -- {} sequences are left\".format(N - counter))\n\n # clear seqs_info\n self.seqs_info.clear()\n return seqs_pred\n\n def write_decoded_seqs(self, ref_seqs, Y_pred_seqs, out_file, sep=\"\\t\"):\n \"\"\"write inferred sequences on file\n \n Args:\n ref_seqs: list of sequences that are instances of :class:`SequenceStruct`\n Y_pred_seqs: list of list of tags decoded for every reference sequence\n out_file: string representing out file where data is written\n sep: separator used while writing on out file\n \"\"\"\n for i in range(len(ref_seqs)):\n Y_pred_seq = Y_pred_seqs[i]\n ref_seq = ref_seqs[i]\n T = ref_seq.T\n line = \"\"\n for t in range(1, T + 1):\n for field_name in ref_seq.X[t]:\n line += ref_seq.X[t][field_name] + sep\n if ref_seq.flat_y:\n line += ref_seq.flat_y[t - 1] + sep\n line += Y_pred_seq[t - 1]\n line += \"\\n\"\n line += \"\\n\"\n ReaderWriter.log_progress(line, out_file)\n\n def prune_states(self, j, delta, beam_size):\n \"\"\"prune states that fall off the specified beam size\n \n Args:\n j: current position (integer) in the sequence\n delta: score matrix \n beam_size: specified size of the beam (integer)\n \n .. 
warning::\n \n implementation of this method is in the child class\n \"\"\"\n pass\n\n def viterbi(self, w, seq_id, beam_size, stop_off_beam=False, y_ref=[], K=1):\n \"\"\"decode sequences using viterbi decoder \n \n Args:\n w: weight vector (numpy vector)\n seq_id: integer representing unique id assigned to the sequence\n beam_size: integer representing the size of the beam\n \n Keyword Arguments:\n stop_off_beam: boolean indicating if to stop when the reference state \\\n falls off the beam (used in perceptron/search based learning)\n y_ref: reference sequence list of labels (used while learning)\n K: integer indicating number of decoded sequences required (i.e. top-k list)\n \n .. warning::\n \n implementation of this method is in the child class\n \n \"\"\"\n pass\n\n def validate_forward_backward_pass(self, w, seq_id):\n \"\"\"check the validity of the forward backward pass \n \n Args:\n w: weight vector (numpy vector)\n seq_id: integer representing unique id assigned to the sequence\n \n \"\"\"\n self.clear_cached_info([seq_id])\n # this will compute alpha and beta matrices and save them in seqs_info dict\n l = OrderedDict()\n l[\"activefeatures\"] = (seq_id,)\n l[\"alpha\"] = (w, seq_id)\n l[\"beta\"] = (w, seq_id)\n self.check_cached_info(seq_id, l)\n\n alpha = self.seqs_info[seq_id][\"alpha\"]\n beta = self.seqs_info[seq_id][\"beta\"]\n\n Z_alpha = vectorized_logsumexp(alpha[-1, :])\n Z_beta = numpy.min(beta[1, :])\n raw_diff = numpy.abs(Z_alpha - Z_beta)\n\n print(\"alpha[-1,:] = {}\".format(alpha[-1, :]))\n print(\"beta[1,:] = {}\".format(beta[1, :]))\n print(\"Z_alpha : {}\".format(Z_alpha))\n print(\"Z_beta : {}\".format(Z_beta))\n print(\"Z_aplha - Z_beta {}\".format(raw_diff))\n\n rel_diff = raw_diff / (Z_alpha + Z_beta)\n print(\"rel_diff : {}\".format(rel_diff))\n self.clear_cached_info([seq_id])\n # print(\"seqs_info {}\".format(self.seqs_info))\n return (raw_diff, rel_diff)\n\n def check_gradient(self, w, seq_id):\n \"\"\"implementation of finite difference method similar to ``scipy.optimize.check_grad()``\n \n Args:\n w: weight vector (numpy vector)\n seq_id: integer representing unique id assigned to the sequence\n \n \"\"\"\n print(\"checking gradient...\")\n self.clear_cached_info([seq_id])\n epsilon = 1e-4\n w_dim = len(w)\n w = numpy.random.randn(w_dim)\n # basis vector\n ei = numpy.zeros(w_dim, dtype=\"longdouble\")\n grad = numpy.zeros(w_dim, dtype=\"longdouble\")\n for i in range(len(w)):\n ei[i] = epsilon\n l_wplus = self.compute_seq_loglikelihood(w + ei, seq_id)\n self.clear_cached_info([seq_id])\n l_wminus = self.compute_seq_loglikelihood(w - ei, seq_id)\n self.clear_cached_info([seq_id])\n grad[i] = (l_wplus - l_wminus) / (2 * epsilon)\n ei[i] = 0\n estimated_grad = self.compute_seqs_gradient(w, [seq_id])\n diff = numpy.abs(-grad + estimated_grad)\n avg_diff = numpy.mean(diff)\n print(\"difference between both gradients: \\n {}\".format(diff))\n print(\"average difference = {}\".format(avg_diff))\n # clear seq_id info\n self.clear_cached_info([seq_id])\n return avg_diff\n\n def validate_gradient(self, w, seq_id):\n print(\n \"checking gradient using approach mentioned in (Bottou, 2012) 'Stochastic Gradient Descent Tricks' paper...\"\n )\n self.clear_cached_info([seq_id])\n epsilons = [1e-6, 1e-8, 1e-10]\n rounds = 5\n res = {}\n # generate a random initial weight w\n for __ in range(rounds):\n for epsilon in epsilons:\n w0 = numpy.random.rand(len(w))\n l0 = self.compute_seq_loglikelihood(w0, seq_id)\n self.clear_cached_info([seq_id])\n g = 
self.compute_seqs_gradient(w0, [seq_id])\n self.clear_cached_info([seq_id])\n delta = -epsilon * g\n w_prime = w0 + delta\n l_prime = self.compute_seq_loglikelihood(w_prime, seq_id)\n # clear seq_id info\n self.clear_cached_info([seq_id])\n # verify that l_prime = l0 + epsilon*g\n diff = numpy.abs(l0 + numpy.dot(delta, g) - l_prime)\n if epsilon in res:\n res[epsilon].append(diff)\n else:\n res[epsilon] = [diff]\n diff_concat = []\n for eps, diff_array in res.items():\n print(\"epsilon = \", eps)\n print(\"difference across 5 random initializations of w \", diff_array)\n diff_concat += diff_array\n avg_diff = numpy.mean(numpy.asarray(diff_concat))\n print(\n \"Average gradient difference across all epsilons and initializations is \",\n avg_diff,\n )\n return avg_diff\n\n def validate_expected_featuresum(self, w, seqs_id):\n \"\"\"validate expected feature computation\n \n Args:\n w: weight vector (numpy vector)\n seqs_id: list of integers representing unique id assigned to the sequences\n \"\"\"\n self.clear_cached_info(seqs_id)\n grad = self.compute_seqs_gradient(w, seqs_id)\n abs_grad = numpy.abs(grad)\n avg_diff = numpy.mean(abs_grad)\n print(\n \"difference between empirical feature sum and model's expected feature sum: \\n {}\".format(\n avg_diff\n )\n )\n print(\"average difference is {}\".format(avg_diff))\n self.clear_cached_info(seqs_id)\n return avg_diff\n\n\nif __name__ == \"__main__\":\n pass\n"
] | [
[
"numpy.arange",
"numpy.max",
"numpy.append",
"numpy.ceil",
"numpy.exp"
],
[
"numpy.dot",
"numpy.abs",
"numpy.min",
"numpy.asarray",
"numpy.concatenate",
"numpy.random.randn",
"numpy.mean",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
r-peng/pyscf | [
"9a14f9bcc63bc75f5939cb4d00eb47861d8d8989"
] | [
"pyscf/cc/__init__.py"
] | [
"# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n'''\nCoupled Cluster\n===============\n\nSimple usage::\n\n >>> from pyscf import gto, scf, cc\n >>> mol = gto.M(atom='H 0 0 0; H 0 0 1')\n >>> mf = scf.RHF(mol).run()\n >>> cc.CCSD(mf).run()\n\n:func:`cc.CCSD` returns an instance of CCSD class. Followings are parameters\nto control CCSD calculation.\n\n verbose : int\n Print level. Default value equals to :class:`Mole.verbose`\n max_memory : float or int\n Allowed memory in MB. Default value equals to :class:`Mole.max_memory`\n conv_tol : float\n converge threshold. Default is 1e-7.\n conv_tol_normt : float\n converge threshold for norm(t1,t2). Default is 1e-5.\n max_cycle : int\n max number of iterations. Default is 50.\n diis_space : int\n DIIS space size. Default is 6.\n diis_start_cycle : int\n The step to start DIIS. Default is 0.\n direct : bool\n AO-direct CCSD. Default is False.\n async_io : bool\n Allow for asynchronous function execution. Default is True.\n incore_complete : bool\n Avoid all I/O. Default is False.\n frozen : int or list\n If integer is given, the inner-most orbitals are frozen from CC\n amplitudes. Given the orbital indices (0-based) in a list, both\n occupied and virtual orbitals can be frozen in CC calculation.\n\n\nSaved results\n\n converged : bool\n CCSD converged or not\n e_tot : float\n Total CCSD energy (HF + correlation)\n t1, t2 : \n t1[i,a], t2[i,j,a,b] (i,j in occ, a,b in virt)\n l1, l2 : \n Lambda amplitudes l1[i,a], l2[i,j,a,b] (i,j in occ, a,b in virt)\n'''\n\nfrom pyscf.cc import ccsd\nfrom pyscf.cc import ccsd_lambda\nfrom pyscf.cc import ccsd_rdm\nfrom pyscf.cc import addons\nfrom pyscf.cc import rccsd\nfrom pyscf.cc import uccsd\nfrom pyscf.cc import gccsd\nfrom pyscf.cc import eom_rccsd\nfrom pyscf.cc import eom_uccsd\nfrom pyscf.cc import eom_gccsd\nfrom pyscf import scf\n\ndef CCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):\n __doc__ = ccsd.CCSD.__doc__\n if isinstance(mf, scf.uhf.UHF):\n return UCCSD(mf, frozen, mo_coeff, mo_occ)\n elif isinstance(mf, scf.ghf.GHF):\n return GCCSD(mf, frozen, mo_coeff, mo_occ)\n else:\n return RCCSD(mf, frozen, mo_coeff, mo_occ)\n\nscf.hf.SCF.CCSD = CCSD\n\n\ndef RCCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):\n __doc__ = ccsd.CCSD.__doc__\n import numpy\n from pyscf import lib\n from pyscf.soscf import newton_ah\n from pyscf.cc import dfccsd\n\n if isinstance(mf, scf.uhf.UHF):\n raise RuntimeError('RCCSD cannot be used with UHF method.')\n elif isinstance(mf, scf.rohf.ROHF):\n lib.logger.warn(mf, 'RCCSD method does not support ROHF method. 
ROHF object '\n 'is converted to UHF object and UCCSD method is called.')\n mf = scf.addons.convert_to_uhf(mf)\n return UCCSD(mf, frozen, mo_coeff, mo_occ)\n\n if isinstance(mf, newton_ah._CIAH_SOSCF) or not isinstance(mf, scf.hf.RHF):\n mf = scf.addons.convert_to_rhf(mf)\n\n if getattr(mf, 'with_df', None):\n return dfccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)\n\n elif numpy.iscomplexobj(mo_coeff) or numpy.iscomplexobj(mf.mo_coeff):\n return rccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)\n\n else:\n return ccsd.CCSD(mf, frozen, mo_coeff, mo_occ)\n\n\ndef UCCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):\n __doc__ = uccsd.UCCSD.__doc__\n from pyscf.soscf import newton_ah\n\n if isinstance(mf, newton_ah._CIAH_SOSCF) or not isinstance(mf, scf.uhf.UHF):\n mf = scf.addons.convert_to_uhf(mf)\n\n if getattr(mf, 'with_df', None):\n raise NotImplementedError('DF-UCCSD')\n else:\n return uccsd.UCCSD(mf, frozen, mo_coeff, mo_occ)\n\n\ndef GCCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):\n __doc__ = gccsd.GCCSD.__doc__\n from pyscf.soscf import newton_ah\n\n if isinstance(mf, newton_ah._CIAH_SOSCF) or not isinstance(mf, scf.ghf.GHF):\n mf = scf.addons.convert_to_ghf(mf)\n\n if getattr(mf, 'with_df', None):\n raise NotImplementedError('DF-GCCSD')\n else:\n return gccsd.GCCSD(mf, frozen, mo_coeff, mo_occ)\n\n\ndef FNOCCSD(mf, thresh=1e-6, pct_occ=None, nvir_act=None):\n \"\"\"Frozen natural orbital CCSD\n\n Attributes:\n thresh : float\n Threshold on NO occupation numbers. Default is 1e-6.\n pct_occ : float\n Percentage of total occupation number. Default is None. If present, overrides `thresh`.\n \"\"\"\n #from pyscf import mp\n #pt = mp.MP2(mf).set(verbose=0).run()\n from pyscf.mp.mp2 import MP2\n pt = MP2(mf).set(verbose=0).run()\n frozen, no_coeff = pt.make_fno(thresh=thresh, pct_occ=pct_occ, nvir_act=nvir_act)\n #pt_no = mp.MP2(mf, frozen=frozen, mo_coeff=no_coeff).set(verbose=0).run() #avoid DF\n pt_no = MP2(mf, frozen=frozen, mo_coeff=no_coeff).set(verbose=0).run()\n mycc = ccsd.CCSD(mf, frozen=frozen, mo_coeff=no_coeff) #avoid DF\n mycc.delta_emp2 = pt.e_corr - pt_no.e_corr\n from pyscf.lib import logger\n def _finalize(self):\n '''Hook for dumping results and clearing up the object.'''\n if self.converged:\n logger.info(self, 'FNO-%s converged', self.__class__.__name__)\n else:\n logger.note(self, 'FNO-%s not converged', self.__class__.__name__)\n logger.note(self, 'E(FNO-%s) = %.16g E_corr = %.16g',\n self.__class__.__name__, self.e_tot, self.e_corr)\n logger.note(self, 'E(FNO-%s+delta-MP2) = %.16g E_corr = %.16g',\n self.__class__.__name__, self.e_tot+self.delta_emp2, \n self.e_corr+self.delta_emp2)\n return self\n mycc._finalize = _finalize.__get__(mycc, mycc.__class__)\n return mycc\n"
] | [
[
"numpy.iscomplexobj"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
toobaz/statsmodels | [
"2d4aad9a14619ce0c84d4c7bca9dacd66b2be566",
"2d4aad9a14619ce0c84d4c7bca9dacd66b2be566",
"2d4aad9a14619ce0c84d4c7bca9dacd66b2be566",
"2d4aad9a14619ce0c84d4c7bca9dacd66b2be566"
] | [
"statsmodels/tsa/vector_ar/irf.py",
"statsmodels/sandbox/distributions/tests/_est_fit.py",
"statsmodels/nonparametric/bandwidths.py",
"statsmodels/tsa/vector_ar/dynamic.py"
] | [
"\"\"\"\nImpulse reponse-related code\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\nimport numpy.linalg as la\nimport scipy.linalg as L\n\nfrom scipy import stats\n\nfrom statsmodels.tools.decorators import cache_readonly\nfrom statsmodels.tools.tools import chain_dot\n#from statsmodels.tsa.api import VAR\n\nimport statsmodels.tsa.tsatools as tsa\nimport statsmodels.tsa.vector_ar.plotting as plotting\nimport statsmodels.tsa.vector_ar.util as util\n\nmat = np.array\n\nclass BaseIRAnalysis(object):\n \"\"\"\n Base class for plotting and computing IRF-related statistics, want to be\n able to handle known and estimated processes\n \"\"\"\n\n def __init__(self, model, P=None, periods=10, order=None, svar=False):\n self.model = model\n self.periods = periods\n self.neqs, self.lags, self.T = model.neqs, model.k_ar, model.nobs\n\n self.order = order\n\n if P is None:\n sigma = model.sigma_u\n\n # TODO, may be difficult at the moment\n # if order is not None:\n # indexer = [model.get_eq_index(name) for name in order]\n # sigma = sigma[:, indexer][indexer, :]\n\n # if sigma.shape != model.sigma_u.shape:\n # raise ValueError('variable order is wrong length')\n\n P = la.cholesky(sigma)\n\n self.P = P\n\n self.svar = svar\n\n self.irfs = model.ma_rep(periods)\n if svar:\n self.svar_irfs = model.svar_ma_rep(periods, P=P)\n else:\n self.orth_irfs = model.orth_ma_rep(periods)\n\n self.cum_effects = self.irfs.cumsum(axis=0)\n if svar:\n self.svar_cum_effects = self.svar_irfs.cumsum(axis=0)\n else:\n self.orth_cum_effects = self.orth_irfs.cumsum(axis=0)\n\n self.lr_effects = model.long_run_effects()\n if svar:\n self.svar_lr_effects = np.dot(model.long_run_effects(), P)\n else:\n self.orth_lr_effects = np.dot(model.long_run_effects(), P)\n\n\n # auxiliary stuff\n self._A = util.comp_matrix(model.coefs)\n\n def cov(self, *args, **kwargs):\n raise NotImplementedError\n\n def cum_effect_cov(self, *args, **kwargs):\n raise NotImplementedError\n\n def plot(self, orth=False, impulse=None, response=None,\n signif=0.05, plot_params=None, subplot_params=None,\n plot_stderr=True, stderr_type='asym', repl=1000,\n seed=None, component=None):\n \"\"\"\n Plot impulse responses\n\n Parameters\n ----------\n orth : bool, default False\n Compute orthogonalized impulse responses\n impulse : string or int\n variable providing the impulse\n response : string or int\n variable affected by the impulse\n signif : float (0 < signif < 1)\n Significance level for error bars, defaults to 95% CI\n subplot_params : dict\n To pass to subplot plotting funcions. 
Example: if fonts are too big,\n pass {'fontsize' : 8} or some number to your taste.\n plot_params : dict\n\n plot_stderr: bool, default True\n Plot standard impulse response error bands\n stderr_type: string\n 'asym': default, computes asymptotic standard errors\n 'mc': monte carlo standard errors (use rpl)\n repl: int, default 1000\n Number of replications for Monte Carlo and Sims-Zha standard errors\n seed: int\n np.random.seed for Monte Carlo replications\n component: array or vector of principal component indices\n \"\"\"\n periods = self.periods\n model = self.model\n svar = self.svar\n\n if orth and svar:\n raise ValueError(\"For SVAR system, set orth=False\")\n\n if orth:\n title = 'Impulse responses (orthogonalized)'\n irfs = self.orth_irfs\n elif svar:\n title = 'Impulse responses (structural)'\n irfs = self.svar_irfs\n else:\n title = 'Impulse responses'\n irfs = self.irfs\n\n if plot_stderr == False:\n stderr = None\n\n elif stderr_type not in ['asym', 'mc', 'sz1', 'sz2','sz3']:\n raise ValueError(\"Error type must be either 'asym', 'mc','sz1','sz2', or 'sz3'\")\n else:\n if stderr_type == 'asym':\n stderr = self.cov(orth=orth)\n if stderr_type == 'mc':\n stderr = self.errband_mc(orth=orth, svar=svar,\n repl=repl, signif=signif,\n seed=seed)\n if stderr_type == 'sz1':\n stderr = self.err_band_sz1(orth=orth, svar=svar,\n repl=repl, signif=signif,\n seed=seed,\n component=component)\n if stderr_type == 'sz2':\n stderr = self.err_band_sz2(orth=orth, svar=svar,\n repl=repl, signif=signif,\n seed=seed,\n component=component)\n if stderr_type == 'sz3':\n stderr = self.err_band_sz3(orth=orth, svar=svar,\n repl=repl, signif=signif,\n seed=seed,\n component=component)\n\n plotting.irf_grid_plot(irfs, stderr, impulse, response,\n self.model.names, title, signif=signif,\n subplot_params=subplot_params,\n plot_params=plot_params, stderr_type=stderr_type)\n\n def plot_cum_effects(self, orth=False, impulse=None, response=None,\n signif=0.05, plot_params=None,\n subplot_params=None, plot_stderr=True,\n stderr_type='asym', repl=1000, seed=None):\n \"\"\"\n Plot cumulative impulse response functions\n\n Parameters\n ----------\n orth : bool, default False\n Compute orthogonalized impulse responses\n impulse : string or int\n variable providing the impulse\n response : string or int\n variable affected by the impulse\n signif : float (0 < signif < 1)\n Significance level for error bars, defaults to 95% CI\n subplot_params : dict\n To pass to subplot plotting funcions. 
Example: if fonts are too big,\n pass {'fontsize' : 8} or some number to your taste.\n plot_params : dict\n\n plot_stderr: bool, default True\n Plot standard impulse response error bands\n stderr_type: string\n 'asym': default, computes asymptotic standard errors\n 'mc': monte carlo standard errors (use rpl)\n repl: int, default 1000\n Number of replications for monte carlo standard errors\n seed: int\n np.random.seed for Monte Carlo replications\n\n \"\"\"\n\n if orth:\n title = 'Cumulative responses responses (orthogonalized)'\n cum_effects = self.orth_cum_effects\n lr_effects = self.orth_lr_effects\n else:\n title = 'Cumulative responses'\n cum_effects = self.cum_effects\n lr_effects = self.lr_effects\n\n if stderr_type not in ['asym', 'mc']:\n raise TypeError\n else:\n if stderr_type == 'asym':\n stderr = self.cum_effect_cov(orth=orth)\n if stderr_type == 'mc':\n stderr = self.cum_errband_mc(orth=orth, repl=repl,\n signif=signif, seed=seed)\n if not plot_stderr:\n stderr = None\n\n plotting.irf_grid_plot(cum_effects, stderr, impulse, response,\n self.model.names, title, signif=signif,\n hlines=lr_effects, subplot_params=subplot_params,\n plot_params=plot_params, stderr_type=stderr_type)\n\nclass IRAnalysis(BaseIRAnalysis):\n \"\"\"\n Impulse response analysis class. Computes impulse responses, asymptotic\n standard errors, and produces relevant plots\n\n Parameters\n ----------\n model : VAR instance\n\n Notes\n -----\n Using Lutkepohl (2005) notation\n \"\"\"\n def __init__(self, model, P=None, periods=10, order=None, svar=False):\n BaseIRAnalysis.__init__(self, model, P=P, periods=periods,\n order=order, svar=svar)\n\n self.cov_a = model._cov_alpha\n self.cov_sig = model._cov_sigma\n\n # memoize dict for G matrix function\n self._g_memo = {}\n\n def cov(self, orth=False):\n \"\"\"\n Compute asymptotic standard errors for impulse response coefficients\n\n Notes\n -----\n Lutkepohl eq 3.7.5\n\n Returns\n -------\n \"\"\"\n if orth:\n return self._orth_cov()\n\n covs = self._empty_covm(self.periods + 1)\n covs[0] = np.zeros((self.neqs ** 2, self.neqs ** 2))\n for i in range(1, self.periods + 1):\n Gi = self.G[i - 1]\n covs[i] = chain_dot(Gi, self.cov_a, Gi.T)\n\n return covs\n\n def errband_mc(self, orth=False, svar=False, repl=1000,\n signif=0.05, seed=None, burn=100):\n \"\"\"\n IRF Monte Carlo integrated error bands\n \"\"\"\n model = self.model\n periods = self.periods\n if svar == True:\n return model.sirf_errband_mc(orth=orth, repl=repl, T=periods,\n signif=signif, seed=seed,\n burn=burn, cum=False)\n else:\n return model.irf_errband_mc(orth=orth, repl=repl, T=periods,\n signif=signif, seed=seed,\n burn=burn, cum=False)\n def err_band_sz1(self, orth=False, svar=False, repl=1000,\n signif=0.05, seed=None, burn=100, component=None):\n \"\"\"\n IRF Sims-Zha error band method 1. Assumes symmetric error bands around\n mean.\n\n Parameters\n ----------\n orth : bool, default False\n Compute orthogonalized impulse responses\n repl : int, default 1000\n Number of MC replications\n signif : float (0 < signif < 1)\n Significance level for error bars, defaults to 95% CI\n seed : int, default None\n np.random seed\n burn : int, default 100\n Number of initial simulated obs to discard\n component : neqs x neqs array, default to largest for each\n Index of column of eigenvector/value to use for each error band\n Note: period of impulse (t=0) is not included when computing\n principle component\n\n References\n ----------\n Sims, Christopher A., and Tao Zha. 1999. 
\"Error Bands for Impulse\n Response\". Econometrica 67: 1113-1155.\n \"\"\"\n\n model = self.model\n periods = self.periods\n if orth:\n irfs = self.orth_irfs\n elif svar:\n irfs = self.svar_irfs\n else:\n irfs = self.irfs\n neqs = self.neqs\n irf_resim = model.irf_resim(orth=orth, repl=repl, T=periods, seed=seed,\n burn=100)\n q = util.norm_signif_level(signif)\n\n W, eigva, k =self._eigval_decomp_SZ(irf_resim)\n\n if component != None:\n if np.shape(component) != (neqs,neqs):\n raise ValueError(\"Component array must be \" + str(neqs) + \" x \" + str(neqs))\n if np.argmax(component) >= neqs*periods:\n raise ValueError(\"Atleast one of the components does not exist\")\n else:\n k = component\n\n # here take the kth column of W, which we determine by finding the largest eigenvalue of the covaraince matrix\n lower = np.copy(irfs)\n upper = np.copy(irfs)\n for i in xrange(neqs):\n for j in xrange(neqs):\n lower[1:,i,j] = irfs[1:,i,j] + W[i,j,:,k[i,j]]*q*np.sqrt(eigva[i,j,k[i,j]])\n upper[1:,i,j] = irfs[1:,i,j] - W[i,j,:,k[i,j]]*q*np.sqrt(eigva[i,j,k[i,j]])\n\n\n return lower, upper\n\n def err_band_sz2(self, orth=False, repl=1000, signif=0.05,\n seed=None, burn=100, component=None):\n \"\"\"\n IRF Sims-Zha error band method 2.\n\n This method Does not assume symmetric error bands around mean.\n\n Parameters\n ----------\n orth : bool, default False\n Compute orthogonalized impulse responses\n repl : int, default 1000\n Number of MC replications\n signif : float (0 < signif < 1)\n Significance level for error bars, defaults to 95% CI\n seed : int, default None\n np.random seed\n burn : int, default 100\n Number of initial simulated obs to discard\n component : neqs x neqs array, default to largest for each\n Index of column of eigenvector/value to use for each error band\n Note: period of impulse (t=0) is not included when computing\n principle component\n\n References\n ----------\n Sims, Christopher A., and Tao Zha. 1999. \"Error Bands for Impulse\n Response\". Econometrica 67: 1113-1155.\n \"\"\"\n model = self.model\n periods = self.periods\n if orth:\n irfs = self.orth_irfs\n elif svar:\n irfs = self.svar_irfs\n else:\n irfs = self.irfs\n neqs = self.neqs\n irf_resim = model.irf_resim(orth=orth, repl=repl, T=periods, seed=seed,\n burn=100)\n\n W, eigva, k = self._eigval_decomp_SZ(irf_resim)\n\n if component != None:\n if np.shape(component) != (neqs,neqs):\n raise ValueError(\"Component array must be \" + str(neqs) + \" x \" + str(neqs))\n if np.argmax(component) >= neqs*periods:\n raise ValueError(\"Atleast one of the components does not exist\")\n else:\n k = component\n\n gamma = np.zeros((repl, periods+1, neqs, neqs))\n for p in xrange(repl):\n for i in xrange(neqs):\n for j in xrange(neqs):\n gamma[p,1:,i,j] = W[i,j,k[i,j],:] * irf_resim[p,1:,i,j]\n\n gamma_sort = np.sort(gamma, axis=0) #sort to get quantiles\n indx = round(signif/2*repl)-1,round((1-signif/2)*repl)-1\n\n lower = np.copy(irfs)\n upper = np.copy(irfs)\n for i in xrange(neqs):\n for j in xrange(neqs):\n lower[:,i,j] = irfs[:,i,j] + gamma_sort[indx[0],:,i,j]\n upper[:,i,j] = irfs[:,i,j] + gamma_sort[indx[1],:,i,j]\n\n return lower, upper\n\n def err_band_sz3(self, orth=False, repl=1000, signif=0.05,\n seed=None, burn=100, component=None):\n \"\"\"\n IRF Sims-Zha error band method 3. 
Does not assume symmetric error bands around mean.\n\n Parameters\n ----------\n orth : bool, default False\n Compute orthogonalized impulse responses\n repl : int, default 1000\n Number of MC replications\n signif : float (0 < signif < 1)\n Significance level for error bars, defaults to 95% CI\n seed : int, default None\n np.random seed\n burn : int, default 100\n Number of initial simulated obs to discard\n component : vector length neqs, default to largest for each\n Index of column of eigenvector/value to use for each error band\n Note: period of impulse (t=0) is not included when computing\n principle component\n\n References\n ----------\n Sims, Christopher A., and Tao Zha. 1999. \"Error Bands for Impulse\n Response\". Econometrica 67: 1113-1155.\n \"\"\"\n\n model = self.model\n periods = self.periods\n if orth:\n irfs = self.orth_irfs\n elif svar:\n irfs = self.svar_irfs\n else:\n irfs = self.irfs\n neqs = self.neqs\n irf_resim = model.irf_resim(orth=orth, repl=repl, T=periods, seed=seed,\n burn=100)\n stack = np.zeros((neqs, repl, periods*neqs))\n\n #stack left to right, up and down\n\n for p in xrange(repl):\n for i in xrange(neqs):\n stack[i, p,:] = np.ravel(irf_resim[p,1:,:,i].T)\n\n stack_cov=np.zeros((neqs, periods*neqs, periods*neqs))\n W = np.zeros((neqs, periods*neqs, periods*neqs))\n eigva = np.zeros((neqs, periods*neqs))\n k = np.zeros((neqs))\n\n if component != None:\n if np.size(component) != (neqs):\n raise ValueError(\"Component array must be of length \" + str(neqs))\n if np.argmax(component) >= neqs*periods:\n raise ValueError(\"Atleast one of the components does not exist\")\n else:\n k = component\n\n #compute for eigen decomp for each stack\n for i in xrange(neqs):\n stack_cov[i] = np.cov(stack[i],rowvar=0)\n W[i], eigva[i], k[i] = util.eigval_decomp(stack_cov[i])\n\n gamma = np.zeros((repl, periods+1, neqs, neqs))\n for p in xrange(repl):\n c=0\n for j in xrange(neqs):\n for i in xrange(neqs):\n gamma[p,1:,i,j] = W[j,k[j],i*periods:(i+1)*periods] * irf_resim[p,1:,i,j]\n if i == neqs-1:\n gamma[p,1:,i,j] = W[j,k[j],i*periods:] * irf_resim[p,1:,i,j]\n\n gamma_sort = np.sort(gamma, axis=0) #sort to get quantiles\n indx = round(signif/2*repl)-1,round((1-signif/2)*repl)-1\n\n lower = np.copy(irfs)\n upper = np.copy(irfs)\n for i in xrange(neqs):\n for j in xrange(neqs):\n lower[:,i,j] = irfs[:,i,j] + gamma_sort[indx[0],:,i,j]\n upper[:,i,j] = irfs[:,i,j] + gamma_sort[indx[1],:,i,j]\n\n return lower, upper\n\n def _eigval_decomp_SZ(self, irf_resim):\n \"\"\"\n Returns\n -------\n W: array of eigenvectors\n eigva: list of eigenvalues\n k: matrix indicating column # of largest eigenvalue for each c_i,j\n\n \"\"\"\n neqs = self.neqs\n periods = self.periods\n\n cov_hold = np.zeros((neqs, neqs, periods, periods))\n for i in xrange(neqs):\n for j in xrange(neqs):\n cov_hold[i,j,:,:] = np.cov(irf_resim[:,1:,i,j],rowvar=0)\n\n W = np.zeros((neqs, neqs, periods, periods))\n eigva = np.zeros((neqs, neqs, periods, 1))\n k = np.zeros((neqs, neqs))\n\n for i in xrange(neqs):\n for j in xrange(neqs):\n W[i,j,:,:], eigva[i,j,:,0], k[i,j] = util.eigval_decomp(cov_hold[i,j,:,:])\n return W, eigva, k\n\n @cache_readonly\n def G(self):\n # Gi matrices as defined on p. 111\n\n K = self.neqs\n\n # nlags = self.model.p\n # J = np.hstack((np.eye(K),) + (np.zeros((K, K)),) * (nlags - 1))\n\n def _make_g(i):\n # p. 
111 Lutkepohl\n G = 0.\n for m in range(i):\n # be a bit cute to go faster\n idx = i - 1 - m\n if idx in self._g_memo:\n apow = self._g_memo[idx]\n else:\n apow = la.matrix_power(self._A.T, idx)\n # apow = np.dot(J, apow)\n apow = apow[:K]\n self._g_memo[idx] = apow\n\n # take first K rows\n piece = np.kron(apow, self.irfs[m])\n G = G + piece\n\n return G\n\n return [_make_g(i) for i in range(1, self.periods + 1)]\n\n def _orth_cov(self):\n # Lutkepohl 3.7.8\n\n Ik = np.eye(self.neqs)\n PIk = np.kron(self.P.T, Ik)\n H = self.H\n\n covs = self._empty_covm(self.periods + 1)\n for i in range(self.periods + 1):\n if i == 0:\n apiece = 0\n else:\n Ci = np.dot(PIk, self.G[i-1])\n apiece = chain_dot(Ci, self.cov_a, Ci.T)\n\n Cibar = np.dot(np.kron(Ik, self.irfs[i]), H)\n bpiece = chain_dot(Cibar, self.cov_sig, Cibar.T) / self.T\n\n # Lutkepohl typo, cov_sig correct\n covs[i] = apiece + bpiece\n\n return covs\n\n def cum_effect_cov(self, orth=False):\n \"\"\"\n Compute asymptotic standard errors for cumulative impulse response\n coefficients\n\n Parameters\n ----------\n orth : boolean\n\n Notes\n -----\n eq. 3.7.7 (non-orth), 3.7.10 (orth)\n\n Returns\n -------\n\n \"\"\"\n Ik = np.eye(self.neqs)\n PIk = np.kron(self.P.T, Ik)\n\n F = 0.\n covs = self._empty_covm(self.periods + 1)\n for i in range(self.periods + 1):\n if i > 0:\n F = F + self.G[i - 1]\n\n if orth:\n if i == 0:\n apiece = 0\n else:\n Bn = np.dot(PIk, F)\n apiece = chain_dot(Bn, self.cov_a, Bn.T)\n\n Bnbar = np.dot(np.kron(Ik, self.cum_effects[i]), self.H)\n bpiece = chain_dot(Bnbar, self.cov_sig, Bnbar.T) / self.T\n\n covs[i] = apiece + bpiece\n else:\n if i == 0:\n covs[i] = np.zeros((self.neqs**2, self.neqs**2))\n continue\n\n covs[i] = chain_dot(F, self.cov_a, F.T)\n\n return covs\n\n def cum_errband_mc(self, orth=False, repl=1000,\n signif=0.05, seed=None, burn=100):\n \"\"\"\n IRF Monte Carlo integrated error bands of cumulative effect\n \"\"\"\n model = self.model\n periods = self.periods\n return model.irf_errband_mc(orth=orth, repl=repl,\n T=periods, signif=signif, seed=seed, burn=burn, cum=True)\n\n def lr_effect_cov(self, orth=False):\n \"\"\"\n Returns\n -------\n\n \"\"\"\n lre = self.lr_effects\n Finfty = np.kron(np.tile(lre.T, self.lags), lre)\n Ik = np.eye(self.neqs)\n\n if orth:\n Binf = np.dot(np.kron(self.P.T, np.eye(self.neqs)), Finfty)\n Binfbar = np.dot(np.kron(Ik, lre), self.H)\n\n return (chain_dot(Binf, self.cov_a, Binf.T) +\n chain_dot(Binfbar, self.cov_sig, Binfbar.T))\n else:\n return chain_dot(Finfty, self.cov_a, Finfty.T)\n\n def stderr(self, orth=False):\n return np.array([tsa.unvec(np.sqrt(np.diag(c)))\n for c in self.cov(orth=orth)])\n\n def cum_effect_stderr(self, orth=False):\n return np.array([tsa.unvec(np.sqrt(np.diag(c)))\n for c in self.cum_effect_cov(orth=orth)])\n\n def lr_effect_stderr(self, orth=False):\n cov = self.lr_effect_cov(orth=orth)\n return tsa.unvec(np.sqrt(np.diag(cov)))\n\n def _empty_covm(self, periods):\n return np.zeros((periods, self.neqs ** 2, self.neqs ** 2),\n dtype=float)\n\n @cache_readonly\n def H(self):\n k = self.neqs\n Lk = tsa.elimination_matrix(k)\n Kkk = tsa.commutation_matrix(k, k)\n Ik = np.eye(k)\n\n # B = chain_dot(Lk, np.eye(k**2) + commutation_matrix(k, k),\n # np.kron(self.P, np.eye(k)), Lk.T)\n\n # return np.dot(Lk.T, L.inv(B))\n\n B = chain_dot(Lk,\n np.dot(np.kron(Ik, self.P), Kkk) + np.kron(self.P, Ik),\n Lk.T)\n\n return np.dot(Lk.T, L.inv(B))\n\n def fevd_table(self):\n pass\n\n\n",
"# NOTE: contains only one test, _est_cont_fit, that is renamed so that\n# nose doesn't run it\n# I put this here for the record and for the case when someone wants to\n# verify the quality of fit\n# with current parameters: relatively small sample size, default starting values\n# Ran 84 tests in 401.797s\n# FAILED (failures=15)\n\n\nimport numpy.testing as npt\nimport numpy as np\n\nfrom scipy import stats\n\nfrom distparams import distcont\n\n# this is not a proper statistical test for convergence, but only\n# verifies that the estimate and true values don't differ by too much\nn_repl1 = 1000 # sample size for first run\nn_repl2 = 5000 # sample size for second run, if first run fails\nthresh_percent = 0.25 # percent of true parameters for fail cut-off\nthresh_min = 0.75 # minimum difference estimate - true to fail test\n\n#distcont = [['genextreme', (3.3184017469423535,)]]\n\ndef _est_cont_fit():\n # this tests the closeness of the estimated parameters to the true\n # parameters with fit method of continuous distributions\n # Note: is slow, some distributions don't converge with sample size <= 10000\n\n for distname, arg in distcont:\n yield check_cont_fit, distname,arg\n\n\ndef check_cont_fit(distname,arg):\n distfn = getattr(stats, distname)\n rvs = distfn.rvs(size=n_repl1,*arg)\n est = distfn.fit(rvs) #,*arg) # start with default values\n\n truearg = np.hstack([arg,[0.0,1.0]])\n diff = est-truearg\n\n txt = ''\n diffthreshold = np.max(np.vstack([truearg*thresh_percent,\n np.ones(distfn.numargs+2)*thresh_min]),0)\n # threshold for location\n diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,thresh_min])\n\n if np.any(np.isnan(est)):\n raise AssertionError('nan returned in fit')\n else:\n if np.any((np.abs(diff) - diffthreshold) > 0.0):\n## txt = 'WARNING - diff too large with small sample'\n## print 'parameter diff =', diff - diffthreshold, txt\n rvs = np.concatenate([rvs,distfn.rvs(size=n_repl2-n_repl1,*arg)])\n est = distfn.fit(rvs) #,*arg)\n truearg = np.hstack([arg,[0.0,1.0]])\n diff = est-truearg\n if np.any((np.abs(diff) - diffthreshold) > 0.0):\n txt = 'parameter: %s\\n' % str(truearg)\n txt += 'estimated: %s\\n' % str(est)\n txt += 'diff : %s\\n' % str(diff)\n raise AssertionError('fit not very good in %s\\n' % distfn.name + txt)\n\n\n\nif __name__ == \"__main__\":\n import nose\n #nose.run(argv=['', __file__])\n nose.runmodule(argv=[__file__,'-s'], exit=False)\n",
"import numpy as np\nfrom scipy.stats import scoreatpercentile as sap\n\n#from scipy.stats import norm\n\ndef _select_sigma(X):\n \"\"\"\n Returns the smaller of std(X, ddof=1) or normalized IQR(X) over axis 0.\n\n References\n ----------\n Silverman (1986) p.47\n \"\"\"\n# normalize = norm.ppf(.75) - norm.ppf(.25)\n normalize = 1.349\n# IQR = np.subtract.reduce(percentile(X, [75,25],\n# axis=axis), axis=axis)/normalize\n IQR = (sap(X, 75) - sap(X, 25))/normalize\n return np.minimum(np.std(X, axis=0, ddof=1), IQR)\n\n\n## Univariate Rule of Thumb Bandwidths ##\ndef bw_scott(x):\n \"\"\"\n Scott's Rule of Thumb\n\n Parameters\n ----------\n x : array-like\n Array for which to get the bandwidth\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n Notes\n -----\n Returns 1.059 * A * n ** (-1/5.) where ::\n\n A = min(std(x, ddof=1), IQR/1.349)\n IQR = np.subtract.reduce(np.percentile(x, [75,25]))\n\n References\n ----------\n\n Scott, D.W. (1992) Multivariate Density Estimation: Theory, Practice, and\n Visualization.\n \"\"\"\n A = _select_sigma(x)\n n = len(x)\n return 1.059 * A * n ** -.2\n\ndef bw_silverman(x):\n \"\"\"\n Silverman's Rule of Thumb\n\n Parameters\n ----------\n x : array-like\n Array for which to get the bandwidth\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n Notes\n -----\n Returns .9 * A * n ** (-1/5.) where ::\n\n A = min(std(x, ddof=1), IQR/1.349)\n IQR = np.subtract.reduce(np.percentile(x, [75,25]))\n\n References\n ----------\n\n Silverman, B.W. (1986) `Density Estimation.`\n \"\"\"\n A = _select_sigma(x)\n n = len(x)\n return .9 * A * n ** -.2\n\n## Plug-In Methods ##\n\n## Least Squares Cross-Validation ##\n\n## Helper Functions ##\n\nbandwidth_funcs = dict(scott=bw_scott,silverman=bw_silverman)\n\ndef select_bandwidth(x, bw, kernel):\n \"\"\"\n Selects bandwidth for a selection rule bw\n\n this is a wrapper around existing bandwidth selection rules\n\n Parameters\n ----------\n x : array-like\n Array for which to get the bandwidth\n bw : string\n name of bandwidth selection rule, currently \"scott\" and \"silverman\"\n are supported\n kernel : not used yet\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n \"\"\"\n bw = bw.lower()\n if bw not in [\"scott\",\"silverman\"]:\n raise ValueError(\"Bandwidth %s not understood\" % bw)\n#TODO: uncomment checks when we have non-rule of thumb bandwidths for diff. kernels\n# if kernel == \"gauss\":\n return bandwidth_funcs[bw](x)\n# else:\n# raise ValueError(\"Only Gaussian Kernels are currently supported\")\n\n",
"# pylint: disable=W0201\n\nimport numpy as np\n\nfrom statsmodels.tools.decorators import cache_readonly\n\nimport var_model as _model\nimport util\nimport plotting\n\nFULL_SAMPLE = 0\nROLLING = 1\nEXPANDING = 2\n\ntry:\n import pandas as pn\nexcept ImportError:\n pass\n\ndef _get_window_type(window_type):\n if window_type in (FULL_SAMPLE, ROLLING, EXPANDING):\n return window_type\n elif isinstance(window_type, basestring):\n window_type_up = window_type.upper()\n\n if window_type_up in ('FULL SAMPLE', 'FULL_SAMPLE'):\n return FULL_SAMPLE\n elif window_type_up == 'ROLLING':\n return ROLLING\n elif window_type_up == 'EXPANDING':\n return EXPANDING\n\n raise Exception('Unrecognized window type: %s' % window_type)\n\ndef require_pandas():\n try:\n import pandas as pn\n except ImportError:\n raise ImportError('pandas is required to use this code (for now)')\n\nclass DynamicVAR(object):\n \"\"\"\n Estimates time-varying vector autoregression (VAR(p)) using\n equation-by-equation least squares\n\n Parameters\n ----------\n data : pandas.DataFrame\n lag_order : int, default 1\n window : int\n window_type : {'expanding', 'rolling'}\n min_periods : int or None\n Minimum number of observations to require in window, defaults to window\n size if None specified\n trend : {'c', 'nc', 'ct', 'ctt'}\n TODO\n\n Returns\n -------\n **Attributes**:\n\n coefs : WidePanel\n items : coefficient names\n major_axis : dates\n minor_axis : VAR equation names\n \"\"\"\n def __init__(self, data, lag_order=1, window=None, window_type='expanding',\n trend='c', min_periods=None):\n require_pandas()\n\n self.lag_order = lag_order\n\n self.names = list(data.columns)\n self.neqs = len(self.names)\n\n self._y_orig = data\n\n # TODO: deal with trend\n self._x_orig = _make_lag_matrix(data, lag_order)\n self._x_orig['intercept'] = 1\n\n (self.y, self.x, self.x_filtered, self._index,\n self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)\n\n self.lag_order = lag_order\n self.trendorder = util.get_trendorder(trend)\n\n self._set_window(window_type, window, min_periods)\n\n def _set_window(self, window_type, window, min_periods):\n self._window_type = _get_window_type(window_type)\n\n if self._is_rolling:\n if window is None:\n raise Exception('Must pass window when doing rolling '\n 'regression')\n\n if min_periods is None:\n min_periods = window\n else:\n window = len(self.x)\n if min_periods is None:\n min_periods = 1\n\n self._window = int(window)\n self._min_periods = min_periods\n\n @cache_readonly\n def T(self):\n \"\"\"\n Number of time periods in results\n \"\"\"\n return len(self.result_index)\n\n @property\n def nobs(self):\n # Stub, do I need this?\n data = dict((eq, r.nobs) for eq, r in self.equations.iteritems())\n return pn.DataFrame(data)\n\n @cache_readonly\n def equations(self):\n eqs = {}\n for col, ts in self.y.iteritems():\n model = pn.ols(y=ts, x=self.x, window=self._window,\n window_type=self._window_type,\n min_periods=self._min_periods)\n\n eqs[col] = model\n\n return eqs\n\n @cache_readonly\n def coefs(self):\n \"\"\"\n Return dynamic regression coefficients as WidePanel\n \"\"\"\n data = {}\n for eq, result in self.equations.iteritems():\n data[eq] = result.beta\n\n panel = pn.WidePanel.fromDict(data)\n\n # Coefficient names become items\n return panel.swapaxes('items', 'minor')\n\n @property\n def result_index(self):\n return self.coefs.major_axis\n\n @cache_readonly\n def _coefs_raw(self):\n \"\"\"\n Reshape coefficients to be more amenable to dynamic calculations\n\n Returns\n -------\n 
coefs : (time_periods x lag_order x neqs x neqs)\n \"\"\"\n coef_panel = self.coefs.copy()\n del coef_panel['intercept']\n\n coef_values = coef_panel.swapaxes('items', 'major').values\n coef_values = coef_values.reshape((len(coef_values),\n self.lag_order,\n self.neqs, self.neqs))\n\n return coef_values\n\n @cache_readonly\n def _intercepts_raw(self):\n \"\"\"\n Similar to _coefs_raw, return intercept values in easy-to-use matrix\n form\n\n Returns\n -------\n intercepts : (T x K)\n \"\"\"\n return self.coefs['intercept'].values\n\n @cache_readonly\n def resid(self):\n data = {}\n for eq, result in self.equations.iteritems():\n data[eq] = result.resid\n\n return pn.DataFrame(data)\n\n def forecast(self, steps=1):\n \"\"\"\n Produce dynamic forecast\n\n Parameters\n ----------\n steps\n\n Returns\n -------\n forecasts : pandas.DataFrame\n \"\"\"\n output = np.empty((self.T - steps, self.neqs))\n\n y_values = self.y.values\n y_index_map = dict((d, idx) for idx, d in enumerate(self.y.index))\n result_index_map = dict((d, idx) for idx, d in enumerate(self.result_index))\n\n coefs = self._coefs_raw\n intercepts = self._intercepts_raw\n\n # can only produce this many forecasts\n forc_index = self.result_index[steps:]\n for i, date in enumerate(forc_index):\n # TODO: check that this does the right thing in weird cases...\n idx = y_index_map[date] - steps\n result_idx = result_index_map[date] - steps\n\n y_slice = y_values[:idx]\n\n forcs = _model.forecast(y_slice, coefs[result_idx],\n intercepts[result_idx], steps)\n\n output[i] = forcs[-1]\n\n return pn.DataFrame(output, index=forc_index, columns=self.names)\n\n def plot_forecast(self, steps=1, figsize=(10, 10)):\n \"\"\"\n Plot h-step ahead forecasts against actual realizations of time\n series. Note that forecasts are lined up with their respective\n realizations.\n\n Parameters\n ----------\n steps :\n \"\"\"\n import matplotlib.pyplot as plt\n\n fig, axes = plt.subplots(figsize=figsize, nrows=self.neqs,\n sharex=True)\n\n forc = self.forecast(steps=steps)\n dates = forc.index\n\n y_overlay = self.y.reindex(dates)\n\n for i, col in enumerate(forc.columns):\n ax = axes[i]\n\n y_ts = y_overlay[col]\n forc_ts = forc[col]\n\n y_handle = ax.plot(dates, y_ts.values, 'k.', ms=2)\n forc_handle = ax.plot(dates, forc_ts.values, 'k-')\n\n fig.legend((y_handle, forc_handle), ('Y', 'Forecast'))\n fig.autofmt_xdate()\n\n fig.suptitle('Dynamic %d-step forecast' % steps)\n\n # pretty things up a bit\n plotting.adjust_subplots(bottom=0.15, left=0.10)\n plt.draw_if_interactive()\n\n @property\n def _is_rolling(self):\n return self._window_type == ROLLING\n\n @cache_readonly\n def r2(self):\n \"\"\"Returns the r-squared values.\"\"\"\n data = dict((eq, r.r2) for eq, r in self.equations.iteritems())\n return pn.DataFrame(data)\n\nclass DynamicPanelVAR(DynamicVAR):\n \"\"\"\n Dynamic (time-varying) panel vector autoregression using panel ordinary\n least squares\n\n Parameters\n ----------\n \"\"\"\n def __init__(self, data, lag_order=1, window=None, window_type='expanding',\n trend='c', min_periods=None):\n self.lag_order = lag_order\n self.neqs = len(data.columns)\n\n self._y_orig = data\n\n # TODO: deal with trend\n self._x_orig = _make_lag_matrix(data, lag_order)\n self._x_orig['intercept'] = 1\n\n (self.y, self.x, self.x_filtered, self._index,\n self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)\n\n self.lag_order = lag_order\n self.trendorder = util.get_trendorder(trend)\n\n self._set_window(window_type, window, min_periods)\n\n\ndef 
_filter_data(lhs, rhs):\n \"\"\"\n Data filtering routine for dynamic VAR\n\n lhs : DataFrame\n original data\n rhs : DataFrame\n lagged variables\n\n Returns\n -------\n\n \"\"\"\n def _has_all_columns(df):\n return np.isfinite(df.values).sum(1) == len(df.columns)\n\n rhs_valid = _has_all_columns(rhs)\n if not rhs_valid.all():\n pre_filtered_rhs = rhs[rhs_valid]\n else:\n pre_filtered_rhs = rhs\n\n index = lhs.index.union(rhs.index)\n if not index.equals(rhs.index) or not index.equals(lhs.index):\n rhs = rhs.reindex(index)\n lhs = lhs.reindex(index)\n\n rhs_valid = _has_all_columns(rhs)\n\n lhs_valid = _has_all_columns(lhs)\n valid = rhs_valid & lhs_valid\n\n if not valid.all():\n filt_index = rhs.index[valid]\n filtered_rhs = rhs.reindex(filt_index)\n filtered_lhs = lhs.reindex(filt_index)\n else:\n filtered_rhs, filtered_lhs = rhs, lhs\n\n return filtered_lhs, filtered_rhs, pre_filtered_rhs, index, valid\n\ndef _make_lag_matrix(x, lags):\n data = {}\n columns = []\n for i in range(1, 1 + lags):\n lagstr = 'L%d.'% i\n lag = x.shift(i).rename(columns=lambda c: lagstr + c)\n data.update(lag._series)\n columns.extend(lag.columns)\n\n return pn.DataFrame(data, columns=columns)\n\nclass Equation(object):\n \"\"\"\n Stub, estimate one equation\n \"\"\"\n\n def __init__(self, y, x):\n pass\n\nif __name__ == '__main__':\n import pandas.util.testing as ptest\n\n ptest.N = 500\n data = ptest.makeTimeDataFrame().cumsum(0)\n\n var = DynamicVAR(data, lag_order=2, window_type='expanding')\n var2 = DynamicVAR(data, lag_order=2, window=10,\n window_type='rolling')\n\n\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.sqrt",
"numpy.eye",
"numpy.linalg.matrix_power",
"numpy.kron",
"numpy.tile",
"numpy.sort",
"numpy.copy",
"numpy.cov",
"numpy.shape",
"numpy.argmax",
"numpy.size",
"numpy.linalg.cholesky",
"scipy.linalg.inv",
"numpy.ravel",
"numpy.zeros"
],
[
"numpy.isnan",
"numpy.hstack",
"numpy.abs",
"numpy.ones"
],
[
"numpy.std",
"scipy.stats.scoreatpercentile"
],
[
"pandas.util.testing.makeTimeDataFrame",
"pandas.WidePanel.fromDict",
"numpy.isfinite",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.draw_if_interactive",
"pandas.ols",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.19"
],
"scipy": [],
"tensorflow": []
}
] |
elischwat/hsfm-geomorph | [
"ddd7cd8a5434d04fef9cab7f16f15e7efde868c8"
] | [
"identify-imagery/nagap/identify-imagery-70s-90s.py"
] | [
"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.5.2\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # Identify Imagery\n\n# +\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport contextily as ctx\n\nimport os\nimport cv2\nimport fiona \nimport geopandas as gpd\nimport re\nimport pandas as pd\n# enable fiona KML driver\ngpd.io.file.fiona.drvsupport.supported_drivers['KML'] = 'rw'\n# -\n\ndata_dir = '/data2/elilouis/hsfm-geomorph/data/'\n\n# ## Open up KML Files\n# Make sure to open all layers explicitly\n\n# +\nfile_paths = ['NAGAP_1970s.kml', 'NAGAP_1980s.kml', 'NAGAP_1990s.kml']\n\ndf_list = []\nfor path in file_paths:\n path = os.path.join(data_dir, path)\n for layer in fiona.listlayers(path):\n try:\n df_list.append(gpd.read_file(path, driver='KML', layer=layer))\n except ValueError:\n None\ndf = pd.concat(df_list)\n# -\n\nlen(df)\n\ndf.head()\n\n# Change CRS to Web Mercator for easy plotting\n\ndf = df.to_crs(epsg=3857)\n\n\n# ## Parse the Description data column\n\n# +\ndef parse_description(row):\n s = row.Description\n if s == '' or s is None:\n return pd.Series([None,None,None,None,None,None,None,None,None])\n else:\n lines = re.split(r'(<br>|</br>)\\s*', s)\n src = next((i for i in lines if 'src' in i), None)\n date = next((i for i in lines if 'Date' in i), None)\n location = next((i for i in lines if 'Location' in i), None)\n roll = next((i for i in lines if 'Roll' in i), None)\n frame = next((i for i in lines if 'Frame' in i), None)\n latitude = next((i for i in lines if 'Latitude' in i), None)\n longitude = next((i for i in lines if 'Longitude' in i), None)\n altitude = next((i for i in lines if 'Altitude' in i), None)\n type_ = next((i for i in lines if 'Type' in i), None)\n return pd.Series([\n None if src is None else src.split(':')[-1].replace('\"/>', \"\").replace(\"//\", \"\"),\n None if date is None else date.split(':')[-1],\n None if location is None else location.split(':')[-1],\n None if roll is None else roll.split(':')[-1],\n None if frame is None else frame.split(':')[-1],\n None if latitude is None else latitude.split(':')[-1].replace('°', ''),\n None if longitude is None else longitude.split(':')[-1].replace('°', ''),\n None if altitude is None else altitude.split(':')[-1],\n None if type_ is None else type_.split(':')[-1]\n ])\n\ndf[['src', 'date', 'location', 'roll', 'frame', \n 'latitude', 'longitude', 'altitude', 'type']] = df.apply(parse_description, axis=1)\n# -\n\nlen(df)\n\ndf.head(3)\n\ndf[df['roll'] == '74V5']\n\n# ## Read in the AOIs\n#\n# Lets change crs to web mercator right off the bat too.\n\naoi_gdf = gpd.read_file(data_dir + 'aois.geojson')\naoi_gdf = aoi_gdf.to_crs(epsg=3857)\n\nax = aoi_gdf.plot()\nctx.add_basemap(ax)\nplt.gcf().set_size_inches(8,8)\n\nbaker_polygon = aoi_gdf[aoi_gdf.name == 'Mt. Baker'].geometry.iloc[0]\nglacier_polygon = aoi_gdf[aoi_gdf.name == 'Glacier Peak'].geometry.iloc[0]\nrainier_polygon = aoi_gdf[aoi_gdf.name == 'Mt. 
Rainier'].geometry.iloc[0]\n\n# ## Look at locations of all images\n\nlen(df.date.unique())\n\nsrc = df[df.geometry.type=='Point']\nax = src.plot(markersize=0.25, facecolor='red')\nctx.add_basemap(ax)\nplt.gcf().set_size_inches(16,16)\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\n\nsns.distplot(src.geometry.to_crs('epsg:4326').x)\n\nsrc = df[df.geometry.type=='Point']\nsrc = src[src.geometry.to_crs('epsg:4326').y < 49]\nsrc = src[src.geometry.to_crs('epsg:4326').y > 45]\nsrc = src[src.geometry.to_crs('epsg:4326').x < -116]\nax = src.plot(markersize=0.25, facecolor='red')\nctx.add_basemap(ax)\nplt.gcf().set_size_inches(16,16)\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\n\ngdf.filename.str[:-4].unique()\n\n# +\nfig, axes = plt.subplots(1,2,figsize=(16,16),sharex=True, sharey=True)\n\ngdf = gpd.read_file(\"/data2/elilouis/baker/timesifted_image_footprints.geojson\").to_crs('epsg:3857')\n\n\nax = gdf.plot(ax=axes[1], alpha=0.5, edgecolor='k', )\nctx.add_basemap(axes[1])\naxes[1].get_xaxis().set_visible(False)\naxes[1].get_yaxis().set_visible(False)\n\nsrc = gdf[gdf.filename.str.contains('NAGAP_74V5')]\nsrc.plot(ax=axes[0], alpha=0.5, edgecolor='k', )\nctx.add_basemap(axes[0])\naxes[0].get_xaxis().set_visible(False)\naxes[0].get_yaxis().set_visible(False)\n\n# -\n\n# ## Look at locations of images in our AOIs\n\ndf.crs.to_epsg(), aoi_gdf.crs.to_epsg()\n\naoi_frames_and_paths = gpd.sjoin(df, aoi_gdf)\n\n# Format date column...\n\n# +\naoi_frames_and_paths['datetime'] = pd.to_datetime(aoi_frames_and_paths.date, errors='coerce')\naoi_frames_and_paths.date = aoi_frames_and_paths.datetime.dt.date\n\naoi_frames_df = aoi_frames_and_paths[\n aoi_frames_and_paths.geometry.type=='Point']\n\naoi_paths_df = aoi_frames_and_paths[\n aoi_frames_and_paths.geometry.type!='Point']\n\n# -\n\n# Fix the data for the paths\n# \n# For all the path rows, `Name` really contains the `date` and `Name` columns smushed together\n\naoi_paths_df['date'] = aoi_paths_df.Name.apply(lambda x: pd.to_datetime(x.split('-')[-1]))\naoi_paths_df['Name'] = aoi_paths_df.Name.apply(lambda x: x.split('-')[0])\n\nax = aoi_frames_df.plot(markersize=7, facecolor='red', legend=True, \n column='date', categorical=True, \n legend_kwds={'bbox_to_anchor': (1.6, 1)})\nplt.gcf().set_size_inches(8,8)\nax.set(xlim=(-1.37e7,-1.345e7), ylim=(5.87e6,6.3e6))\nctx.add_basemap(ax)\n\nax = aoi_paths_df.plot(linewidth=1, column='date', categorical=True, legend=True,\n legend_kwds={'bbox_to_anchor': (1.6, 1)})\nplt.gcf().set_size_inches(8,8)\nax.set(xlim=(-1.37e7,-1.345e7), ylim=(5.87e6,6.3e6))\nctx.add_basemap(ax)\n\n# ## Examine image dates \n\naoi_paths_df.date.unique(),aoi_frames_df.date.unique()\n\nset(aoi_paths_df.date.unique()).difference(set(aoi_frames_df.date.unique()))\n\nset(aoi_frames_df.date.unique()).difference(set(aoi_paths_df.date.unique()))\n\n# # Identify Mt Rainier Imagery\n\n# ## Look at all images on Mt. Rainier\n\nrainier_frames_gdf = aoi_frames_df[\n aoi_frames_df.geometry.within(rainier_polygon)]\n\nax = rainier_frames_gdf.plot(column='date', categorical=True, legend=True, markersize=80)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldImagery)\nplt.gcf().set_size_inches(10,10)\n\n# ## Look at all images in watershed-delineated subsections of Mt Rainier\n\n# If I want to focus on the Nisqally Glacier/River system, it looks like I should investigate imagery from all dates... 
I need a polygon for the Nisqally watershed.\n\n# ## Load Washington watershed geometries\n\n# !ls $data_dir\n\nwau_gdf = gpd.read_file(f'{data_dir}/Watershed_Administrative_Units-shp/wau.shp')\n\nwau_gdf.plot()\n\nwau_in_aois_gdf = gpd.sjoin(wau_gdf, aoi_gdf)\nrainier_waus_gdf = wau_in_aois_gdf[wau_in_aois_gdf.name == 'Mt. Rainier']\n\nax = rainier_waus_gdf.plot(column='WAU_ALIAS_',legend=True, markersize=80, legend_kwds={'bbox_to_anchor': (1.6, 1)}, alpha=0.6)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldImagery)\nplt.gcf().set_size_inches(5,5)\n\n\n# ## Look at image locations and watershed delineations\n\n# +\ndef plot_frames_and_aoi_polygon(points, aoi_polygon = None, lims = None):\n ax = points.plot(column='date', categorical=True, markersize=20, legend=True)\n if aoi_polygon is not None:\n gpd.GeoDataFrame(ax = ax, geometry = pd.Series(aoi_polygon)).plot(legend_kwds={'bbox_to_anchor': (1.6, 1)}, edgecolor='red', lw=2, facecolor=\"none\")\n ctx.add_basemap(ax, source=ctx.providers.Esri.WorldImagery)\n if lims is not None:\n ax.set(xlim=lims[0], ylim=lims[1])\n plt.gcf().set_size_inches(8,8)\n \nimport math \ndef plot_frames_and_aoi_date_separated(points, aoi_polygon = None, lims=None):\n groupby = points.groupby('date')\n fig, axes = plt.subplots(math.ceil(len(groupby.size().tolist())/4),4, figsize=(20,20), sharex=True, sharey=True)\n axes_flat = [item for sublist in axes for item in sublist]\n for key, group in groupby:\n ax = axes_flat.pop(0)\n if aoi_polygon is not None:\n gpd.GeoDataFrame(geometry = pd.Series(aoi_polygon)).plot(ax=ax, legend_kwds={'bbox_to_anchor': (1.6, 1)}, edgecolor='red', lw=2, facecolor=\"none\")\n group.plot(ax=ax, column='date', categorical=True, markersize=40, legend=True)\n ctx.add_basemap(ax, source=ctx.providers.Stamen.Terrain)\n while len(axes_flat) > 0:\n ax = axes_flat.pop(0)\n if aoi_polygon is not None:\n gpd.GeoDataFrame(geometry = pd.Series(aoi_polygon)).plot(ax=ax, legend_kwds={'bbox_to_anchor': (1.6, 1)}, edgecolor='red', lw=2, facecolor=\"none\")\n group.plot(ax=ax, column='date', categorical=True, markersize=40, legend=True)\n ctx.add_basemap(ax, source=ctx.providers.Stamen.Terrain)\n# plt.tight_layout()\n plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)\n\n\n# -\n\nax = rainier_waus_gdf.plot(legend_kwds={'bbox_to_anchor': (1.6, 1)}, edgecolor='red', lw=2, facecolor=\"none\")\nax = rainier_frames_gdf.plot(column='date', categorical=True, markersize=20, ax=ax)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldImagery)\nax.set(xlim=(-1.357e7,-1.3535e7), ylim=(5.905e6, 5.94e6))\nplt.gcf().set_size_inches(20,20)\n\n# The Kautz, Carbon, and Frying Pan watersheds look to have lots of images on different dates\n\n# ## Look at image locations in the Kautz, Carbon, and Frying Pan Watersheds\n\nwau_gdf.WAU_ALIAS_.where(\n wau_gdf.WAU_ALIAS_.str.contains('KAUTZ', na=False)\n).dropna()\n\nwau_gdf.WAU_ALIAS_.where(\n wau_gdf.WAU_ALIAS_.str.contains('CARBON', na=False)\n).dropna()\n\nwau_gdf.WAU_ALIAS_.where(\n wau_gdf.WAU_ALIAS_.str.contains('FRYING', na=False)\n).dropna()\n\nkautz_frames_df = aoi_frames_df[aoi_frames_df.geometry.within(wau_gdf.geometry.iloc[594])]\ncarbon_frames_df = aoi_frames_df[aoi_frames_df.geometry.within(wau_gdf.geometry.iloc[536])]\nfryingpan_frames_df = aoi_frames_df[aoi_frames_df.geometry.within(wau_gdf.geometry.iloc[564])]\n\nax = kautz_frames_df.plot(column='date', categorical=True, legend=True, markersize=80)\nctx.add_basemap(ax, 
source=ctx.providers.Esri.WorldTopoMap)\nplt.gcf().set_size_inches(14,14)\n\nax = carbon_frames_df.plot(column='date', categorical=True, legend=True, markersize=80)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldTopoMap)\nplt.gcf().set_size_inches(14,14)\n\nax = fryingpan_frames_df.plot(column='date', categorical=True, legend=True, markersize=80)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldTopoMap)\nplt.gcf().set_size_inches(14,14)\nx, y, arrow_length = 0.5, 0.5, 0.1\nax.annotate('N', xy=(x, y), xytext=(x, y-arrow_length),\n arrowprops=dict(facecolor='black', width=5, headwidth=15),\n ha='center', va='center', fontsize=20,\n xycoords=ax.transAxes)\n\nax = fryingpan_frames_df.plot(column='date', categorical=True, legend=True, markersize=80)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldTopoMap)\nplt.gcf().set_size_inches(14,14)\n\nplot_frames_and_aoi_polygon(fryingpan_frames_df, None)\n\nplot_frames_and_aoi_date_separated(fryingpan_frames_df, None)\n\n# ## Look at data in smaller watersheds, Nisqually and Carbon\n\nrainier_sub_aois = gpd.read_file(f\"{data_dir}/rainier_sub_aois.geojson\")\nrainier_sub_aois = rainier_sub_aois.to_crs(epsg=3857)\nnisqually_polygon = rainier_sub_aois[rainier_sub_aois.name=='nisqually'].geometry.iloc[0]\ncarbon_polygon = rainier_sub_aois[rainier_sub_aois.name=='carbon'].geometry.iloc[0]\nnisqually_frames = rainier_frames_gdf[rainier_frames_gdf.geometry.within(nisqually_polygon)]\ncarbon_frames = rainier_frames_gdf[rainier_frames_gdf.geometry.within(carbon_polygon)]\n\nlen(nisqually_frames), len(carbon_frames)\n\nplot_frames_and_aoi_polygon(nisqually_frames, nisqually_polygon)\n\nplot_frames_and_aoi_date_separated(nisqually_frames, nisqually_polygon)\n\nplot_frames_and_aoi_polygon(carbon_frames, carbon_polygon)\n\nplot_frames_and_aoi_date_separated(carbon_frames, carbon_polygon)\n\n\n# # Save Datasets to CSV for the HSFM Pipeline\n\ndef create_targets_list(kml_derived_df, output_path):\n #Open image name/UUID dataset\n pids_df = pd.read_csv(f'{data_dir}/glacier_names_pids.csv')\n filenames = kml_derived_df.apply(lambda r: ('NAGAP_' + r.roll + '_' + r.frame).replace(' ', ''), axis=1)\n pid_df = pids_df[pids_df.fileName.isin(filenames)]\n pid_df[[\n 'Year','Date','Location','Latitude','Longitude','Altitude','fileName','pid_tn','pid_jpeg','pid_tiff','_merge'\n ]].to_csv(output_path, index=None)\n return output_path\n\n\npids_df = pd.read_csv(f'{data_dir}/glacier_names_pids.csv')\n\n# ### Nisqually 1977\n\nsrc = nisqually_frames.groupby('date').get_group('1977-02-11')\nprint(len(src))\ncreate_targets_list(\n src,\n 'targets_nisqually_1977.csv'\n)\n\n# ### Nisqually 1980\n\nsrc = nisqually_frames.groupby('date').get_group('1980-9-10')\nprint(len(src))\ncreate_targets_list(\n src,\n 'targets_nisqually_1980.csv'\n)\n\n# ### Nisqually All\n\n# +\nsrc = nisqually_frames\nprint(len(src))\ncreate_targets_list(\n src,\n 'targets_nisqually_all_dates.csv'\n\n)\n# -\n\n# ### Carbon All\n\n# +\nsrc = carbon_frames\nprint(len(src))\ncreate_targets_list(\n src,\n 'targets_carbon_all_dates.csv'\n\n)\n# -\n# ### Frying Pan Watershed All\n\n# +\nsrc = fryingpan_frames_df\nprint(len(src))\ncreate_targets_list(\n src,\n 'targets_carbon_all_dates.csv'\n\n)\n# -\n\nsrc.roll = src.roll.apply(lambda x: x.strip())\n\nsrc.roll.iloc[0]\n\nsrc.Name.apply(lambda x: x[:4]) == src.roll\n\nsrc[src.Name.apply(lambda x: x[:4]) != src.roll]\n\n# ### Bandaid - Missing Lat/Long values\n#\n#\n# I later noticed missing Lat/Long values for a subset of images in 1974. 
Fix that here by getting lat/long info from the KML files.\n#\n# Note also that \"Name\" and \"roll\" columns do not agree.\n\nfixing = pd.read_csv('targets_carbon_all_dates.csv')\n\nfixing[fixing.Year == 1974]\n\ncarbon_frames['fileName'] = 'NAGAP_' + carbon_frames.roll + '_' + carbon_frames.frame\nto_merge = carbon_frames[carbon_frames.roll=='74V5'][['fileName', 'latitude', 'longitude']]\nto_merge\n\nfixing.loc[fixing['fileName'].str.startswith('NAGAP_74V5_')]\n\n# These rows are in the same order so I can go ahead and assign the lat long values from the `to_merge` dataframe.\n\nfixing.loc[fixing['fileName'].str.startswith('NAGAP_74V5_'), 'Latitude'] = to_merge['latitude'].tolist()\nfixing.loc[fixing['fileName'].str.startswith('NAGAP_74V5_'), 'Longitude'] = to_merge['longitude'].tolist()\n\nfixing.loc[fixing['fileName'].str.startswith('NAGAP_74V5_')]\n\nfixing.to_csv('targets_carbon_all_dates.csv')\n\n\n"
] | [
[
"pandas.concat",
"pandas.to_datetime",
"pandas.read_csv",
"pandas.Series",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.subplots_adjust"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
shreyasahasram08/growth-too-marshal | [
"f3fbf0043b50d3ffe56e6b4e06b0c1472fc10220"
] | [
"growth/too/tests/test_gcn.py"
] | [
"import datetime\nfrom unittest import mock\n\nfrom astropy import time\nfrom astropy import units as u\nimport gcn\nimport lxml.etree\nimport numpy as np\nimport pkg_resources\nimport pytest\n\nfrom .. import models\nfrom ..jinja import btoa\nfrom ..flask import app\nfrom ..gcn import handle, listen\nfrom . import mock_download_file\n\n\[email protected]_time('2017-08-17')\ndef test_freeze_time():\n \"\"\"Test that freezing time works.\"\"\"\n assert datetime.date.today() == datetime.date(2017, 8, 17)\n assert datetime.datetime.now() == datetime.datetime(2017, 8, 17)\n assert time.Time.now() == time.Time('2017-08-17')\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\ndef test_grb180116a_gnd_pos(mock_from_cone, mock_tile, mock_contour,\n celery, flask, mail):\n # Read test GCN\n payload = pkg_resources.resource_string(\n __name__, 'data/GRB180116A_Fermi_GBM_Gnd_Pos.xml')\n root = lxml.etree.fromstring(payload)\n\n # Run function under test\n handle(payload, root)\n\n # Check that we didn't write the unhelpful \"unknown\" short/long class\n dateobs = '2018-01-16T00:36:53'\n event = models.Event.query.get(dateobs)\n assert event.tags == ['Fermi', 'GRB']\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.twilio.call_everyone.run')\[email protected]('growth.too.tasks.slack.slack_everyone.run')\[email protected]('astropy.io.fits.file.download_file', mock_download_file)\[email protected]_time('2019-08-21')\ndef test_grb180116a_fin_pos(mock_call_everyone, mock_slack_everyone,\n mock_contour,\n celery, flask, mail):\n # Read test GCN\n payload = pkg_resources.resource_string(\n __name__, 'data/GRB180116A_Fermi_GBM_Fin_Pos.xml')\n root = lxml.etree.fromstring(payload)\n\n # Run function under test\n handle(payload, root)\n\n dateobs = '2018-01-16T00:36:53'\n event = models.Event.query.get(dateobs)\n assert event is not None\n *_, gcn_notice = event.gcn_notices\n assert gcn_notice.content == payload\n assert gcn_notice.notice_type == gcn.NoticeType.FERMI_GBM_FIN_POS\n assert time.Time(gcn_notice.date) == time.Time('2018-01-16T00:46:05')\n assert gcn_notice.ivorn == 'ivo://nasa.gsfc.gcn/Fermi#GBM_Fin_Pos2018-01-16T00:36:52.81_537755817_0-026' # noqa: E501\n assert gcn_notice.stream == 'Fermi'\n assert time.Time(gcn_notice.dateobs) - time.Time(dateobs) < 0.5 * u.second\n assert event.tags == ['Fermi', 'long', 'GRB']\n\n mock_call_everyone.assert_not_called()\n mock_slack_everyone.assert_not_called()\n\n localization, = event.localizations\n assert np.isclose(localization.flat_2d.sum(), 1.0)\n\n telescope = 'ZTF'\n filt = ['g', 'r', 'g']\n exposuretimes = [300.0, 300.0, 300.0]\n doReferences, doDither = True, False\n filterScheduleType = 'block'\n schedule_type = 'greedy'\n probability = 0.9\n plan_name = \"%s_%s_%s_%d_%d_%s_%d_%d\" % (localization.localization_name,\n \"\".join(filt), schedule_type,\n doDither, doReferences,\n filterScheduleType,\n exposuretimes[0],\n 100*probability)\n plan = models.Plan.query.filter_by(plan_name=plan_name,\n telescope=telescope).one()\n\n assert time.Time(plan.dateobs) - time.Time(dateobs) < 0.5 * u.second\n\n exposures = models.PlannedObservation.query.filter_by(\n dateobs=event.dateobs,\n telescope=telescope,\n plan_name=plan.plan_name).all()\n\n for exposure in exposures:\n field_id = exposure.field_id\n assert np.all(np.array(field_id) < 2000)\n assert np.all(np.array(exposure.exposure_time) 
> 0)\n assert np.all(np.array(exposure.weight) <= 1)\n\n assert np.isclose(plan.area, 651.6459456904389)\n\n # Try submitting some of the observing plans.\n flask.post(\n '/event/{}/plan'.format(dateobs),\n data={\n 'go': True,\n '{}_{}'.format(btoa(telescope), btoa(plan_name)): True\n }\n )\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\[email protected]('growth.too.tasks.skymaps.download.run')\ndef test_grb180116a_multiple_gcns(mock_download, mock_from_cone, mock_tile,\n mock_contour, celery, flask, mail):\n \"\"\"Test reading and ingesting all three GCNs. Make sure that there are\n no database conflicts.\"\"\"\n for notice_type in ['Alert', 'Flt_Pos', 'Gnd_Pos', 'Fin_Pos']:\n filename = 'data/GRB180116A_Fermi_GBM_' + notice_type + '.xml'\n payload = pkg_resources.resource_string(__name__, filename)\n root = lxml.etree.fromstring(payload)\n handle(payload, root)\n\n\[email protected](app.jinja_env.globals,\n {'now': lambda: time.Time('2018-04-22T21:55:30').datetime})\[email protected]('growth.too.tasks.twilio.text_everyone.run')\[email protected]('growth.too.tasks.twilio.call_everyone.run')\[email protected]('growth.too.tasks.slack.slack_everyone.run')\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\[email protected]('astropy.io.fits.file.download_file', mock_download_file)\ndef test_gbm_subthreshold(mock_from_cone, mock_tile, mock_contour,\n mock_call_everyone, mock_text_everyone,\n mock_slack_everyone, celery,\n flask, mail):\n \"\"\"Test reading and ingesting all three GCNs. Make sure that there are\n no database conflicts.\"\"\"\n filename = 'data/GRB180422.913_Subthreshold.xml'\n payload = pkg_resources.resource_string(__name__, filename)\n root = lxml.etree.fromstring(payload)\n handle(payload, root)\n\n event = models.Event.query.get('2018-04-22T21:54:11')\n assert event is not None\n gcn_notice, = event.gcn_notices\n assert gcn_notice.notice_type == gcn.NoticeType.FERMI_GBM_SUBTHRESH\n assert gcn_notice.stream == 'Fermi'\n assert event.tags == ['Fermi', 'short', 'transient']\n\n mock_text_everyone.assert_not_called()\n mock_call_everyone.assert_not_called()\n mock_slack_everyone.assert_not_called()\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\ndef test_amon_151115(mock_from_cone, mock_tile, mock_contour,\n celery, flask, mail):\n # Read test GCN\n payload = pkg_resources.resource_string(\n __name__, 'data/AMON_151115.xml')\n root = lxml.etree.fromstring(payload)\n\n # Run function under test\n handle(payload, root)\n\n dateobs = '2015-11-15T11:53:44'\n event = models.Event.query.get(dateobs)\n assert event.tags == ['AMON']\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\ndef test_amon_icecube_gold_190730(mock_from_cone, mock_tile, mock_contour,\n celery, flask, mail):\n # Read test GCN\n payload = pkg_resources.resource_string(\n __name__, 'data/AMON_ICECUBE_GOLD_190730.xml')\n root = lxml.etree.fromstring(payload)\n\n # Run function under test\n handle(payload, root)\n\n dateobs = '2019-07-30T20:50:41'\n event = models.Event.query.get(dateobs)\n assert event.tags == 
['AMON']\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\ndef test_amon_icecube_bronze_190819(mock_from_cone, mock_tile, mock_contour,\n celery, flask, mail):\n # Read test GCN\n payload = pkg_resources.resource_string(\n __name__, 'data/AMON_ICECUBE_BRONZE_190819.xml')\n root = lxml.etree.fromstring(payload)\n\n # Run function under test\n handle(payload, root)\n\n dateobs = '2019-08-19T17:34:24'\n event = models.Event.query.get(dateobs)\n assert event.tags == ['AMON']\n\n\[email protected]('gcn.listen')\ndef test_listen(mock_listen):\n # Run function under test\n listen()\n\n # Check that GCN listener was invoked\n assert mock_listen.called_once_with(handle=handle)\n"
] | [
[
"numpy.array",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zgongaware/gonzo | [
"290eae429eb115793cdac3d0be69a064eb73f9a2"
] | [
"goza/chart.py"
] | [
"import matplotlib.pyplot as plt\n\n\nclass Chart:\n \"\"\"\n Chart class to create and format a basic pyplot figure\n \"\"\"\n def __init__(self, title=None, xlabel=None, ylabel=None, figsize=None):\n\n self.title = title if title else \"Unnamed Chart\"\n self.xlabel = xlabel if xlabel else \"X-Axis\"\n self.ylabel = ylabel if ylabel else \"Y-Axis\"\n self.figsize = figsize if figsize else (10, 8)\n\n # Create figure\n self.figure, self.ax = self.create_figure(self.figsize)\n\n # Format\n self.format_title()\n self.format_axes()\n\n def create_figure(self, figsize):\n \"\"\"\n Create plplot figure and axes objects and assign to Chart\n :param figsize:\n :return:\n \"\"\"\n self.figure, self.ax = plt.subplots(1, 1, figsize=figsize)\n\n return self.figure, self.ax\n\n def format_title(self, color=\"black\", fontsize=14):\n \"\"\"\n Format title, x label, and y label\n :return:\n \"\"\"\n self.ax.set_title(self.title, color=color, fontsize=fontsize)\n\n def format_axes(self, color=\"#605770\", fontsize=12):\n \"\"\"\n Format axes to my preference. Remove top/right spines and set colors on\n left/bottom spines, ticks, and tick labels\n :param color:\n :return:\n \"\"\"\n\n # Turn off top / right spines\n self.ax.spines[\"top\"].set_visible(False)\n self.ax.spines[\"right\"].set_visible(False)\n\n # Format left / bottom spines\n self.ax.spines[\"left\"].set_color(color)\n self.ax.spines[\"bottom\"].set_color(color)\n\n # Format ticks\n self.ax.tick_params(axis=\"x\", colors=color)\n self.ax.tick_params(axis=\"y\", colors=color)\n\n # Format labels\n self.ax.set_xlabel(self.xlabel, fontsize=fontsize)\n self.ax.set_ylabel(self.ylabel, fontsize=fontsize)\n\n @staticmethod\n def show():\n \"\"\"\n Show chart\n :return:\n \"\"\"\n plt.show()\n\n # TODO: save_figure method saving blank image.\n @staticmethod\n def save_figure(*args, **kwargs):\n \"\"\"\n Save figure to file\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n plt.savefig(*args, **kwargs)\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
luomou97/ELMoForManyLangs | [
"3e97600baa3a4dde229c1e78c513785e7d50e8e1",
"b3de5f1dc1ac13638a930b49c41e1f1e0e185ca1"
] | [
"elmoformanylangs/modules/lstm.py",
"elmoformanylangs/biLM.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport copy\n\n\n# \\ref page 4, layers=2, forward + backward, concat[forward_projection, backward_projection]\nclass LstmbiLm(nn.Module):\n def __init__(self, config, use_cuda=False):\n super(LstmbiLm, self).__init__()\n self.config = config\n self.use_cuda = use_cuda\n \n self.encoder = nn.LSTM(self.config['encoder']['projection_dim'],\n self.config['encoder']['dim'],\n num_layers=self.config['encoder']['n_layers'], \n bidirectional=True,\n batch_first=True, \n dropout=self.config['dropout'])\n self.projection = nn.Linear(self.config['encoder']['dim'], self.config['encoder']['projection_dim'], bias=True)\n\n def forward(self, inputs):\n forward, backward = self.encoder(inputs)[0].split(self.config['encoder']['dim'], 2) # split dim=2 in stride config['encoder']['dim'], here half\n return torch.cat([self.projection(forward), self.projection(backward)], dim=2)\n",
"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport os\nimport errno\nimport sys\nimport codecs\nimport argparse\nimport time\nimport random\nimport logging\nimport json\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom .modules.elmo import ElmobiLm\nfrom .modules.lstm import LstmbiLm\nfrom .modules.token_embedder import ConvTokenEmbedder, LstmTokenEmbedder\nfrom .modules.embedding_layer import EmbeddingLayer\nfrom .modules.classify_layer import SoftmaxLayer, CNNSoftmaxLayer, SampledSoftmaxLayer\nfrom .dataloader import load_embedding\nfrom .utils import dict2namedtuple\nfrom collections import Counter\nimport numpy as np\n\nlogger = logging.getLogger('elmoformanylangs')\n\n\ndef divide(data, valid_size):\n valid_size = min(valid_size, len(data) // 10)\n random.shuffle(data)\n return data[valid_size:], data[:valid_size]\n\n\ndef break_sentence(sentence, max_sent_len):\n \"\"\"\n For example, for a sentence with 70 words, supposing the the `max_sent_len'\n is 30, break it into 3 sentences.\n\n :param sentence: list[str] the sentence\n :param max_sent_len:\n :return:\n \"\"\"\n ret = []\n cur = 0\n length = len(sentence)\n while cur < length:\n if cur + max_sent_len + 5 >= length:\n ret.append(sentence[cur: length])\n break\n ret.append(sentence[cur: min(length, cur + max_sent_len)])\n cur += max_sent_len\n return ret\n\n\ndef read_corpus(path, max_chars=None, max_sent_len=20):\n \"\"\"\n read raw text file\n :param path: str\n :param max_chars: int\n :param max_sent_len: int\n :return:\n \"\"\"\n data = []\n with codecs.open(path, 'r', encoding='utf-8') as fin:\n for line in fin:\n data.append('<bos>')\n for token in line.strip().split():\n if max_chars is not None and len(token) + 2 > max_chars:\n token = token[:max_chars - 2]\n data.append(token)\n data.append('<eos>')\n dataset = break_sentence(data, max_sent_len)\n return dataset\n\n\ndef create_one_batch(x, word2id, char2id, config, oov='<oov>', pad='<pad>', sort=True):\n \"\"\"\n\n :param x:\n :param word2id: dict\n :param char2id: dict\n :param config:\n :param oov:\n :param pad:\n :param sort:\n :return:\n \"\"\"\n batch_size = len(x)\n lst = list(range(batch_size))\n if sort:\n lst.sort(key=lambda l: -len(x[l]))\n\n x = [x[i] for i in lst]\n lens = [len(x[i]) for i in lst]\n max_len = max(lens)\n\n if word2id is not None:\n oov_id, pad_id = word2id.get(oov, None), word2id.get(pad, None)\n assert oov_id is not None and pad_id is not None\n batch_w = torch.LongTensor(batch_size, max_len).fill_(pad_id)\n for i, x_i in enumerate(x):\n for j, x_ij in enumerate(x_i):\n batch_w[i][j] = word2id.get(x_ij, oov_id)\n else:\n batch_w = None\n\n if char2id is not None:\n bow_id, eow_id, oov_id, pad_id = char2id.get('<eow>', None), char2id.get('<bow>', None), char2id.get(oov, None), char2id.get(pad, None)\n\n assert bow_id is not None and eow_id is not None and oov_id is not None and pad_id is not None\n\n if config['token_embedder']['name'].lower() == 'cnn':\n max_chars = config['token_embedder']['max_characters_per_token']\n assert max([len(w) for i in lst for w in x[i]]) + 2 <= max_chars\n elif config['token_embedder']['name'].lower() == 'lstm':\n max_chars = max([len(w) for i in lst for w in x[i]]) + 2 # counting the <bow> and <eow>\n\n batch_c = torch.LongTensor(batch_size, max_len, max_chars).fill_(pad_id)\n\n for i, x_i in enumerate(x):\n for j, x_ij in enumerate(x_i):\n batch_c[i][j][0] = 
bow_id\n if x_ij == '<bos>' or x_ij == '<eos>':\n batch_c[i][j][1] = char2id.get(x_ij)\n batch_c[i][j][2] = eow_id\n else:\n for k, c in enumerate(x_ij):\n batch_c[i][j][k + 1] = char2id.get(c, oov_id)\n batch_c[i][j][len(x_ij) + 1] = eow_id\n else:\n batch_c = None\n\n masks = [torch.LongTensor(batch_size, max_len).fill_(0), [], []]\n\n for i, x_i in enumerate(x):\n for j in range(len(x_i)):\n masks[0][i][j] = 1\n if j + 1 < len(x_i):\n masks[1].append(i * max_len + j)\n if j > 0: \n masks[2].append(i * max_len + j)\n\n assert len(masks[1]) <= batch_size * max_len\n assert len(masks[2]) <= batch_size * max_len\n\n masks[1] = torch.LongTensor(masks[1])\n masks[2] = torch.LongTensor(masks[2])\n\n return batch_w, batch_c, lens, masks\n\n\n# shuffle training examples and create mini-batches\ndef create_batches(x, batch_size, word2id, char2id, config, perm=None, shuffle=True, sort=True, use_cuda=False):\n \"\"\"\n\n :param x:\n :param batch_size:\n :param word2id:\n :param char2id:\n :param config:\n :param perm:\n :param shuffle:\n :param sort:\n :param use_cuda:\n :return:\n \"\"\"\n lst = perm or list(range(len(x)))\n if shuffle:\n random.shuffle(lst)\n\n if sort:\n lst.sort(key=lambda l: -len(x[l]))\n\n x = [x[i] for i in lst]\n\n sum_len = 0.0\n batches_w, batches_c, batches_lens, batches_masks = [], [], [], []\n size = batch_size\n nbatch = (len(x) - 1) // size + 1\n for i in range(nbatch):\n start_id, end_id = i * size, (i + 1) * size\n bw, bc, blens, bmasks = create_one_batch(x[start_id: end_id], word2id, char2id, config, sort=sort)\n sum_len += sum(blens)\n batches_w.append(bw)\n batches_c.append(bc)\n batches_lens.append(blens)\n batches_masks.append(bmasks)\n\n if sort:\n perm = list(range(nbatch))\n random.shuffle(perm)\n batches_w = [batches_w[i] for i in perm]\n batches_c = [batches_c[i] for i in perm]\n batches_lens = [batches_lens[i] for i in perm]\n batches_masks = [batches_masks[i] for i in perm]\n\n logger.info(\"{} batches, avg len: {:.1f}\".format(nbatch, sum_len / len(x)))\n return batches_w, batches_c, batches_lens, batches_masks\n\n\nclass Model(nn.Module):\n def __init__(self, config, word_emb_layer, char_emb_layer, n_class, use_cuda=False):\n super(Model, self).__init__() \n self.use_cuda = use_cuda\n self.config = config\n\n if config['token_embedder']['name'].lower() == 'cnn':\n self.token_embedder = ConvTokenEmbedder(config, word_emb_layer, char_emb_layer, use_cuda)\n elif config['token_embedder']['name'].lower() == 'lstm':\n self.token_embedder = LstmTokenEmbedder(config, word_emb_layer, char_emb_layer, use_cuda)\n\n if config['encoder']['name'].lower() == 'elmo':\n self.encoder = ElmobiLm(config, use_cuda)\n elif config['encoder']['name'].lower() == 'lstm':\n self.encoder = LstmbiLm(config, use_cuda)\n\n self.output_dim = config['encoder']['projection_dim']\n if config['classifier']['name'].lower() == 'softmax':\n self.classify_layer = SoftmaxLayer(self.output_dim, n_class)\n elif config['classifier']['name'].lower() == 'cnn_softmax':\n self.classify_layer = CNNSoftmaxLayer(self.token_embedder, self.output_dim, n_class,\n config['classifier']['n_samples'], config['classifier']['corr_dim'],\n use_cuda)\n elif config['classifier']['name'].lower() == 'sampled_softmax':\n self.classify_layer = SampledSoftmaxLayer(self.output_dim, n_class, config['classifier']['n_samples'], use_cuda)\n\n def forward(self, word_inp, chars_inp, mask_package):\n \"\"\"\n\n :param word_inp:\n :param chars_inp:\n :param mask_package: Tuple[]\n :return:\n \"\"\"\n classifier_name = 
self.config['classifier']['name'].lower()\n\n if self.training and classifier_name == 'cnn_softmax' or classifier_name == 'sampled_softmax':\n self.classify_layer.update_negative_samples(word_inp, chars_inp, mask_package[0])\n self.classify_layer.update_embedding_matrix()\n\n token_embedding = self.token_embedder(word_inp, chars_inp, (mask_package[0].size(0), mask_package[0].size(1)))\n token_embedding = F.dropout(token_embedding, self.config['dropout'], self.training)\n\n encoder_name = self.config['encoder']['name'].lower()\n if encoder_name == 'elmo':\n mask = Variable(mask_package[0].cuda()).cuda() if self.use_cuda else Variable(mask_package[0])\n encoder_output = self.encoder(token_embedding, mask)\n encoder_output = encoder_output[1]\n # [batch_size, len, hidden_size]\n elif encoder_name == 'lstm':\n encoder_output = self.encoder(token_embedding)\n else:\n raise ValueError('')\n\n encoder_output = F.dropout(encoder_output, self.config['dropout'], self.training)\n forward, backward = encoder_output.split(self.output_dim, 2)\n\n word_inp = Variable(word_inp)\n if self.use_cuda:\n word_inp = word_inp.cuda()\n\n mask1 = Variable(mask_package[1].cuda()).cuda() if self.use_cuda else Variable(mask_package[1])\n mask2 = Variable(mask_package[2].cuda()).cuda() if self.use_cuda else Variable(mask_package[2])\n\n forward_x = forward.contiguous().view(-1, self.output_dim).index_select(0, mask1)\n forward_y = word_inp.contiguous().view(-1).index_select(0, mask2)\n\n backward_x = backward.contiguous().view(-1, self.output_dim).index_select(0, mask2)\n backward_y = word_inp.contiguous().view(-1).index_select(0, mask1)\n\n return self.classify_layer(forward_x, forward_y), self.classify_layer(backward_x, backward_y)\n\n def save_model(self, path, save_classify_layer):\n torch.save(self.token_embedder.state_dict(), os.path.join(path, 'token_embedder.pkl')) \n torch.save(self.encoder.state_dict(), os.path.join(path, 'encoder.pkl'))\n if save_classify_layer:\n torch.save(self.classify_layer.state_dict(), os.path.join(path, 'classifier.pkl'))\n\n def load_model(self, path):\n self.token_embedder.load_state_dict(torch.load(os.path.join(path, 'token_embedder.pkl')))\n self.encoder.load_state_dict(torch.load(os.path.join(path, 'encoder.pkl')))\n self.classify_layer.load_state_dict(torch.load(os.path.join(path, 'classifier.pkl')))\n\n\ndef eval_model(model, valid):\n model.eval()\n if model.config['classifier']['name'].lower() == 'cnn_softmax' or \\\n model.config['classifier']['name'].lower() == 'sampled_softmax':\n model.classify_layer.update_embedding_matrix()\n total_loss, total_tag = 0.0, 0\n valid_w, valid_c, valid_lens, valid_masks = valid\n for w, c, lens, masks in zip(valid_w, valid_c, valid_lens, valid_masks):\n loss_forward, loss_backward = model.forward(w, c, masks)\n total_loss += loss_forward.data[0]\n n_tags = sum(lens)\n total_tag += n_tags\n model.train()\n return np.exp(total_loss / total_tag)\n\n\ndef train_model(epoch, opt, model, optimizer,\n train, valid, test, best_train, best_valid, test_result):\n \"\"\"\n Training model for one epoch\n\n :param epoch:\n :param opt:\n :param model:\n :param optimizer:\n :param train:\n :param best_train:\n :param valid:\n :param best_valid:\n :param test:\n :param test_result:\n :return:\n \"\"\"\n model.train()\n\n total_loss, total_tag = 0.0, 0\n cnt = 0\n start_time = time.time()\n\n train_w, train_c, train_lens, train_masks = train\n\n lst = list(range(len(train_w)))\n random.shuffle(lst)\n \n train_w = [train_w[l] for l in lst]\n train_c = 
[train_c[l] for l in lst]\n train_lens = [train_lens[l] for l in lst]\n train_masks = [train_masks[l] for l in lst]\n\n for w, c, lens, masks in zip(train_w, train_c, train_lens, train_masks):\n cnt += 1\n model.zero_grad()\n loss_forward, loss_backward = model.forward(w, c, masks)\n\n loss = (loss_forward + loss_backward) / 2.0\n total_loss += loss_forward.data[0]\n n_tags = sum(lens)\n total_tag += n_tags\n loss.backward()\n\n torch.nn.utils.clip_grad_norm(model.parameters(), opt.clip_grad)\n optimizer.step()\n if cnt * opt.batch_size % 1024 == 0:\n logger.info(\"Epoch={} iter={} lr={:.6f} train_ppl={:.6f} time={:.2f}s\".format(\n epoch, cnt, optimizer.param_groups[0]['lr'],\n np.exp(total_loss / total_tag), time.time() - start_time\n ))\n start_time = time.time()\n\n if cnt % opt.eval_steps == 0 or cnt % len(train_w) == 0:\n if valid is None:\n train_ppl = np.exp(total_loss / total_tag)\n logger.info(\"Epoch={} iter={} lr={:.6f} train_ppl={:.6f}\".format(\n epoch, cnt, optimizer.param_groups[0]['lr'], train_ppl))\n if train_ppl < best_train:\n best_train = train_ppl\n logger.info(\"New record achieved on training dataset!\")\n model.save_model(opt.model, opt.save_classify_layer) \n else:\n valid_ppl = eval_model(model, valid)\n logger.info(\"Epoch={} iter={} lr={:.6f} valid_ppl={:.6f}\".format(\n epoch, cnt, optimizer.param_groups[0]['lr'], valid_ppl))\n\n if valid_ppl < best_valid:\n model.save_model(opt.model, opt.save_classify_layer)\n best_valid = valid_ppl\n logger.info(\"New record achieved!\")\n\n if test is not None:\n test_result = eval_model(model, test)\n logger.info(\"Epoch={} iter={} lr={:.6f} test_ppl={:.6f}\".format(\n epoch, cnt, optimizer.param_groups[0]['lr'], test_result))\n return best_train, best_valid, test_result\n\n\ndef get_truncated_vocab(dataset, min_count):\n \"\"\"\n\n :param dataset:\n :param min_count: int\n :return:\n \"\"\"\n word_count = Counter()\n for sentence in dataset:\n word_count.update(sentence)\n\n word_count = list(word_count.items())\n word_count.sort(key=lambda x: x[1], reverse=True)\n\n i = 0\n for word, count in word_count:\n if count < min_count:\n break\n i += 1\n\n logger.info('Truncated word count: {0}.'.format(sum([count for word, count in word_count[i:]])))\n logger.info('Original vocabulary size: {0}.'.format(len(word_count)))\n return word_count[:i]\n\n\ndef train():\n cmd = argparse.ArgumentParser(sys.argv[0], conflict_handler='resolve')\n cmd.add_argument('--seed', default=1, type=int, help='The random seed.')\n cmd.add_argument('--gpu', default=-1, type=int, help='Use id of gpu, -1 if cpu.')\n\n cmd.add_argument('--train_path', required=True, help='The path to the training file.')\n cmd.add_argument('--valid_path', help='The path to the development file.')\n cmd.add_argument('--test_path', help='The path to the testing file.')\n\n cmd.add_argument('--config_path', required=True, help='the path to the config file.')\n cmd.add_argument(\"--word_embedding\", help=\"The path to word vectors.\")\n\n cmd.add_argument('--optimizer', default='sgd', choices=['sgd', 'adam', 'adagrad'],\n help='the type of optimizer: valid options=[sgd, adam, adagrad]')\n cmd.add_argument(\"--lr\", type=float, default=0.01, help='the learning rate.')\n cmd.add_argument(\"--lr_decay\", type=float, default=0, help='the learning rate decay.')\n\n cmd.add_argument(\"--model\", required=True, help=\"path to save model\")\n \n cmd.add_argument(\"--batch_size\", \"--batch\", type=int, default=32, help='the batch size.')\n cmd.add_argument(\"--max_epoch\", 
type=int, default=100, help='the maximum number of iteration.')\n \n cmd.add_argument(\"--clip_grad\", type=float, default=5, help='the tense of clipped grad.')\n\n cmd.add_argument('--max_sent_len', type=int, default=20, help='maximum sentence length.')\n\n cmd.add_argument('--min_count', type=int, default=5, help='minimum word count.')\n\n cmd.add_argument('--max_vocab_size', type=int, default=150000, help='maximum vocabulary size.')\n\n cmd.add_argument('--save_classify_layer', default=False, action='store_true',\n help=\"whether to save the classify layer\")\n\n cmd.add_argument('--valid_size', type=int, default=0, help=\"size of validation dataset when there's no valid.\")\n cmd.add_argument('--eval_steps', required=False, type=int, help='report every xx batches.')\n\n opt = cmd.parse_args(sys.argv[2:])\n\n with open(opt.config_path, 'r') as fin:\n config = json.load(fin)\n\n # Dump configurations\n print(opt)\n print(config)\n\n # set seed.\n torch.manual_seed(opt.seed)\n random.seed(opt.seed)\n if opt.gpu >= 0:\n torch.cuda.set_device(opt.gpu)\n if opt.seed > 0:\n torch.cuda.manual_seed(opt.seed)\n\n use_cuda = opt.gpu >= 0 and torch.cuda.is_available()\n\n token_embedder_name = config['token_embedder']['name'].lower()\n token_embedder_max_chars = config['token_embedder'].get('max_characters_per_token', None)\n if token_embedder_name == 'cnn':\n train_data = read_corpus(opt.train_path, token_embedder_max_chars, opt.max_sent_len)\n elif token_embedder_name == 'lstm':\n train_data = read_corpus(opt.train_path, opt.max_sent_len)\n else:\n raise ValueError('Unknown token embedder name: {}'.format(token_embedder_name))\n\n logger.info('training instance: {}, training tokens: {}.'.format(len(train_data),\n sum([len(s) - 1 for s in train_data])))\n\n if opt.valid_path is not None:\n if token_embedder_name == 'cnn':\n valid_data = read_corpus(opt.valid_path, token_embedder_max_chars, opt.max_sent_len)\n elif token_embedder_name == 'lstm':\n valid_data = read_corpus(opt.valid_path, opt.max_sent_len)\n else:\n raise ValueError('Unknown token embedder name: {}'.format(token_embedder_name))\n logger.info('valid instance: {}, valid tokens: {}.'.format(len(valid_data),\n sum([len(s) - 1 for s in valid_data])))\n elif opt.valid_size > 0:\n train_data, valid_data = divide(train_data, opt.valid_size)\n logger.info('training instance: {}, training tokens after division: {}.'.format(\n len(train_data), sum([len(s) - 1 for s in train_data])))\n logger.info('valid instance: {}, valid tokens: {}.'.format(\n len(valid_data), sum([len(s) - 1 for s in valid_data])))\n else:\n valid_data = None\n\n if opt.test_path is not None:\n if token_embedder_name == 'cnn':\n test_data = read_corpus(opt.test_path, token_embedder_max_chars, opt.max_sent_len)\n elif token_embedder_name == 'lstm':\n test_data = read_corpus(opt.test_path, opt.max_sent_len)\n else:\n raise ValueError('Unknown token embedder name: {}'.format(token_embedder_name))\n logger.info('testing instance: {}, testing tokens: {}.'.format(\n len(test_data), sum([len(s) - 1 for s in test_data])))\n else:\n test_data = None\n\n if opt.word_embedding is not None:\n embs = load_embedding(opt.word_embedding)\n word_lexicon = {word: i for i, word in enumerate(embs[0])} \n else:\n embs = None\n word_lexicon = {}\n\n # Maintain the vocabulary. 
vocabulary is used in either WordEmbeddingInput or softmax classification\n vocab = get_truncated_vocab(train_data, opt.min_count)\n\n # Ensure index of '<oov>' is 0\n for special_word in ['<oov>', '<bos>', '<eos>', '<pad>']:\n if special_word not in word_lexicon:\n word_lexicon[special_word] = len(word_lexicon)\n\n for word, _ in vocab:\n if word not in word_lexicon:\n word_lexicon[word] = len(word_lexicon)\n\n # Word Embedding\n if config['token_embedder']['word_dim'] > 0:\n word_emb_layer = EmbeddingLayer(config['token_embedder']['word_dim'], word_lexicon, fix_emb=False, embs=embs)\n logger.info('Word embedding size: {0}'.format(len(word_emb_layer.word2id)))\n else:\n word_emb_layer = None\n logger.info('Vocabulary size: {0}'.format(len(word_lexicon)))\n\n # Character Lexicon\n if config['token_embedder']['char_dim'] > 0:\n char_lexicon = {}\n for sentence in train_data:\n for word in sentence:\n for ch in word:\n if ch not in char_lexicon:\n char_lexicon[ch] = len(char_lexicon)\n\n for special_char in ['<bos>', '<eos>', '<oov>', '<pad>', '<bow>', '<eow>']:\n if special_char not in char_lexicon:\n char_lexicon[special_char] = len(char_lexicon)\n\n char_emb_layer = EmbeddingLayer(config['token_embedder']['char_dim'], char_lexicon, fix_emb=False)\n logger.info('Char embedding size: {0}'.format(len(char_emb_layer.word2id)))\n else:\n char_lexicon = None\n char_emb_layer = None\n\n train = create_batches(\n train_data, opt.batch_size, word_lexicon, char_lexicon, config, use_cuda=use_cuda)\n\n if opt.eval_steps is None:\n opt.eval_steps = len(train[0])\n logger.info('Evaluate every {0} batches.'.format(opt.eval_steps))\n\n if valid_data is not None:\n valid = create_batches(\n valid_data, opt.batch_size, word_lexicon, char_lexicon, config, sort=False, shuffle=False, use_cuda=use_cuda)\n else:\n valid = None\n\n if test_data is not None:\n test = create_batches(\n test_data, opt.batch_size, word_lexicon, char_lexicon, config, sort=False, shuffle=False, use_cuda=use_cuda)\n else:\n test = None\n\n label_to_ix = word_lexicon\n logger.info('vocab size: {0}'.format(len(label_to_ix)))\n \n nclasses = len(label_to_ix)\n\n model = Model(config, word_emb_layer, char_emb_layer, nclasses, use_cuda)\n logger.info(str(model))\n if use_cuda:\n model = model.cuda()\n\n need_grad = lambda x: x.requires_grad\n if opt.optimizer.lower() == 'adam':\n optimizer = optim.Adam(filter(need_grad, model.parameters()), lr=opt.lr)\n elif opt.optimizer.lower() == 'sgd':\n optimizer = optim.SGD(filter(need_grad, model.parameters()), lr=opt.lr)\n elif opt.optimizer.lower() == 'adagrad':\n optimizer = optim.Adagrad(filter(need_grad, model.parameters()), lr=opt.lr)\n else:\n raise ValueError('Unknown optimizer {}'.format(opt.optimizer.lower()))\n\n try:\n os.makedirs(opt.model)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n if config['token_embedder']['char_dim'] > 0:\n with codecs.open(os.path.join(opt.model, 'char.dic'), 'w', encoding='utf-8') as fpo:\n for ch, i in char_emb_layer.word2id.items():\n print('{0}\\t{1}'.format(ch, i), file=fpo)\n\n with codecs.open(os.path.join(opt.model, 'word.dic'), 'w', encoding='utf-8') as fpo:\n for w, i in word_lexicon.items():\n print('{0}\\t{1}'.format(w, i), file=fpo)\n\n json.dump(vars(opt), codecs.open(os.path.join(opt.model, 'config.json'), 'w', encoding='utf-8'))\n\n best_train = 1e+8\n best_valid = 1e+8\n test_result = 1e+8\n\n for epoch in range(opt.max_epoch):\n best_train, best_valid, test_result = train_model(epoch, opt, model, optimizer,\n 
train, valid, test, best_train, best_valid, test_result)\n if opt.lr_decay > 0:\n optimizer.param_groups[0]['lr'] *= opt.lr_decay\n\n if valid_data is None:\n logger.info(\"best train ppl: {:.6f}.\".format(best_train))\n elif test_data is None:\n logger.info(\"best train ppl: {:.6f}, best valid ppl: {:.6f}.\".format(best_train, best_valid))\n else:\n logger.info(\"best train ppl: {:.6f}, best valid ppl: {:.6f}, test ppl: {:.6f}.\".format(best_train, best_valid, test_result))\n\n\ndef test():\n cmd = argparse.ArgumentParser('The testing components of')\n cmd.add_argument('--gpu', default=-1, type=int, help='use id of gpu, -1 if cpu.')\n cmd.add_argument(\"--input\", help=\"the path to the raw text file.\")\n cmd.add_argument(\"--model\", required=True, help=\"path to save model\")\n cmd.add_argument(\"--batch_size\", \"--batch\", type=int, default=1, help='the batch size.')\n args = cmd.parse_args(sys.argv[2:])\n\n if args.gpu >= 0:\n torch.cuda.set_device(args.gpu)\n use_cuda = args.gpu >= 0 and torch.cuda.is_available()\n \n args2 = dict2namedtuple(json.load(codecs.open(os.path.join(args.model, 'config.json'), 'r', encoding='utf-8')))\n\n with open(args2.config_path, 'r') as fin:\n config = json.load(fin)\n\n if config['token_embedder']['char_dim'] > 0:\n char_lexicon = {}\n with codecs.open(os.path.join(args.model, 'char.dic'), 'r', encoding='utf-8') as fpi:\n for line in fpi:\n tokens = line.strip().split('\\t')\n if len(tokens) == 1:\n tokens.insert(0, '\\u3000')\n token, i = tokens\n char_lexicon[token] = int(i)\n char_emb_layer = EmbeddingLayer(config['token_embedder']['char_dim'], char_lexicon, fix_emb=False)\n logger.info('char embedding size: ' + str(len(char_emb_layer.word2id)))\n else:\n char_lexicon = None\n char_emb_layer = None\n\n word_lexicon = {}\n with codecs.open(os.path.join(args.model, 'word.dic'), 'r', encoding='utf-8') as fpi:\n for line in fpi:\n tokens = line.strip().split('\\t')\n if len(tokens) == 1:\n tokens.insert(0, '\\u3000')\n token, i = tokens\n word_lexicon[token] = int(i)\n\n if config['token_embedder']['word_dim'] > 0:\n word_emb_layer = EmbeddingLayer(config['token_embedder']['word_dim'], word_lexicon, fix_emb=False, embs=None)\n logger.info('word embedding size: ' + str(len(word_emb_layer.word2id)))\n else:\n word_emb_layer = None\n \n model = Model(config, word_emb_layer, char_emb_layer, len(word_lexicon), use_cuda)\n\n if use_cuda:\n model.cuda()\n\n logger.info(str(model))\n model.load_model(args.model)\n if config['token_embedder']['name'].lower() == 'cnn':\n test = read_corpus(args.input, config['token_embedder']['max_characters_per_token'], max_sent_len=10000)\n elif config['token_embedder']['name'].lower() == 'lstm':\n test = read_corpus(args.input, max_sent_len=10000)\n else:\n raise ValueError('')\n\n test_w, test_c, test_lens, test_masks = create_batches(\n test, args.batch_size, word_lexicon, char_lexicon, config, sort=False, shuffle=False, use_cuda=use_cuda)\n\n test_result = eval_model(model, (test_w, test_c, test_lens, test_masks))\n\n logger.info(\"test_ppl={:.6f}\".format(test_result))\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1 and sys.argv[1] == 'train':\n train()\n elif len(sys.argv) > 1 and sys.argv[1] == 'test':\n test()\n else:\n print('Usage: {0} [train|test] [options]'.format(sys.argv[0]), file=sys.stderr)\n"
] | [
[
"torch.nn.Linear",
"torch.nn.LSTM"
],
[
"torch.LongTensor",
"torch.cuda.set_device",
"torch.nn.functional.dropout",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.exp",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rahuln/adapter-transformers | [
"ac3284547064686d31b95e5e1b078447a2199779",
"ac3284547064686d31b95e5e1b078447a2199779"
] | [
"src/transformers/adapters/model_mixin.py",
"examples/pytorch/multiple-choice/run_swag.py"
] | [
"import logging\nimport os\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom os.path import join\nfrom typing import Iterable, List, Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\n\nfrom .composition import AdapterCompositionBlock, Fuse, Stack, parse_composition\nfrom .configuration import AdapterConfig, AdapterConfigBase, AdapterFusionConfig, get_adapter_config_hash\nfrom .context import AdapterSetup, ForwardContext\nfrom .hub_mixin import PushAdapterToHubMixin\nfrom .layer import AdapterLayer, AdapterLayerBase\nfrom .loading import AdapterFusionLoader, AdapterLoader, PredictionHeadLoader, WeightsLoader\nfrom .modeling import Adapter, GLOWCouplingBlock, NICECouplingBlock\nfrom .prefix_tuning import PrefixTuningPool, PrefixTuningShim\nfrom .utils import EMBEDDING_FILE, TOKENIZER_PATH, inherit_doc\nfrom .wrappers.configuration import wrap_config\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass InvertibleAdaptersMixin:\n \"\"\"Mixin for Transformer models adding invertible adapters.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.invertible_adapters = nn.ModuleDict(dict())\n\n # Make sure config is wrapped\n self.config = wrap_config(self.config)\n\n def add_invertible_adapter(self, adapter_name: str):\n \"\"\"\n Adds an invertible adapter module for the adapter with the given name. If the given adapter does not specify an\n invertible adapter config, this method does nothing.\n\n Args:\n adapter_name (str): The name of the adapter for which to add an invertible adapter module.\n \"\"\"\n if adapter_name in self.invertible_adapters:\n raise ValueError(f\"Model already contains an adapter module for '{adapter_name}'.\")\n adapter_config = self.config.adapters.match(\n adapter_name,\n config_type=AdapterConfig,\n location_key=\"inv_adapter\",\n )\n if adapter_config and adapter_config[\"inv_adapter\"]:\n if adapter_config[\"inv_adapter\"] == \"nice\":\n inv_adap = NICECouplingBlock(\n [[self.config.hidden_size]],\n non_linearity=adapter_config[\"non_linearity\"],\n reduction_factor=adapter_config[\"inv_adapter_reduction_factor\"],\n )\n elif adapter_config[\"inv_adapter\"] == \"glow\":\n inv_adap = GLOWCouplingBlock(\n [[self.config.hidden_size]],\n non_linearity=adapter_config[\"non_linearity\"],\n reduction_factor=adapter_config[\"inv_adapter_reduction_factor\"],\n )\n else:\n raise ValueError(f\"Invalid invertible adapter type '{adapter_config['inv_adapter']}'.\")\n self.invertible_adapters[adapter_name] = inv_adap\n self.invertible_adapters[adapter_name].apply(Adapter.init_bert_weights)\n\n def delete_invertible_adapter(self, adapter_name: str):\n if adapter_name in self.invertible_adapters:\n del self.invertible_adapters[adapter_name]\n\n def get_invertible_adapter(self):\n # TODO: Currently no fusion over invertible adapters, takes only very first language adapter position\n if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:\n first_adapter = self.config.adapters.active_setup.first()\n if first_adapter in self.invertible_adapters:\n return self.invertible_adapters[first_adapter]\n return None\n\n def enable_invertible_adapters(self, adapter_names):\n for adapter_name in adapter_names:\n if adapter_name in self.invertible_adapters:\n for param in self.invertible_adapters[adapter_name].parameters():\n param.requires_grad = True\n\n def invertible_adapters_forward(self, hidden_states, rev=False):\n # TODO: Currently no fusion over 
invertible adapters, takes only very first language adapter position\n if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:\n first_adapter = self.config.adapters.active_setup.first()\n if first_adapter in self.invertible_adapters:\n hidden_states = self.invertible_adapters[first_adapter](hidden_states, rev=rev)\n\n return hidden_states\n\n\nclass ModelAdaptersMixin(PushAdapterToHubMixin, ABC):\n \"\"\"Mixin for transformer models adding support for loading/ saving adapters.\"\"\"\n\n def __init__(self, config, *args, **kwargs):\n super().__init__(config, *args, **kwargs)\n if config.name_or_path and not os.path.exists(config.name_or_path):\n self.model_name = config.name_or_path\n else:\n self.model_name = None\n self.loaded_embeddings = {}\n self.shared_parameters = nn.ModuleDict()\n self._active_embedding = \"default\"\n\n # Make sure config is wrapped\n self.config = wrap_config(self.config)\n\n def _link_prefix_to_pool(self, layer):\n if isinstance(layer, PrefixTuningShim):\n layer.set_pool(self.base_model.prefix_tuning)\n\n def _init_adapter_modules(self, add_prefix_tuning_pool=True):\n \"\"\"\n This method initializes adapter modules and fusion modules from the model config.\n \"\"\"\n # Link all prefix tunings\n if add_prefix_tuning_pool:\n self.base_model.prefix_tuning = PrefixTuningPool(self.config)\n self.apply_to_adapter_layers(lambda i, layer: self._link_prefix_to_pool(layer))\n\n # Initialize adapters from config\n for adapter_name in self.config.adapters:\n self.apply_to_adapter_layers(lambda i, layer: layer.add_adapter(adapter_name, i))\n # Initialize fusion from config\n for fusion_name in self.config.adapters.fusions:\n self.apply_to_adapter_layers(lambda i, layer: layer.add_fusion_layer(fusion_name))\n\n self.loaded_embeddings[\"default\"] = self.get_input_embeddings()\n\n # These methods have to be implemented by every deriving class:\n\n @abstractmethod\n def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:\n \"\"\"\n Iterates over all layers of the model.\n\n This abstract method has to ne implemented by every implementing model.\n \"\"\"\n pass\n\n def apply_to_adapter_layers(self, fn):\n \"\"\"\n Applies a function to all adapter layers of the model.\n \"\"\"\n for i, layer in self.iter_layers():\n for module in layer.modules():\n if isinstance(module, AdapterLayerBase):\n fn(i, module)\n\n def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):\n \"\"\"Sets the model into mode for training the given adapters.\"\"\"\n self.train()\n self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.apply_to_adapter_layers(lambda i, layer: layer.enable_adapters(adapter_setup, True, False))\n for adapter_name in adapter_setup:\n if adapter_name in self.shared_parameters:\n for param in self.shared_parameters[adapter_name].values():\n param.requires_grad = True\n\n if isinstance(self, InvertibleAdaptersMixin):\n self.enable_invertible_adapters(adapter_setup.flatten())\n # use the adapters to be trained by default in every forward pass\n self.set_active_adapters(adapter_setup)\n if train_embeddings:\n self.get_input_embeddings().train()\n\n def train_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"Sets the model into mode for training of adapter fusion determined by a list of adapter names.\"\"\"\n warnings.warn(\n \"add_fusion() has been deprecated in favor of add_adapter_fusion(). 
Please use the newer method instead.\",\n FutureWarning,\n )\n self.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n\n def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"Sets the model into mode for training of adapter fusion determined by a list of adapter names.\"\"\"\n self.train()\n self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.apply_to_adapter_layers(lambda i, layer: layer.enable_adapters(adapter_setup, unfreeze_adapters, True))\n # use the adapters to be trained by default in every forward pass\n self.set_active_adapters(adapter_setup)\n # TODO implement fusion for invertible adapters\n\n def has_adapters(self):\n if not getattr(self.config, \"is_adaptable\", None):\n return False\n return len(self.config.adapters.adapters) > 0\n\n @property\n def has_parallel_adapters(self) -> bool:\n if self.config.adapters.active_setup:\n return self.config.adapters.active_setup.parallel_channels > 1\n else:\n return False\n\n @property\n def active_adapters(self) -> AdapterCompositionBlock:\n return self.config.adapters.active_setup\n\n @active_adapters.setter\n def active_adapters(self, adapter_setup: Union[list, AdapterCompositionBlock]):\n self.set_active_adapters(adapter_setup)\n\n def set_shared_parameters(self, param):\n self.shared_parameters = param\n\n def set_active_adapters(\n self, adapter_setup: Union[list, AdapterCompositionBlock], skip_layers: Optional[List[int]] = None\n ):\n \"\"\"\n Sets the adapter modules to be used by default in every forward pass. If no adapter with the given name is\n found, no module of the respective type will be activated.\n\n Args:\n adapter_setup (list):\n The list of adapters to be activated by default. Can be a fusion or stacking configuration.\n \"\"\"\n adapter_setup = parse_composition(adapter_setup, model_type=self.config.model_type)\n if adapter_setup:\n for adapter_name in adapter_setup.flatten():\n if adapter_name not in self.config.adapters.adapters:\n raise ValueError(\n f\"No adapter with name '{adapter_name}' found. Please make sure that all specified adapters are correctly loaded.\"\n )\n\n self.config.adapters.active_setup = adapter_setup\n self.config.adapters.skip_layers = skip_layers\n\n def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):\n \"\"\"\n Adds a new adapter module of the specified type to the model.\n\n Args:\n\n adapter_name (str): The name of the adapter module to be added. config (str or dict or AdapterConfigBase,\n optional): The adapter configuration, can be either:\n\n - the string identifier of a pre-defined configuration dictionary\n - a configuration dictionary specifying the full config\n - if not given, the default configuration for this adapter type will be used\n overwrite_ok (bool, optional): Overwrite an adapter with the same name if it exists. By default (False), an\n exception is thrown. set_active (bool, optional): Set the adapter to be the active one. 
By default (False),\n the adapter is added but not activated.\n \"\"\"\n if isinstance(config, dict):\n config = AdapterConfigBase.load(config) # ensure config is ok and up-to-date\n # In case adapter already exists and we allow overwriting, explicitly delete the existing one first\n if overwrite_ok and adapter_name in self.config.adapters:\n self.delete_adapter(adapter_name)\n self.config.adapters.add(adapter_name, config=config)\n try:\n self.apply_to_adapter_layers(lambda i, layer: layer.add_adapter(adapter_name, i))\n # PHM Layer\n if self.config.adapters.match(adapter_name, AdapterConfig, location_key=\"phm_layer\"):\n self._add_shared_parameters(adapter_name, config)\n # Prefix Tuning\n for module in self.modules():\n if isinstance(module, PrefixTuningPool):\n module.confirm_prefix(adapter_name)\n if isinstance(self, InvertibleAdaptersMixin):\n self.add_invertible_adapter(adapter_name)\n except ValueError as ex:\n self.delete_adapter(adapter_name)\n raise ex\n if set_active:\n self.set_active_adapters(adapter_name)\n\n def _add_shared_parameters(self, adapter_name, adapter_config: AdapterConfig):\n self.shared_parameters[adapter_name] = (\n list(self.get_adapter(adapter_name)[0].values())[0].adapter_down[0].init_shared_parameters()\n )\n\n def add_fusion(self, adapter_names: Union[Fuse, list], adapter_fusion_config=None, override_kwargs=None):\n warnings.warn(\n \"add_fusion() has been deprecated in favor of add_adapter_fusion(). Please use the newer method instead.\",\n FutureWarning,\n )\n adapter_fusion_config = AdapterFusionConfig.from_dict(adapter_fusion_config).replace(**override_kwargs)\n self.add_adapter_fusion(adapter_names, adapter_fusion_config)\n\n def add_adapter_fusion(\n self,\n adapter_names: Union[Fuse, list, str],\n config=None,\n overwrite_ok: bool = False,\n set_active: bool = False,\n ):\n \"\"\"\n Adds AdapterFusion to the model with alll the necessary configurations and weight initializations\n\n Args:\n adapter_names (Fuse or list or str): AdapterFusion layer to add. Can be either:\n\n - a ``Fuse`` composition block\n - a list of adapter names to fuse\n - a comma-separated string of adapter names to fuse\n config (str or dict): adapter fusion configuration, can be either:\n\n - a string identifying a pre-defined adapter fusion configuration\n - a dictionary representing the adapter fusion configuration\n - the path to a file containing the adapter fusion configuration\n overwrite_ok (bool, optional):\n Overwrite an AdapterFusion layer with the same name if it exists. By default (False), an exception is\n thrown.\n set_active (bool, optional):\n Activate the added AdapterFusion. 
By default (False), the AdapterFusion is added but not activated.\n \"\"\"\n if isinstance(adapter_names, Fuse):\n adapter_names = adapter_names.children\n elif isinstance(adapter_names, str):\n adapter_names = adapter_names.split(\",\")\n\n if isinstance(config, dict):\n config = AdapterFusionConfig.from_dict(config) # ensure config is ok and up-to-date\n # In case adapter already exists and we allow overwriting, explicitly delete the existing one first\n if overwrite_ok and self.config.adapters.get_fusion(adapter_names) is not None:\n self.delete_adapter_fusion(adapter_names)\n self.config.adapters.add_fusion(adapter_names, config=config)\n self.apply_to_adapter_layers(lambda i, layer: layer.add_fusion_layer(adapter_names))\n if set_active:\n if not isinstance(adapter_names, list):\n adapter_names = adapter_names.split(\",\")\n self.set_active_adapters(Fuse(*adapter_names))\n\n def delete_adapter(self, adapter_name: str):\n \"\"\"\n Deletes the adapter with the specified name from the model.\n\n Args:\n adapter_name (str): The name of the adapter.\n \"\"\"\n if adapter_name not in self.config.adapters:\n logger.info(\"No adapter '%s' found for deletion. Skipping.\", adapter_name)\n return\n del self.config.adapters.adapters[adapter_name]\n self.apply_to_adapter_layers(lambda i, layer: layer.delete_adapter(adapter_name))\n if isinstance(self, InvertibleAdaptersMixin):\n self.delete_invertible_adapter(adapter_name)\n # Reset active adapters if this was the only active adapter\n if self.active_adapters == Stack(adapter_name):\n self.active_adapters = None\n\n def delete_adapter_fusion(self, adapter_names: Union[Fuse, list, str]):\n \"\"\"\n Deletes the AdapterFusion layer of the specified adapters.\n\n Args:\n adapter_names (Union[Fuse, list, str]): AdapterFusion layer to delete.\n \"\"\"\n if isinstance(adapter_names, Fuse):\n adapter_fusion_name = \",\".join(adapter_names.children)\n elif isinstance(adapter_names, list):\n adapter_fusion_name = \",\".join(adapter_names)\n elif isinstance(adapter_names, str):\n adapter_fusion_name = adapter_names\n else:\n raise ValueError(\"Invalid AdapterFusion definition: {}\".format(adapter_names))\n\n if adapter_fusion_name not in self.config.adapters.fusions:\n logger.info(\"No AdapterFusion '%s' found for deletion. 
Skipping.\", adapter_fusion_name)\n return\n del self.config.adapters.fusions[adapter_fusion_name]\n self.apply_to_adapter_layers(lambda i, layer: layer.delete_fusion_layer(adapter_fusion_name))\n # Reset active adapters if this was the active setup\n if self.active_adapters == adapter_names:\n self.active_adapters = None\n\n def save_adapter(\n self,\n save_directory: str,\n adapter_name: str,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves an adapter and its configuration file to a directory so that it can be shared or reloaded using\n `load_adapter()`.\n\n Args:\n save_directory (str): Path to a directory where the adapter should be saved.\n adapter_name (str): Name of the adapter to be saved.\n\n Raises:\n ValueError: If the given adapter name is invalid.\n \"\"\"\n loader = AdapterLoader(self)\n loader.save(save_directory, adapter_name, meta_dict)\n # save additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.save(save_directory, adapter_name)\n\n def save_adapter_fusion(\n self,\n save_directory: str,\n adapter_names: Union[Fuse, list, str],\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded\n using `load_adapter_fusion()`.\n\n Args:\n save_directory (str): Path to a directory where the AdapterFusion should be saved.\n adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.\n\n Raises:\n ValueError: If the given AdapterFusion name is invalid.\n \"\"\"\n if isinstance(adapter_names, Fuse):\n adapter_fusion_name = \",\".join(adapter_names.children)\n elif isinstance(adapter_names, list):\n adapter_fusion_name = \",\".join(adapter_names)\n elif isinstance(adapter_names, str):\n adapter_fusion_name = adapter_names\n else:\n raise ValueError(\"Invalid AdapterFusion definition: {}\".format(adapter_names))\n\n loader = AdapterFusionLoader(self)\n loader.save(save_directory, adapter_fusion_name, meta_dict)\n # save additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.save(save_directory, adapter_fusion_name)\n\n def load_adapter(\n self,\n adapter_name_or_path: str,\n config: Union[dict, str] = None,\n version: str = None,\n model_name: str = None,\n load_as: str = None,\n source: str = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n leave_out: Optional[List[int]] = None,\n id2label=None,\n set_active: bool = False,\n **kwargs\n ) -> str:\n \"\"\"\n Loads a pre-trained pytorch adapter module from the local file system or a remote location.\n\n Args:\n adapter_name_or_path (str): can be either:\n\n - the identifier of a pre-trained task adapter to be loaded from Adapter Hub\n - a path to a directory containing adapter weights saved using `model.saved_adapter()`\n - a URL pointing to a zip folder containing a saved adapter module\n config (dict or str, optional): The requested configuration of the adapter.\n If not specified, will be either: - the default adapter config for the requested adapter if specified -\n the global default adapter config\n version (str, optional): The version of the adapter to be loaded.\n model_name (str, optional): The string identifier of the pre-trained model.\n load_as (str, optional): Load the adapter using this name. 
By default, the name with which the adapter was\n saved will be used.\n source (str, optional): Identifier of the source(s) from where to load the adapter. Can be:\n\n - \"ah\" (default): search on AdapterHub.\n - \"hf\": search on HuggingFace model hub.\n - None: search on all sources\n leave_out: Dynamically drop adapter modules in the specified Transformer layers when loading the adapter.\n set_active (bool, optional):\n Set the loaded adapter to be the active one. By default (False), the adapter is loaded but not\n activated.\n\n Returns:\n str: The name with which the adapter was added to the model.\n \"\"\"\n loader = AdapterLoader(self)\n load_dir, load_name = loader.load(\n adapter_name_or_path,\n config,\n version,\n model_name,\n load_as,\n source=source,\n leave_out=leave_out,\n set_active=set_active,\n **kwargs,\n )\n # load additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.load(\n load_dir,\n load_as=load_as,\n loading_info=kwargs.get(\"loading_info\", None),\n main_load_name=load_name,\n id2label=id2label,\n set_active=set_active,\n )\n return load_name\n\n def load_adapter_fusion(\n self,\n adapter_fusion_name_or_path: str,\n load_as: str = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n set_active: bool = False,\n **kwargs\n ) -> str:\n \"\"\"\n Loads a pre-trained AdapterFusion layer from the local file system.\n\n Args:\n adapter_fusion_name_or_path (str):\n a path to a directory containing AdapterFusion weights saved using `model.save_adapter_fusion()`.\n load_as (str, optional): Load the AdapterFusion using this name.\n By default, the name with which the AdapterFusion layer was saved will be used.\n set_active (bool, optional):\n Activate the loaded AdapterFusion. 
By default (False), the AdapterFusion is loaded but not activated.\n\n Returns:\n str: The name with which the AdapterFusion was added to the model.\n \"\"\"\n\n loader = AdapterFusionLoader(self)\n load_dir, load_name = loader.load(adapter_fusion_name_or_path, load_as, set_active=set_active)\n # load additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.load(\n load_dir,\n load_as=load_as,\n loading_info=kwargs.get(\"loading_info\", None),\n main_load_name=load_name,\n set_active=set_active,\n )\n return load_name\n\n def save_all_adapters(\n self,\n save_directory: str,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves all adapters of this model together with their configuration to subfolders of the given location.\n\n Args:\n save_directory (str): Path to a directory where the adapters should be saved.\n \"\"\"\n for name in self.config.adapters:\n adapter_config = self.config.adapters.get(name)\n h = get_adapter_config_hash(adapter_config)\n save_path = join(save_directory, name)\n if meta_dict:\n meta_dict.update({\"config_id\": h})\n else:\n meta_dict = {\"config_id\": h}\n self.save_adapter(save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders)\n\n def save_all_adapter_fusions(\n self,\n save_directory: str,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves all AdapterFusion layers of this model together with their configuration to subfolders of the given\n location.\n\n Args:\n save_directory (str): Path to a directory where the AdapterFusion layers should be saved.\n \"\"\"\n for name in self.config.adapters.fusions:\n adapter_fusion_config = self.config.adapters.get_fusion(name)\n h = get_adapter_config_hash(adapter_fusion_config)\n save_path = join(save_directory, name)\n if meta_dict:\n meta_dict.update({\"config_id\": h})\n else:\n meta_dict = {\"config_id\": h}\n self.save_adapter_fusion(\n save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders\n )\n\n def freeze_model(self, freeze=True):\n \"\"\"Freezes all weights of the model.\"\"\"\n # first freeze/ unfreeze all model weights\n for param in self.base_model.parameters():\n param.requires_grad = not freeze\n self.model_frozen = freeze\n\n def forward_context(self, context: ForwardContext, *args, **kwargs):\n \"\"\"\n This method is called by the ``ForwardContext`` at the beginning of the forward pass.\n \"\"\"\n # some warnings if we don't use available adapters\n active_adapters = getattr(self, \"active_adapters\", None) or AdapterSetup.get_context()\n if not active_adapters:\n if self.has_adapters():\n logger.warning(\"There are adapters available but none are activated for the forward pass.\")\n return\n\n context.adapters_parallelized = False\n # Add the shared parameters for the active adapters to the context\n context.shared_parameters = {\n name: param for name, param in self.shared_parameters.items() if name in active_adapters.flatten()\n }\n\n # Prefix tuning\n input_tensor = kwargs.get(\"input_ids\", None)\n if input_tensor is None:\n input_tensor = kwargs.get(\"decoder_input_ids\", None)\n if input_tensor is None:\n input_tensor = kwargs.get(\"attention_mask\", None)\n if input_tensor is None:\n input_tensor = args[0]\n context.prefix_states = self.base_model.prefix_tuning(input_tensor.shape[0])\n\n def load_embeddings(self, path: str, name: str):\n \"\"\"\n Load a 
saved embedding from the given path. If the embedding was saved with a tokenizer it is returned\n\n Args:\n path: the path to the saved embedding\n name: the name the embedding should be loaded as\n\n Returns: a tokenizer if it ws saved with the embedding otherwise None\n\n \"\"\"\n from ..models.auto.tokenization_auto import AutoTokenizer\n\n if name in self.loaded_embeddings:\n raise ValueError(\"An embedding with the name {} already exists\".format(name))\n tokenizer = None\n tokenizer_path = os.path.join(path, TOKENIZER_PATH)\n if os.path.isdir(tokenizer_path):\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)\n\n embedding_path = os.path.join(path, EMBEDDING_FILE)\n if not os.path.isfile(embedding_path):\n raise FileNotFoundError(\"No embeddings found at {}\".format(embedding_path))\n weights = torch.load(embedding_path)\n\n self.loaded_embeddings[name] = nn.Embedding.from_pretrained(weights)\n self.set_active_embeddings(name)\n return tokenizer\n\n def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):\n \"\"\"\n Add a new embedding to the model. If a reference embedding and reference tokenizer are provided tokens in the\n present in both tokenizers are initialized to the embedding in the reference_embedding.\n\n Args:\n name: the name of the embedding\n tokenizer: the tokenizer determining the vocab of the embedding\n reference_embedding:\n the reference embedding to use for initializing the embeddings of tokens present in the newly created\n embedding\n reference_tokenizer: the tokenizer providing the vocab for the reference embedding\n embedding_dim: the dimension of the embeddings (if None the hidden_size from the config is used)\n\n \"\"\"\n if name in self.loaded_embeddings:\n raise ValueError(\"An embedding with the name {} already exists\".format(name))\n if embedding_dim is None:\n embedding_dim = self.config.hidden_size\n embedding = nn.Embedding(tokenizer.vocab_size, embedding_dim)\n embedding.requires_grad_(False)\n if (reference_embedding is not None and reference_tokenizer is None) or (\n reference_tokenizer is not None and reference_embedding is None\n ):\n raise KeyError(\n \"Reference embedding and reference tokenizer are required to use initialize embeddings from reference embedding\"\n )\n if reference_embedding is not None and reference_tokenizer is not None:\n tokens = set(tokenizer.get_vocab().keys()) & set(reference_tokenizer.get_vocab().keys())\n reference_vocab = reference_tokenizer.get_vocab()\n vocab = tokenizer.get_vocab()\n for t in tokens:\n idx_reference = reference_vocab[t]\n idx = vocab[t]\n embedding.weight[idx] = self.loaded_embeddings[reference_embedding].weight[idx_reference].clone()\n embedding.train(False)\n self.loaded_embeddings[name] = embedding\n self.set_active_embeddings(name)\n\n def delete_embeddings(self, name):\n \"\"\"\n Deletes the embedding with the given name\n\n Args:\n name: The name of the embedding that should be deleted\n\n \"\"\"\n if name not in self.loaded_embeddings:\n raise ValueError(\"No embedding with name {}\".format(name))\n if self.active_embeddings == name:\n logger.warning(\"The active embedding is deleted. Setting the default embedding as active.\")\n self.set_active_embeddings(\"default\")\n del self.loaded_embeddings[name]\n\n def save_embeddings(self, path, name, tokenizer=None):\n \"\"\"\n Saves the embedding with the given name. 
If a tokenizer is passed as well the tokenizer is saved together with\n the embedding.\n\n Args:\n path: The path where the embedding should be saved\n name: The name of the embedding that should be saved\n tokenizer: optionally a tokenizer to save with the embedding (default is None)\n\n \"\"\"\n if self.active_embeddings == name:\n self.loaded_embeddings[name] = self.get_input_embeddings()\n os.makedirs(path, exist_ok=True)\n embedding_path = os.path.join(path, EMBEDDING_FILE)\n torch.save(self.loaded_embeddings[name].weight, embedding_path)\n if tokenizer:\n tokenizer_path = os.path.join(path, TOKENIZER_PATH)\n tokenizer.save_pretrained(tokenizer_path)\n\n def set_active_embeddings(self, name):\n \"\"\"\n Sets the active embedding for the forward pass of the model\n\n Args:\n name: The name of the embedding that should be used\n\n \"\"\"\n self.loaded_embeddings[self.active_embeddings] = self.get_input_embeddings()\n self.set_input_embeddings(self.loaded_embeddings[name])\n self._active_embedding = name\n\n @property\n def active_embeddings(self):\n return self._active_embedding\n\n def get_fusion_regularization_loss(self):\n reg_loss = 0.0\n\n target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)\n for i, layer in self.iter_layers():\n for module in layer.modules():\n if isinstance(module, AdapterLayer):\n for _, layer_fusion in module.adapter_fusion_layer.items():\n if hasattr(layer_fusion, \"value\"):\n reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()\n\n return reg_loss\n\n def get_adapter(self, name) -> dict:\n \"\"\"\n Returns a dictionary with all weights of the adapter with the specified name.\n\n Args:\n name (str): The adapter name.\n\n Returns:\n dict: A nested dictionary containing the weights of the adapter. 
The dictionary is structured as follow:\n {<layer id>: {<module location>: <nn.Module>}}.\n \"\"\"\n destination = defaultdict(dict)\n\n # use a custom index to ensure numbering is from 0 to N layers\n for i, (_, layer) in enumerate(self.iter_layers()):\n for module in layer.modules():\n if isinstance(module, AdapterLayerBase):\n adapter_module = module.get_adapter(name)\n if adapter_module is not None:\n destination[i][module.location_key] = adapter_module\n\n return dict(destination)\n\n def eject_prefix_tuning(self, name: str):\n \"\"\"\n Converts the prefix tuning with the given name from the reparameterized form into the flat form.\n\n Args:\n name (str): The name of the prefix tuning.\n \"\"\"\n for module in self.modules():\n if isinstance(module, PrefixTuningPool):\n if name in module.prefix_tunings:\n module.prefix_tunings[name].eject()\n\n\n@inherit_doc\nclass ModelWithHeadsAdaptersMixin(ModelAdaptersMixin):\n \"\"\"\n Mixin adding support for loading/ saving adapters to transformer models with head(s).\n \"\"\"\n\n def __init__(self, config, *args, **kwargs):\n super().__init__(config, *args, **kwargs)\n self._convert_to_flex_head = False\n\n def set_shared_parameters(self, param):\n self.shared_parameters = param\n if self.base_model is not self:\n self.base_model.shared_parameters = self.shared_parameters\n\n def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:\n \"\"\"\n Iterates over all layers of the model.\n \"\"\"\n if self.base_model is self:\n return super().iter_layers()\n else:\n return self.base_model.iter_layers()\n\n def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):\n \"\"\"\n Adds a new adapter module of the specified type to the model.\n\n Args:\n adapter_name (str): The name of the adapter module to be added.\n config (str or dict, optional): The adapter configuration, can be either:\n\n - the string identifier of a pre-defined configuration dictionary\n - a configuration dictionary specifying the full config\n - if not given, the default configuration for this adapter type will be used\n overwrite_ok (bool, optional):\n Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.\n set_active (bool, optional):\n Set the adapter to be the active one. By default (False), the adapter is added but not activated.\n\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n super().add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)\n else:\n self.base_model.add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)\n\n def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):\n \"\"\"\n Sets the model into mode for training the given adapters. If self.base_model is self, must inherit from a class\n that implements this method, to preclude infinite recursion\n \"\"\"\n if self.base_model is self:\n super().train_adapter(adapter_setup, train_embeddings)\n else:\n self.base_model.train_adapter(adapter_setup, train_embeddings)\n\n def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"\n Sets the model into mode for training of adapter fusion determined by a list of adapter names. 
If\n self.base_model is self, must inherit from a class that implements this method, to preclude infinite recursion\n \"\"\"\n if self.base_model is self:\n super().train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n else:\n self.base_model.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n\n def save_head(self, save_directory: str, head_name: str = None):\n loader = PredictionHeadLoader(self)\n loader.save(save_directory, name=head_name)\n\n def load_head(self, save_directory, load_as=None, id2label=None, **kwargs):\n loader = PredictionHeadLoader(self, convert_to_flex_head=self._convert_to_flex_head)\n return loader.load(save_directory, load_as=load_as, id2label=id2label, **kwargs)\n\n def save_adapter(\n self,\n save_directory: str,\n adapter_name: str,\n with_head: bool = True,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().save_adapter(\n save_directory,\n adapter_name,\n meta_dict=meta_dict,\n custom_weights_loaders=custom_weights_loaders,\n )\n\n def load_adapter(\n self,\n adapter_name_or_path: str,\n config: Union[dict, str] = None,\n version: str = None,\n model_name: str = None,\n load_as: str = None,\n source: str = None,\n with_head: bool = True,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n leave_out: Optional[List[int]] = None,\n id2label=None,\n set_active: bool = False,\n **kwargs\n ) -> str:\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(\n PredictionHeadLoader(\n self,\n error_on_missing=False,\n convert_to_flex_head=self._convert_to_flex_head,\n )\n )\n # Support passing a num_labels for compatibility reasons. Convert to label map here.\n num_labels = kwargs.pop(\"num_labels\", None)\n if num_labels is not None:\n id2label = {i: \"LABEL_\" + str(i) for i in range(num_labels)}\n return super().load_adapter(\n adapter_name_or_path,\n config=config,\n version=version,\n model_name=model_name,\n load_as=load_as,\n source=source,\n custom_weights_loaders=custom_weights_loaders,\n leave_out=leave_out,\n id2label=id2label,\n set_active=set_active,\n **kwargs,\n )\n\n def save_all_adapters(\n self,\n save_directory: str,\n with_head: bool = True,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().save_all_adapters(\n save_directory,\n meta_dict=meta_dict,\n custom_weights_loaders=custom_weights_loaders,\n )\n\n def save_adapter_fusion(\n self,\n save_directory: str,\n adapter_names: Union[Fuse, list, str],\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n with_head: Union[bool, str] = False,\n ):\n \"\"\"\n Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded\n using `load_adapter_fusion()`.\n\n Args:\n save_directory (str): Path to a directory where the AdapterFusion should be saved.\n adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.\n with_head (Union[bool, str]):\n If True, will save a head with the same name as the AdapterFusionLayer. 
If a string, this will be used\n as the name of the head to be saved.\n\n Raises:\n ValueError: If the given AdapterFusion name is invalid.\n \"\"\"\n super().save_adapter_fusion(save_directory, adapter_names, meta_dict, custom_weights_loaders)\n\n if with_head:\n # Make sure to cover the different options for adapter_names\n if isinstance(with_head, str):\n head_name = with_head\n elif isinstance(adapter_names, Fuse):\n head_name = adapter_names.name\n elif isinstance(adapter_names, list):\n head_name = \",\".join(adapter_names)\n else:\n head_name = adapter_names\n if head_name not in self.heads:\n raise ValueError(\"No head with name {} found\".format(head_name))\n loader = PredictionHeadLoader(self)\n loader.save(save_directory, head_name)\n\n def load_adapter_fusion(\n self,\n adapter_fusion_name_or_path: str,\n load_as: str = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n set_active: bool = False,\n with_head: bool = True,\n **kwargs\n ) -> str:\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().load_adapter_fusion(adapter_fusion_name_or_path, load_as, custom_weights_loaders, set_active)\n\n def save_all_heads(self, save_directory):\n for head_name in self.heads:\n save_path = join(save_directory, head_name)\n self.save_head(save_path, head_name)\n\n def get_labels(self):\n return list(self.config.id2label.values())\n\n def get_labels_dict(self):\n return self.config.id2label\n\n def get_adapter(self, name):\n \"\"\"\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n return super().get_adapter(name)\n else:\n return self.base_model.get_adapter(name)\n\n def load_embeddings(self, path: str, name: str):\n if self.base_model is self:\n return super().load_embeddings(path, name)\n else:\n return self.base_model.load_embeddings(path, name)\n\n def save_embeddings(self, path, name, tokenizer=None):\n if self.base_model is self:\n return super().save_embeddings(path, name, tokenizer)\n else:\n return self.base_model.save_embeddings(path, name, tokenizer)\n\n def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):\n if self.base_model is None:\n return super().add_embeddings(name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim)\n else:\n return self.base_model.add_embeddings(\n name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim\n )\n\n def set_active_embeddings(self, name):\n if self.base_model is None:\n return super().set_active_embeddings(name)\n else:\n return self.base_model.set_active_embeddings(name)\n\n def delete_embeddings(self, name):\n if self.base_model is None:\n return super().delete_embeddings(name)\n else:\n return self.base_model.delete_embeddings(name)\n",
"#!/usr/bin/env python\n# coding=utf-8\n# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for multiple choice.\n\"\"\"\n# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.\n\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom itertools import chain\nfrom typing import Optional, Union\n\nimport datasets\nimport numpy as np\nimport torch\nfrom datasets import load_dataset\n\nimport transformers\nimport transformers.adapters.composition as ac\nfrom transformers import (\n AdapterConfig,\n AdapterTrainer,\n AutoConfig,\n AutoModelForMultipleChoice,\n AutoTokenizer,\n HfArgumentParser,\n MultiLingAdapterArguments,\n Trainer,\n TrainingArguments,\n default_data_collator,\n set_seed,\n)\nfrom transformers.file_utils import PaddingStrategy\nfrom transformers.tokenization_utils_base import PreTrainedTokenizerBase\nfrom transformers.trainer_utils import get_last_checkpoint\nfrom transformers.utils import check_min_version\n\n\n# Will error if the minimal version of Transformers is not installed. Remove at your own risks.\ncheck_min_version(\"4.17.0\")\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n train_file: Optional[str] = field(default=None, metadata={\"help\": \"The input training data file (a text file).\"})\n validation_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input evaluation data file to evaluate the perplexity on (a text 
file).\"},\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n max_seq_length: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. If passed, sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to the maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n },\n )\n\n def __post_init__(self):\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n\n\n@dataclass\nclass DataCollatorForMultipleChoice:\n \"\"\"\n Data collator that will dynamically pad the inputs for multiple choice received.\n\n Args:\n tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n The tokenizer used for encoding the data.\n padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the\n maximum acceptable input length for the model if that argument is not provided.\n * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n different lengths).\n max_length (:obj:`int`, `optional`):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (:obj:`int`, `optional`):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n\n def __call__(self, features):\n label_name = \"label\" if \"label\" in features[0].keys() else \"labels\"\n labels = [feature.pop(label_name) for feature in features]\n batch_size = len(features)\n num_choices = len(features[0][\"input_ids\"])\n flattened_features = [\n [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for 
feature in features\n ]\n flattened_features = list(chain(*flattened_features))\n\n batch = self.tokenizer.pad(\n flattened_features,\n padding=self.padding,\n max_length=self.max_length,\n pad_to_multiple_of=self.pad_to_multiple_of,\n return_tensors=\"pt\",\n )\n\n # Un-flatten\n batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}\n # Add back labels\n batch[\"labels\"] = torch.tensor(labels, dtype=torch.int64)\n return batch\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, MultiLingAdapterArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args, adapter_args = parser.parse_json_file(\n json_file=os.path.abspath(sys.argv[1])\n )\n else:\n model_args, data_args, training_args, adapter_args = parser.parse_args_into_dataclasses()\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n log_level = training_args.get_process_log_level()\n logger.setLevel(log_level)\n datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n logger.info(f\"Training/evaluation parameters {training_args}\")\n\n # Detecting last checkpoint.\n last_checkpoint = None\n if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:\n last_checkpoint = get_last_checkpoint(training_args.output_dir)\n if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. \"\n \"Use --overwrite_output_dir to overcome.\"\n )\n elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:\n logger.info(\n f\"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change \"\n \"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n\n # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n # 'text' is found. 
You can easily tweak this behavior (see below).\n\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.train_file is not None or data_args.validation_file is not None:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = data_args.train_file.split(\".\")[-1]\n raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)\n else:\n # Downloading and loading the swag dataset from the hub.\n raw_datasets = load_dataset(\"swag\", \"regular\", cache_dir=model_args.cache_dir)\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Load pretrained model and tokenizer\n\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast_tokenizer,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n model = AutoModelForMultipleChoice.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n\n # Setup adapters\n if adapter_args.train_adapter:\n task_name = \"swag\"\n # check if adapter already exists otherwise add it\n if task_name not in model.config.adapters:\n # resolve adapter config\n adapter_config = AdapterConfig.load(\n adapter_args.adapter_config,\n non_linearity=adapter_args.adapter_non_linearity,\n reduction_factor=adapter_args.adapter_reduction_factor,\n )\n # load adapter from hub if specified\n if adapter_args.load_adapter:\n model.load_adapter(adapter_args.load_adapter, config=adapter_config, load_as=task_name)\n else:\n model.add_adapter(task_name, config=adapter_config)\n # optionally load a pretrained language adapter\n if adapter_args.load_lang_adapter:\n # resolve language adapter config\n lang_adapter_config = AdapterConfig.load(\n adapter_args.lang_adapter_config,\n non_linearity=adapter_args.lang_adapter_non_linearity,\n reduction_factor=adapter_args.lang_adapter_reduction_factor,\n )\n # load language adapter from Hub\n lang_adapter_name = model.load_adapter(\n adapter_args.load_lang_adapter,\n config=lang_adapter_config,\n load_as=adapter_args.language,\n )\n else:\n lang_adapter_name = None\n # Freeze all model weights except of those in this adapter\n model.train_adapter(task_name)\n # Set the adapters to be used in every forward pass\n if lang_adapter_name:\n model.set_active_adapters(ac.Stack(lang_adapter_name, task_name))\n else:\n model.set_active_adapters(task_name)\n else:\n if adapter_args.load_adapter or adapter_args.load_lang_adapter:\n raise ValueError(\n 
\"Adapters can only be loaded in adapters training mode.\"\n \"Use --train_adapter to enable adapter_training\"\n )\n\n # When using your own dataset or a different dataset from swag, you will probably need to change this.\n ending_names = [f\"ending{i}\" for i in range(4)]\n context_name = \"sent1\"\n question_header_name = \"sent2\"\n\n if data_args.max_seq_length is None:\n max_seq_length = tokenizer.model_max_length\n if max_seq_length > 1024:\n logger.warning(\n f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\n \"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx.\"\n )\n max_seq_length = 1024\n else:\n if data_args.max_seq_length > tokenizer.model_max_length:\n logger.warning(\n f\"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the\"\n f\"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.\"\n )\n max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)\n\n # Preprocessing the datasets.\n def preprocess_function(examples):\n first_sentences = [[context] * 4 for context in examples[context_name]]\n question_headers = examples[question_header_name]\n second_sentences = [\n [f\"{header} {examples[end][i]}\" for end in ending_names] for i, header in enumerate(question_headers)\n ]\n\n # Flatten out\n first_sentences = list(chain(*first_sentences))\n second_sentences = list(chain(*second_sentences))\n\n # Tokenize\n tokenized_examples = tokenizer(\n first_sentences,\n second_sentences,\n truncation=True,\n max_length=max_seq_length,\n padding=\"max_length\" if data_args.pad_to_max_length else False,\n )\n # Un-flatten\n return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}\n\n if training_args.do_train:\n if \"train\" not in raw_datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n train_dataset = raw_datasets[\"train\"]\n if data_args.max_train_samples is not None:\n train_dataset = train_dataset.select(range(data_args.max_train_samples))\n with training_args.main_process_first(desc=\"train dataset map pre-processing\"):\n train_dataset = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n if training_args.do_eval:\n if \"validation\" not in raw_datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = raw_datasets[\"validation\"]\n if data_args.max_eval_samples is not None:\n eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))\n with training_args.main_process_first(desc=\"validation dataset map pre-processing\"):\n eval_dataset = eval_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n # Data collator\n data_collator = (\n default_data_collator\n if data_args.pad_to_max_length\n else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)\n )\n\n # Metric\n def compute_metrics(eval_predictions):\n predictions, label_ids = eval_predictions\n preds = np.argmax(predictions, axis=1)\n return {\"accuracy\": (preds == label_ids).astype(np.float32).mean().item()}\n\n # Initialize our Trainer\n trainer_class = AdapterTrainer if adapter_args.train_adapter else Trainer\n trainer = trainer_class(\n model=model,\n 
args=training_args,\n train_dataset=train_dataset if training_args.do_train else None,\n eval_dataset=eval_dataset if training_args.do_eval else None,\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=compute_metrics,\n )\n\n # Training\n if training_args.do_train:\n checkpoint = None\n if training_args.resume_from_checkpoint is not None:\n checkpoint = training_args.resume_from_checkpoint\n elif last_checkpoint is not None:\n checkpoint = last_checkpoint\n train_result = trainer.train(resume_from_checkpoint=checkpoint)\n trainer.save_model() # Saves the tokenizer too for easy upload\n metrics = train_result.metrics\n\n max_train_samples = (\n data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)\n )\n metrics[\"train_samples\"] = min(max_train_samples, len(train_dataset))\n\n trainer.log_metrics(\"train\", metrics)\n trainer.save_metrics(\"train\", metrics)\n trainer.save_state()\n\n # Evaluation\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n metrics = trainer.evaluate()\n max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)\n metrics[\"eval_samples\"] = min(max_eval_samples, len(eval_dataset))\n\n trainer.log_metrics(\"eval\", metrics)\n trainer.save_metrics(\"eval\", metrics)\n\n kwargs = dict(\n finetuned_from=model_args.model_name_or_path,\n tasks=\"multiple-choice\",\n dataset_tags=\"swag\",\n dataset_args=\"regular\",\n dataset=\"SWAG\",\n language=\"en\",\n )\n\n if training_args.push_to_hub:\n trainer.push_to_hub(**kwargs)\n else:\n trainer.create_model_card(**kwargs)\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.load",
"torch.zeros",
"torch.nn.ModuleDict",
"torch.nn.Embedding",
"torch.nn.Embedding.from_pretrained",
"torch.save"
],
[
"numpy.argmax",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aaron8tang/qtrader | [
"e5c1e175e19b20381f9140fb76c30ad5cb81f01c",
"e5c1e175e19b20381f9140fb76c30ad5cb81f01c",
"e5c1e175e19b20381f9140fb76c30ad5cb81f01c"
] | [
"qtrader/simulation/aaft.py",
"qtrader/agents/pretrainer/objectives.py",
"tests/envs.py"
] | [
"import numpy as np\nimport pandas as pd\n\n\ndef AAFT(df, random=np.random.uniform, random_state=None):\n \"\"\"Amplitude Adjusted Fourier Transform Baseline Generator.\"\"\"\n # set random seed\n np.random.seed(random_state)\n # Operate on numpy.ndarray\n ts = df.values\n # 2d time-series format\n _ts = ts.reshape(len(ts), -1)\n # Odd number of samples\n if len(_ts) % 2 != 0:\n _ts = _ts[1:, :]\n # Generated time-series\n ts_gen = np.empty_like(_ts)\n for i, tsi in enumerate(_ts.T):\n # Fourier Transaformation (real-valued signal)\n F_tsi = np.fft.rfft(tsi)\n # Randomization of Phase\n rv_phase = np.exp(random(0, np.pi, len(F_tsi)) * 1.0j)\n # Generation of new time-series\n F_tsi_new = F_tsi * rv_phase\n # Inverse Fourier Transformation\n ts_gen[:, i] = np.fft.irfft(F_tsi_new)\n # Create pandas DataFrame\n df_gen = pd.DataFrame(ts_gen, columns=df.columns,\n index=df.index[-len(ts_gen):])\n return df_gen\n",
"import numpy as np\n\nfrom qtrader.utils.numpy import eps\n\n\ndef _mu_p(w: np.ndarray, r: np.ndarray) -> float:\n \"\"\"Portfolio Returns.\"\"\"\n return np.dot(w.T, r)\n\n\ndef _sigma_p(w: np.ndarray, Sigma: np.ndarray) -> float:\n \"\"\"Portoflio Variance\"\"\"\n return np.dot(np.dot(w.T, Sigma), w)\n\n\ndef _trans_costs(w: np.ndarray, w0: np.ndarray, coef: float) -> float:\n \"\"\"Transaction Costs.\"\"\"\n return np.sum(np.abs(w0 - w)) * coef\n\n\ndef risk_aversion(w: np.ndarray, mu: np.ndarray,\n Sigma: np.ndarray, w0: np.ndarray,\n alpha: float, beta: float) -> float:\n \"\"\"Risk Aversion with Transaction Costs.\"\"\"\n assert Sigma.shape[0] == Sigma.shape[1]\n assert mu.shape[0] == Sigma.shape[0]\n assert w.shape == w0.shape\n # mean - alpha * variance - transaction_costs\n return - (_mu_p(w, mu) - alpha * _sigma_p(w, Sigma) - _trans_costs(w, w0, beta))\n\n\ndef sharpe_ratio(w: np.ndarray, mu: np.ndarray,\n Sigma: np.ndarray, w0: np.ndarray,\n beta: float) -> float:\n \"\"\"Sharpe Ratio with Transaction Costs.\"\"\"\n assert Sigma.shape[0] == Sigma.shape[1]\n assert mu.shape[0] == Sigma.shape[0]\n assert w.shape == w0.shape\n # mean - alpha * variance - transaction_costs\n return - ((_mu_p(w, mu) - _trans_costs(w, w0, beta)) / (_sigma_p(w, Sigma) + eps))\n",
"import unittest\n\nimport numpy as np\nimport qtrader\n\nCSV_PATH = 'db/prices.csv'\n\n\nclass TestEnvs(unittest.TestCase):\n \"\"\"Test `qtrader.envs` module.\"\"\"\n\n def test__TradingEnv(self):\n \"\"\"Test `qtrader.envs.TradingEnv` class.\"\"\"\n env = qtrader.envs.TradingEnv(\n ['AAPL', 'MSFT', 'GE', 'JPM'], csv=CSV_PATH, end_date='2018')\n agent = qtrader.agents.RandomAgent(env.action_space)\n env.register(agent)\n env.reset()\n done = False\n rewards = []\n np.random.seed(13)\n while not done:\n _, reward, done, _ = env.step(\n {agent.name: env.action_space.sample()})\n rewards.append(reward[agent.name])\n env.unregister(agent)\n return self.assertIsInstance(np.sum(rewards), float)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.empty_like",
"numpy.fft.rfft",
"numpy.fft.irfft",
"numpy.random.seed"
],
[
"numpy.dot",
"numpy.abs"
],
[
"numpy.sum",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
camila-contreras/CD4ML-Scenarios | [
"806f812990c7cf33b5f78456f0065012b5b4cd35"
] | [
"cd4ml/model_tracking/validation_metrics.py"
] | [
"from sklearn import metrics\nimport numpy as np\nimport logging\nlogger = logging.getLogger(__name__)\n\n# TODO: add others\n# TODO: add ability to include generic functions\n\n\ndef r2_score(true_target, prediction):\n # R2 metric\n return metrics.r2_score(y_true=true_target, y_pred=prediction)\n\n\ndef rms_score(true_target, prediction):\n # Root mean square metric\n return np.sqrt(((prediction - true_target)**2).mean())\n\n\ndef mad_score(true_target, prediction):\n # mean absolute deviation metric\n return abs(prediction - true_target).mean()\n\n\ndef get_validation_metrics(metric_names, true_prediction_function):\n logger.info('Getting predictions')\n data = list(true_prediction_function())\n logger.info('Done with predictions')\n assert len(data) > 0\n true_target, prediction = zip(*data)\n\n true_target = np.array(true_target)\n prediction = np.array(prediction)\n\n n_validated = len(true_target)\n logger.info('n_validated: %s' % n_validated)\n\n validation_metrics = {}\n if 'r2_score' in metric_names:\n validation_metrics['r2_score'] = r2_score(true_target, prediction)\n logger.info('r2_score : {}'.format(validation_metrics['r2_score']))\n\n if 'rms_score' in metric_names:\n validation_metrics['rms_score'] = rms_score(true_target, prediction)\n logger.info('rms_scoring: {}'.format(validation_metrics['rms_score']))\n\n if 'mad_score' in metric_names:\n validation_metrics['mad_score'] = mad_score(true_target, prediction)\n logger.info('mad_scoring: {}'.format(validation_metrics['mad_score']))\n\n if 'num_validated' in metric_names:\n validation_metrics['num_validated'] = n_validated\n\n logger.info('Done validation metrics')\n\n return validation_metrics\n"
] | [
[
"numpy.array",
"sklearn.metrics.r2_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yumorozov/scikit-learn-intelex | [
"7a39c0a0e208b49f209168b01fb50206f962175f",
"7a39c0a0e208b49f209168b01fb50206f962175f",
"7a39c0a0e208b49f209168b01fb50206f962175f",
"7a39c0a0e208b49f209168b01fb50206f962175f"
] | [
"examples/daal4py/dbscan_spmd.py",
"examples/daal4py/decision_forest_classification_default_dense_batch.py",
"daal4py/sklearn/decomposition/_pca.py",
"examples/daal4py/logitboost_batch.py"
] | [
"#===============================================================================\n# Copyright 2014 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\n# daal4py DBSCAN example for distributed memory systems; SPMD mode\n# run like this:\n# mpirun -n 4 python ./dbscan_spmd.py\n\nimport daal4py as d4p\nimport numpy as np\n\n\ndef main(method='defaultDense'):\n infile = \"./data/batch/dbscan_dense.csv\"\n epsilon = 0.04\n minObservations = 45\n\n # Load the data\n data = np.loadtxt(infile, delimiter=',')\n rpp = int(data.shape[0] / d4p.num_procs())\n data = data[rpp * d4p.my_procid(): rpp * d4p.my_procid() + rpp, :]\n\n # configure dbscan main object\n algo = d4p.dbscan(minObservations=minObservations, epsilon=epsilon, distributed=True)\n # and compute\n result = algo.compute(data)\n\n return result\n\n\nif __name__ == \"__main__\":\n # Initialize SPMD mode\n d4p.daalinit()\n result = main()\n print(\"\\nResults on node with id = \", d4p.my_procid(), \" :\\n\",\n \"\\nFirst 10 cluster assignments:\\n\", result.assignments[0:10],\n \"\\nNumber of clusters:\\n\", result.nClusters)\n d4p.daalfini()\n",
"#===============================================================================\n# Copyright 2014 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\n# daal4py Decision Forest Classification example for shared memory systems\n\nimport daal4py as d4p\nimport numpy as np\n\n# let's try to use pandas' fast csv reader\ntry:\n import pandas\n\n def read_csv(f, c, t=np.float64):\n return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)\nexcept ImportError:\n # fall back to numpy loadtxt\n def read_csv(f, c, t=np.float64):\n return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2, dtype=t)\n\n\ndef main(readcsv=read_csv, method='defaultDense'):\n # input data file\n infile = \"./data/batch/df_classification_train.csv\"\n testfile = \"./data/batch/df_classification_test.csv\"\n\n # Configure a training object (5 classes)\n train_algo = d4p.decision_forest_classification_training(\n 5,\n method=method,\n nTrees=10,\n minObservationsInLeafNode=8,\n featuresPerNode=3,\n engine=d4p.engines_mt19937(seed=777),\n varImportance='MDI',\n bootstrap=True,\n resultsToCompute='computeOutOfBagError'\n )\n\n # Read data. Let's use 3 features per observation\n data = readcsv(infile, range(3), t=np.float32)\n labels = readcsv(infile, range(3, 4), t=np.float32)\n train_result = train_algo.compute(data, labels)\n # Traiing result provides (depending on parameters) model,\n # outOfBagError, outOfBagErrorPerObservation and/or variableImportance\n\n # Now let's do some prediction\n predict_algo = d4p.decision_forest_classification_prediction(\n nClasses=5,\n resultsToEvaluate=\"computeClassLabels|computeClassProbabilities\",\n votingMethod=\"unweighted\"\n )\n # read test data (with same #features)\n pdata = readcsv(testfile, range(3), t=np.float32)\n plabels = readcsv(testfile, range(3, 4), t=np.float32)\n # now predict using the model from the training above\n predict_result = predict_algo.compute(pdata, train_result.model)\n\n # Prediction result provides prediction\n assert(predict_result.prediction.shape == (pdata.shape[0], 1))\n\n return (train_result, predict_result, plabels)\n\n\nif __name__ == \"__main__\":\n (train_result, predict_result, plabels) = main()\n print(\"\\nVariable importance results:\\n\", train_result.variableImportance)\n print(\"\\nOOB error:\\n\", train_result.outOfBagError)\n print(\n \"\\nDecision forest prediction results (first 10 rows):\\n\",\n predict_result.prediction[0:10]\n )\n print(\n \"\\nDecision forest probabilities results (first 10 rows):\\n\",\n predict_result.probabilities[0:10]\n )\n print(\"\\nGround truth (first 10 rows):\\n\", plabels[0:10])\n print('All looks good!')\n",
"#===============================================================================\n# Copyright 2014 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\nimport numpy as np\nimport numbers\nfrom math import sqrt\nfrom scipy.sparse import issparse\n\nfrom sklearn.utils import check_array\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.utils.extmath import stable_cumsum\n\nimport daal4py\nfrom .._utils import (\n getFPType, sklearn_check_version, PatchingConditionsChain)\nfrom .._device_offload import support_usm_ndarray\n\nif sklearn_check_version('0.22'):\n from sklearn.decomposition._pca import PCA as PCA_original\nelse:\n from sklearn.decomposition.pca import PCA as PCA_original\n\nif sklearn_check_version('0.23'):\n from sklearn.decomposition._pca import _infer_dimension\nelif sklearn_check_version('0.22'):\n from sklearn.decomposition._pca import _infer_dimension_\nelse:\n from sklearn.decomposition.pca import _infer_dimension_\n\n\nclass PCA(PCA_original):\n __doc__ = PCA_original.__doc__\n\n def __init__(\n self,\n n_components=None,\n copy=True,\n whiten=False,\n svd_solver='auto',\n tol=0.0,\n iterated_power='auto',\n random_state=None\n ):\n self.n_components = n_components\n self.copy = copy\n self.whiten = whiten\n self.svd_solver = svd_solver\n self.tol = tol\n self.iterated_power = iterated_power\n self.random_state = random_state\n\n def _validate_n_components(self, n_components, n_samples, n_features):\n if n_components == 'mle':\n if n_samples < n_features:\n raise ValueError(\"n_components='mle' is only supported \"\n \"if n_samples >= n_features\")\n elif not 0 <= n_components <= min(n_samples, n_features):\n raise ValueError(\"n_components=%r must be between 0 and \"\n \"min(n_samples, n_features)=%r with \"\n \"svd_solver='full'\"\n % (n_components, min(n_samples, n_features)))\n elif n_components >= 1:\n if not isinstance(n_components, numbers.Integral):\n raise ValueError(\"n_components=%r must be of type int \"\n \"when greater than or equal to 1, \"\n \"was of type=%r\"\n % (n_components, type(n_components)))\n\n def _fit_full_daal4py(self, X, n_components):\n n_samples, n_features = X.shape\n n_sf_min = min(n_samples, n_features)\n\n if n_components == 'mle':\n daal_n_components = n_features\n elif n_components < 1:\n daal_n_components = n_sf_min\n else:\n daal_n_components = n_components\n\n fpType = getFPType(X)\n\n covariance_algo = daal4py.covariance(\n fptype=fpType, outputMatrixType='covarianceMatrix')\n covariance_res = covariance_algo.compute(X)\n\n self.mean_ = covariance_res.mean.ravel()\n covariance = covariance_res.covariance\n variances_ = np.array([covariance[i, i] for i in range(n_features)])\n\n pca_alg = daal4py.pca(\n fptype=fpType,\n method='correlationDense',\n resultsToCompute='eigenvalue',\n isDeterministic=True,\n nComponents=daal_n_components\n )\n pca_res = pca_alg.compute(X, covariance)\n\n components_ = 
pca_res.eigenvectors\n explained_variance_ = np.maximum(pca_res.eigenvalues.ravel(), 0)\n tot_var = explained_variance_.sum()\n explained_variance_ratio_ = explained_variance_ / tot_var\n\n if n_components == 'mle':\n if sklearn_check_version('0.23'):\n n_components = _infer_dimension(explained_variance_, n_samples)\n else:\n n_components = \\\n _infer_dimension_(explained_variance_, n_samples, n_features)\n elif 0 < n_components < 1.0:\n ratio_cumsum = stable_cumsum(explained_variance_ratio_)\n n_components = np.searchsorted(ratio_cumsum, n_components,\n side='right') + 1\n\n if n_components < n_sf_min:\n if explained_variance_.shape[0] == n_sf_min:\n self.noise_variance_ = explained_variance_[n_components:].mean()\n else:\n resid_var_ = variances_.sum()\n resid_var_ -= explained_variance_[:n_components].sum()\n self.noise_variance_ = resid_var_ / (n_sf_min - n_components)\n else:\n self.noise_variance_ = 0.\n\n self.n_samples_, self.n_features_ = n_samples, n_features\n self.components_ = components_[:n_components]\n self.n_components_ = n_components\n self.explained_variance_ = explained_variance_[:n_components]\n self.explained_variance_ratio_ = explained_variance_ratio_[:n_components]\n self.singular_values_ = np.sqrt((n_samples - 1) * self.explained_variance_)\n\n def _fit_full(self, X, n_components):\n n_samples, n_features = X.shape\n self._validate_n_components(n_components, n_samples, n_features)\n\n self._fit_full_daal4py(X, min(X.shape))\n\n U = None\n V = self.components_\n S = self.singular_values_\n\n if n_components == 'mle':\n if sklearn_check_version('0.23'):\n n_components = _infer_dimension(self.explained_variance_, n_samples)\n else:\n n_components = \\\n _infer_dimension_(self.explained_variance_, n_samples, n_features)\n elif 0 < n_components < 1.0:\n ratio_cumsum = stable_cumsum(self.explained_variance_ratio_)\n n_components = np.searchsorted(ratio_cumsum, n_components,\n side='right') + 1\n\n if n_components < min(n_features, n_samples):\n self.noise_variance_ = self.explained_variance_[n_components:].mean()\n else:\n self.noise_variance_ = 0.\n\n self.n_samples_, self.n_features_ = n_samples, n_features\n self.components_ = self.components_[:n_components]\n self.n_components_ = n_components\n self.explained_variance_ = self.explained_variance_[:n_components]\n self.explained_variance_ratio_ = self.explained_variance_ratio_[:n_components]\n self.singular_values_ = self.singular_values_[:n_components]\n\n return U, S, V\n\n def _fit(self, X):\n if issparse(X):\n raise TypeError('PCA does not support sparse input. See '\n 'TruncatedSVD for a possible alternative.')\n\n if sklearn_check_version('0.23'):\n X = self._validate_data(X, dtype=[np.float64, np.float32],\n ensure_2d=True, copy=False)\n else:\n X = check_array(X, dtype=[np.float64, np.float32], ensure_2d=True, copy=False)\n\n if self.n_components is None:\n if self.svd_solver != 'arpack':\n n_components = min(X.shape)\n else:\n n_components = min(X.shape) - 1\n else:\n n_components = self.n_components\n\n self._fit_svd_solver = self.svd_solver\n shape_good_for_daal = X.shape[1] / X.shape[0] < 2\n\n if self._fit_svd_solver == 'auto':\n if n_components == 'mle':\n self._fit_svd_solver = 'full'\n else:\n n, p, k = X.shape[0], X.shape[1], n_components\n # These coefficients are result of training of Logistic Regression\n # (max_iter=10000, solver=\"liblinear\", fit_intercept=False)\n # on different datasets and number of components. X is a dataset with\n # npk, np^2, and n^2 columns. 
And y is speedup of patched scikit-learn's\n # full PCA against stock scikit-learn's randomized PCA.\n regression_coefs = np.array([\n [9.779873e-11, n * p * k],\n [-1.122062e-11, n * p * p],\n [1.127905e-09, n ** 2],\n ])\n\n if n_components >= 1 \\\n and np.dot(regression_coefs[:, 0], regression_coefs[:, 1]) <= 0:\n self._fit_svd_solver = 'randomized'\n else:\n self._fit_svd_solver = 'full'\n\n if not shape_good_for_daal or self._fit_svd_solver != 'full':\n if sklearn_check_version('0.23'):\n X = self._validate_data(X, copy=self.copy)\n else:\n X = check_array(X, copy=self.copy)\n\n _patching_status = PatchingConditionsChain(\n \"sklearn.decomposition.PCA.fit\")\n _dal_ready = _patching_status.and_conditions([\n (self._fit_svd_solver == 'full',\n f\"'{self._fit_svd_solver}' SVD solver is not supported. \"\n \"Only 'full' solver is supported.\")\n ])\n\n if _dal_ready:\n _dal_ready = _patching_status.and_conditions([\n (shape_good_for_daal,\n \"The shape of X does not satisfy oneDAL requirements: \"\n \"number of features / number of samples >= 2\")\n ])\n if _dal_ready:\n result = self._fit_full(X, n_components)\n else:\n result = PCA_original._fit_full(self, X, n_components)\n elif self._fit_svd_solver in ['arpack', 'randomized']:\n result = self._fit_truncated(X, n_components, self._fit_svd_solver)\n else:\n raise ValueError(\"Unrecognized svd_solver='{0}'\"\n \"\".format(self._fit_svd_solver))\n\n _patching_status.write_log()\n return result\n\n def _transform_daal4py(self, X, whiten=False, scale_eigenvalues=True, check_X=True):\n if sklearn_check_version('0.22'):\n check_is_fitted(self)\n else:\n check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)\n\n if sklearn_check_version(\"1.0\"):\n self._check_feature_names(X, reset=False)\n X = check_array(X, dtype=[np.float64, np.float32], force_all_finite=check_X)\n fpType = getFPType(X)\n\n tr_data = dict()\n if self.mean_ is not None:\n tr_data['mean'] = self.mean_.reshape((1, -1))\n if whiten:\n if scale_eigenvalues:\n tr_data['eigenvalue'] = \\\n (self.n_samples_ - 1) * self.explained_variance_.reshape((1, -1))\n else:\n tr_data['eigenvalue'] = self.explained_variance_.reshape((1, -1))\n elif scale_eigenvalues:\n tr_data['eigenvalue'] = np.full(\n (1, self.explained_variance_.shape[0]),\n self.n_samples_ - 1.0, dtype=X.dtype)\n\n if X.shape[1] != self.n_features_:\n raise ValueError(\n (f'X has {X.shape[1]} features, '\n f'but PCA is expecting {self.n_features_} features as input'))\n\n tr_res = daal4py.pca_transform(\n fptype=fpType\n ).compute(X, self.components_, tr_data)\n\n return tr_res.transformedData\n\n @support_usm_ndarray()\n def transform(self, X):\n \"\"\"\n Apply dimensionality reduction to X.\n\n X is projected on the first principal components previously extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n New data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n X_new : array-like of shape (n_samples, n_components)\n Projection of X in the first principal components, where `n_samples`\n is the number of samples and `n_components` is the number of the components.\n \"\"\"\n _patching_status = PatchingConditionsChain(\n \"sklearn.decomposition.PCA.transform\")\n _dal_ready = _patching_status.and_conditions([\n (self.n_components_ > 0, \"Number of components <= 0.\")\n ])\n\n _patching_status.write_log()\n if _dal_ready:\n return self._transform_daal4py(X, whiten=self.whiten,\n 
check_X=True, scale_eigenvalues=False)\n return PCA_original.transform(self, X)\n\n @support_usm_ndarray()\n def fit_transform(self, X, y=None):\n \"\"\"\n Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Ignored.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Transformed values.\n\n Notes\n -----\n This method returns a Fortran-ordered array. To convert it to a\n C-ordered array, use 'np.ascontiguousarray'.\n \"\"\"\n U, S, _ = self._fit(X)\n\n _patching_status = PatchingConditionsChain(\n \"sklearn.decomposition.PCA.fit_transform\")\n _dal_ready = _patching_status.and_conditions([\n (U is None, \"Stock fitting was used.\")\n ])\n if _dal_ready:\n _dal_ready = _patching_status.and_conditions([\n (self.n_components_ > 0, \"Number of components <= 0.\")\n ])\n if _dal_ready:\n result = self._transform_daal4py(\n X, whiten=self.whiten, check_X=False, scale_eigenvalues=False)\n else:\n result = np.empty((self.n_samples_, 0), dtype=X.dtype)\n else:\n U = U[:, :self.n_components_]\n\n if self.whiten:\n U *= sqrt(X.shape[0] - 1)\n else:\n U *= S[:self.n_components_]\n\n result = U\n\n _patching_status.write_log()\n return result\n",
"#===============================================================================\n# Copyright 2014 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\n# daal4py Logitboost example for shared memory systems\n\nimport daal4py as d4p\nimport numpy as np\n\n# let's try to use pandas' fast csv reader\ntry:\n import pandas\n\n def read_csv(f, c, t=np.float64):\n return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)\nexcept ImportError:\n # fall back to numpy loadtxt\n def read_csv(f, c, t=np.float64):\n return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)\n\n\ndef main(readcsv=read_csv, method='defaultDense'):\n infile = \"./data/batch/logitboost_train.csv\"\n testfile = \"./data/batch/logitboost_test.csv\"\n nClasses = 5\n\n # Configure a logitboost training object\n train_algo = d4p.logitboost_training(nClasses, maxIterations=100,\n accuracyThreshold=0.01)\n\n # Read data. Let's have 20 independent,\n # and 1 dependent variable (for each observation)\n indep_data = readcsv(infile, range(20))\n dep_data = readcsv(infile, range(20, 21))\n # Now train/compute, the result provides the model for prediction\n train_result = train_algo.compute(indep_data, dep_data)\n\n # Now let's do some prediction\n predict_algo = d4p.logitboost_prediction(nClasses)\n # read test data (with same #features)\n pdata = readcsv(testfile, range(20))\n # now predict using the model from the training above\n predict_result = predict_algo.compute(pdata, train_result.model)\n\n # The prediction result provides prediction\n assert predict_result.prediction.shape == (pdata.shape[0], dep_data.shape[1])\n ptdata = np.loadtxt(testfile, usecols=range(20, 21), delimiter=',', ndmin=2)\n assert np.allclose(predict_result.prediction, ptdata)\n return (train_result, predict_result, ptdata)\n\n\nif __name__ == \"__main__\":\n (train_result, predict_result, ptdata) = main()\n print(\"\\nGround truth (first 20 observations):\\n\", ptdata[:20])\n print(\n \"Logitboost classification results: (first 20 observations):\\n\",\n predict_result.prediction[:20]\n )\n print('All looks good!')\n"
] | [
[
"numpy.loadtxt"
],
[
"pandas.read_csv",
"numpy.loadtxt"
],
[
"numpy.dot",
"sklearn.utils.validation.check_is_fitted",
"numpy.sqrt",
"scipy.sparse.issparse",
"sklearn.decomposition.pca.PCA._fit_full",
"sklearn.utils.check_array",
"sklearn.decomposition.pca._infer_dimension_",
"sklearn.decomposition._pca._infer_dimension",
"sklearn.utils.extmath.stable_cumsum",
"numpy.full",
"numpy.searchsorted",
"numpy.array",
"sklearn.decomposition.pca.PCA.transform",
"numpy.empty"
],
[
"pandas.read_csv",
"numpy.allclose",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Alex-Roudjiat/Federated-ML-AI-Federated-ML- | [
"8ccc24cf2c01b868988f5d5bd65f1666cf5526bc",
"8ccc24cf2c01b868988f5d5bd65f1666cf5526bc"
] | [
"fedml_api/model/cv/darts/utils.py",
"fedml_api/data_preprocessing/MNIST/data_loader.py"
] | [
"import os\nimport shutil\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\n\nclass AvgrageMeter(object):\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n\n\ndef accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nclass Cutout(object):\n def __init__(self, length):\n self.length = length\n\n def __call__(self, img):\n h, w = img.size(1), img.size(2)\n mask = np.ones((h, w), np.float32)\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n\n mask[y1: y2, x1: x2] = 0.\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img *= mask\n return img\n\n\ndef count_parameters_in_MB(model):\n return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if \"auxiliary\" not in name) / 1e6\n\n\ndef save_checkpoint(state, is_best, save):\n filename = os.path.join(save, 'checkpoint.pth.tar')\n torch.save(state, filename)\n if is_best:\n best_filename = os.path.join(save, 'model_best.pth.tar')\n shutil.copyfile(filename, best_filename)\n\n\ndef save(model, model_path):\n torch.save(model.state_dict(), model_path)\n\n\ndef load(model, model_path):\n model.load_state_dict(torch.load(model_path))\n\n\ndef drop_path(x, drop_prob):\n if drop_prob > 0.:\n keep_prob = 1. - drop_prob\n mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))\n x.div_(keep_prob)\n x.mul_(mask)\n return x\n\n\ndef create_exp_dir(path, scripts_to_save=None):\n if not os.path.exists(path):\n os.mkdir(path)\n print('Experiment dir : {}'.format(path))\n\n if scripts_to_save is not None:\n os.mkdir(os.path.join(path, 'scripts'))\n for script in scripts_to_save:\n dst_file = os.path.join(path, 'scripts', os.path.basename(script))\n shutil.copyfile(script, dst_file)\n",
"import json\nimport os\n\nimport numpy as np\nimport torch\n\n\ndef read_data(train_data_dir, test_data_dir):\n '''parses data in given train and test data directories\n\n assumes:\n - the data in the input directories are .json files with \n keys 'users' and 'user_data'\n - the set of train set users is the same as the set of test set users\n\n Return:\n clients: list of non-unique client ids\n groups: list of group ids; empty list if none found\n train_data: dictionary of train data\n test_data: dictionary of test data\n '''\n clients = []\n groups = []\n train_data = {}\n test_data = {}\n\n train_files = os.listdir(train_data_dir)\n train_files = [f for f in train_files if f.endswith('.json')]\n for f in train_files:\n file_path = os.path.join(train_data_dir, f)\n with open(file_path, 'r') as inf:\n cdata = json.load(inf)\n clients.extend(cdata['users'])\n if 'hierarchies' in cdata:\n groups.extend(cdata['hierarchies'])\n train_data.update(cdata['user_data'])\n\n test_files = os.listdir(test_data_dir)\n test_files = [f for f in test_files if f.endswith('.json')]\n for f in test_files:\n file_path = os.path.join(test_data_dir, f)\n with open(file_path, 'r') as inf:\n cdata = json.load(inf)\n test_data.update(cdata['user_data'])\n\n clients = sorted(cdata['users'])\n\n return clients, groups, train_data, test_data\n\n\ndef batch_data(data, batch_size):\n '''\n data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client)\n returns x, y, which are both numpy array of length: batch_size\n '''\n data_x = data['x']\n data_y = data['y']\n\n # randomly shuffle data\n np.random.seed(100)\n rng_state = np.random.get_state()\n np.random.shuffle(data_x)\n np.random.set_state(rng_state)\n np.random.shuffle(data_y)\n\n # loop through mini-batches\n batch_data = list()\n for i in range(0, len(data_x), batch_size):\n batched_x = data_x[i:i + batch_size]\n batched_y = data_y[i:i + batch_size]\n batched_x = torch.from_numpy(np.asarray(batched_x)).float()\n batched_y = torch.from_numpy(np.asarray(batched_y)).long()\n batch_data.append((batched_x, batched_y))\n return batch_data\n\n\ndef load_partition_data_mnist_by_device_id(batch_size,\n device_id,\n train_path=\"MNIST_mobile\",\n test_path=\"MNIST_mobile\"):\n train_path += '/' + device_id + '/' + 'train'\n test_path += '/' + device_id + '/' + 'test'\n return load_partition_data_mnist(batch_size, train_path, test_path)\n\n\ndef load_partition_data_mnist(batch_size,\n train_path=\"./../../../data/MNIST/train\",\n test_path=\"./../../../data/MNIST/test\"):\n users, groups, train_data, test_data = read_data(train_path, test_path)\n\n if len(groups) == 0:\n groups = [None for _ in users]\n train_data_num = 0\n test_data_num = 0\n train_data_local_dict = dict()\n test_data_local_dict = dict()\n train_data_local_num_dict = dict()\n train_data_global = list()\n test_data_global = list()\n client_idx = 0\n for u, g in zip(users, groups):\n user_train_data_num = len(train_data[u]['x'])\n user_test_data_num = len(test_data[u]['x'])\n train_data_num += user_train_data_num\n test_data_num += user_test_data_num\n train_data_local_num_dict[client_idx] = user_train_data_num\n\n # transform to batches\n train_batch = batch_data(train_data[u], batch_size)\n test_batch = batch_data(test_data[u], batch_size)\n\n # index using client index\n train_data_local_dict[client_idx] = train_batch\n test_data_local_dict[client_idx] = test_batch\n train_data_global += train_batch\n test_data_global += test_batch\n client_idx += 1\n client_num = client_idx\n class_num = 
10\n\n return client_num, train_data_num, test_data_num, train_data_global, test_data_global, \\\n train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num\n"
] | [
[
"numpy.clip",
"torch.load",
"torch.from_numpy",
"numpy.ones",
"torch.save",
"numpy.random.randint"
],
[
"numpy.random.get_state",
"numpy.random.seed",
"numpy.asarray",
"numpy.random.shuffle",
"numpy.random.set_state"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alexbarcelo/dislib | [
"989f81f235ae30b17410a8d805df258c7d931b38",
"989f81f235ae30b17410a8d805df258c7d931b38",
"989f81f235ae30b17410a8d805df258c7d931b38",
"989f81f235ae30b17410a8d805df258c7d931b38"
] | [
"examples/linear_regression_plot.py",
"tests/test_decision_tree.py",
"tests/performance/mn4/tests/gmm.py",
"dislib/model_selection/_search.py"
] | [
"import numpy as np\nfrom pylab import scatter, plot, show\n\nimport dislib as ds\nfrom dislib.regression import LinearRegression\n\n\ndef main():\n \"\"\"\n Linear regression example with plot\n \"\"\"\n\n # Example data\n x = np.array([1000, 4000, 5000, 4500, 3000, 4000, 9000, 11000, 15000,\n 12000, 7000, 3000])\n y = np.array([9914, 40487, 54324, 50044, 34719, 42551, 94871, 118914,\n 158484, 131348, 78504, 36284])\n x_ds = ds.array(x[:, np.newaxis], (4, 1))\n y_ds = ds.array(y[:, np.newaxis], (4, 1))\n reg = LinearRegression()\n reg.fit(x_ds, y_ds)\n coef = reg.coef_.collect()\n intercept = reg.intercept_.collect()\n print(coef, intercept)\n\n # plot_result:\n scatter(x, y, marker='x')\n x_mesh = np.linspace(min(x), max(x), 1000)\n plot(x_mesh, [coef*x + intercept for x in x_mesh])\n show()\n\n\nif __name__ == \"__main__\":\n main()\n",
"import unittest\n\nimport numpy as np\nfrom pycompss.api.api import compss_wait_on\n\nimport dislib as ds\nimport dislib.trees.decision_tree as dt\nfrom dislib.trees import RfClassifierDataset, transform_to_rf_dataset\n\n\nclass DecisionTreeTest(unittest.TestCase):\n def test_decision_tree(self):\n x1 = np.array(\n [\n [0.3, -0.3],\n [0.4, -0.5],\n [0.5, -0.4],\n [0.3, 0.3],\n [0.4, 0.5],\n [0.5, 0.4],\n [-0.3, -0.3],\n [-0.4, -0.5],\n [-0.5, -0.4],\n ]\n )\n x2 = np.array([[0.4, -0.3], [0.4, 0.3], [-0.4, -0.3]])\n y1 = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])\n y2 = np.array([0, 1, 2])\n\n x1_ds = ds.array(x1, (3, 2))\n x2_ds = ds.array(x2, (3, 2))\n y1_ds = ds.array(y1[:, np.newaxis], (3, 1))\n\n data1 = transform_to_rf_dataset(\n x1_ds, y1_ds, RfClassifierDataset, features_file=True\n )\n\n # Model\n try_features = 2\n max_depth = np.inf\n distr_depth = 2\n sklearn_max = 1e8\n bootstrap = True\n seed = 0\n random_state = np.random.RandomState(seed)\n n_samples, n_features = x1.shape\n n_classes = np.bincount(y1).shape[0]\n features_mmap = x1.T\n\n # Test bootstrap\n sample1, y_s1 = compss_wait_on(\n dt._sample_selection(n_samples, y1, True, seed)\n )\n sample2, y_s2 = compss_wait_on(\n dt._sample_selection(n_samples, y1, False, seed)\n )\n self.assertTrue(\n np.array_equal(sample1, np.array([0, 2, 3, 3, 3, 4, 5, 5, 7]))\n )\n self.assertTrue(\n np.array_equal(sample2, np.array([0, 1, 2, 3, 4, 5, 6, 7, 8]))\n )\n self.assertTrue(\n np.array_equal(y_s1, np.array([0, 0, 1, 1, 1, 1, 1, 1, 2]))\n )\n self.assertTrue(\n np.array_equal(y_s2, np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]))\n )\n\n # Assert split wrapper\n sample, y_s = sample2, y_s2\n with self.assertRaises(ValueError):\n dt._split_node_wrapper(\n sample,\n n_features,\n y_s,\n n_classes,\n try_features,\n random_state,\n samples_file=None,\n features_file=None,\n )\n\n split = dt._split_node_wrapper(\n sample,\n n_features,\n y_s,\n n_classes,\n try_features,\n random_state,\n samples_file=data1.samples_path,\n features_file=data1.features_path,\n )\n split = compss_wait_on(split)\n node_info, left_group, y_l, right_group, y_r = split\n self.assertTrue(node_info.index in (0, 1))\n if node_info.index == 0:\n self.assertTrue(np.array_equal(left_group, np.array([6, 7, 8])))\n self.assertTrue(np.array_equal(y_l, np.array([2, 2, 2])))\n self.assertTrue(\n np.array_equal(right_group, np.array([0, 1, 2, 3, 4, 5]))\n )\n self.assertTrue(np.array_equal(y_r, np.array([0, 0, 0, 1, 1, 1])))\n self.assertAlmostEqual(node_info.value, 0.0)\n split_l = dt._compute_split(\n left_group,\n n_features,\n y_l,\n n_classes,\n try_features,\n features_mmap,\n random_state,\n )\n node_info, left_group, y_l, right_group, y_r = split_l\n self.assertTrue(np.array_equal(left_group, np.array([6, 7, 8])))\n self.assertTrue(np.array_equal(y_l, np.array([2, 2, 2])))\n self.assertTrue(np.array_equal(right_group, np.array([])))\n self.assertTrue(np.array_equal(y_r, np.array([])))\n self.assertTrue(\n np.array_equal(node_info.frequencies, np.array([0, 0, 3]))\n )\n self.assertEqual(node_info.size, 3)\n self.assertEqual(node_info.target, 2)\n elif node_info.index == 1:\n self.assertTrue(\n np.array_equal(left_group, np.array([0, 1, 2, 6, 7, 8]))\n )\n self.assertTrue(np.array_equal(y_l, np.array([0, 0, 0, 2, 2, 2])))\n self.assertTrue(np.array_equal(right_group, np.array([3, 4, 5])))\n self.assertTrue(np.array_equal(y_r, np.array([1, 1, 1])))\n self.assertAlmostEqual(node_info.value, 0.0)\n split_r = dt._compute_split(\n right_group,\n n_features,\n y_r,\n n_classes,\n 
try_features,\n features_mmap,\n random_state,\n )\n node_info, left_group, y_l, right_group, y_r = split_r\n self.assertTrue(np.array_equal(left_group, np.array([3, 4, 5])))\n self.assertTrue(np.array_equal(y_l, np.array([1, 1, 1])))\n self.assertTrue(np.array_equal(right_group, np.array([])))\n self.assertTrue(np.array_equal(y_r, np.array([])))\n self.assertTrue(\n np.array_equal(node_info.frequencies, np.array([0, 3, 0]))\n )\n self.assertEqual(node_info.size, 3)\n self.assertEqual(node_info.target, 1)\n\n # Test tree\n tree = dt.DecisionTreeClassifier(\n try_features,\n max_depth,\n distr_depth,\n sklearn_max,\n bootstrap,\n random_state,\n )\n tree.fit(data1)\n y_pred = compss_wait_on(tree.predict(x2_ds))\n self.assertTrue(np.array_equal(y_pred, y2))\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == \"__main__\":\n main()\n",
"import numpy as np\nimport performance\n\nimport dislib as ds\nfrom dislib.cluster import GaussianMixture\n\n\ndef main():\n n_samples = 100000000\n n_chunks = 768\n chunk_size = int(np.ceil(n_samples / n_chunks))\n n_features = 100\n n_clusters = 50\n\n x = ds.random_array((n_samples, n_features), (chunk_size, n_features))\n gmm = GaussianMixture(n_components=n_clusters, max_iter=5, tol=0,\n init_params=\"random\")\n performance.measure(\"GMM\", \"100M\", gmm.fit, x)\n\n\nif __name__ == \"__main__\":\n main()\n",
"from abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom collections.abc import Sequence\nfrom functools import partial\nfrom itertools import product\n\nimport numpy as np\nfrom pycompss.api.api import compss_wait_on\nfrom scipy.stats import rankdata\nfrom sklearn import clone\nfrom sklearn.model_selection import ParameterGrid, ParameterSampler\nfrom numpy.ma import MaskedArray\n\nfrom dislib.model_selection._split import infer_cv\nfrom dislib.model_selection._validation import check_scorer, fit_and_score, \\\n validate_score, aggregate_score_dicts\n\n\nclass BaseSearchCV(ABC):\n \"\"\"Abstract base class for hyper parameter search with cross-validation.\"\"\"\n\n def __init__(self, estimator, scoring=None, cv=None, refit=True):\n self.estimator = estimator\n self.scoring = scoring\n self.cv = cv\n self.refit = refit\n\n @abstractmethod\n def _run_search(self, evaluate_candidates):\n \"\"\"Abstract method to perform the search. The parameter\n `evaluate_candidates` is a function that evaluates a ParameterGrid at a\n time \"\"\"\n pass\n\n def fit(self, x, y=None, **fit_params):\n \"\"\"Run fit with all sets of parameters.\n\n Parameters\n ----------\n x : ds-array\n Training data samples.\n y : ds-array, optional (default = None)\n Training data labels or values.\n **fit_params : dict of string -> object\n Parameters passed to the ``fit`` method of the estimator\n \"\"\"\n estimator = self.estimator\n cv = infer_cv(self.cv)\n\n scorers, refit_metric = self._infer_scorers()\n\n base_estimator = clone(estimator)\n\n n_splits = None\n all_candidate_params = []\n all_out = []\n\n def evaluate_candidates(candidate_params):\n \"\"\"Evaluate some parameters\"\"\"\n candidate_params = list(candidate_params)\n\n out = [fit_and_score(clone(base_estimator), train, validation,\n scorer=scorers, parameters=parameters,\n fit_params=fit_params)\n for parameters, (train, validation)\n in product(candidate_params, cv.split(x, y))]\n\n nonlocal n_splits\n n_splits = cv.get_n_splits()\n\n all_candidate_params.extend(candidate_params)\n all_out.extend(out)\n\n self._run_search(evaluate_candidates)\n\n for params_result in all_out:\n scores = params_result[0]\n for scorer_name, score in scores.items():\n score = compss_wait_on(score)\n scores[scorer_name] = validate_score(score, scorer_name)\n\n results = self._format_results(all_candidate_params, scorers,\n n_splits, all_out)\n\n # For multi-metric evaluation, store the best_index_, best_params_ and\n # best_score_ iff refit is one of the scorer names\n # In single metric evaluation, refit_metric is \"score\"\n if self.refit or not self.multimetric_:\n # If callable, refit is expected to return the index of the best\n # parameter set.\n if callable(self.refit):\n self.best_index_ = self.refit(results)\n if not isinstance(self.best_index_, (int, np.integer)):\n raise TypeError('best_index_ returned is not an integer')\n if (self.best_index_ < 0 or\n self.best_index_ >= len(results[\"params\"])):\n raise IndexError('best_index_ index out of range')\n else:\n self.best_index_ = results[\"rank_test_%s\"\n % refit_metric].argmin()\n self.best_score_ = results[\"mean_test_%s\" % refit_metric][\n self.best_index_]\n self.best_params_ = results[\"params\"][self.best_index_]\n\n if self.refit:\n self.best_estimator_ = clone(base_estimator).set_params(\n **self.best_params_)\n self.best_estimator_.fit(x, y, **fit_params)\n\n # Store the only scorer not as a dict for single metric evaluation\n self.scorer_ = scorers if self.multimetric_ else 
scorers['score']\n\n self.cv_results_ = results\n self.n_splits_ = n_splits\n\n return self\n\n @staticmethod\n def _format_results(candidate_params, scorers, n_splits, out):\n n_candidates = len(candidate_params)\n\n (test_score_dicts,) = zip(*out)\n\n test_scores = aggregate_score_dicts(test_score_dicts)\n\n results = {}\n\n def _store(key_name, array, splits=False, rank=False):\n \"\"\"A small helper to store the scores/times to the cv_results_\"\"\"\n array = np.array(array, dtype=np.float64).reshape(n_candidates,\n n_splits)\n if splits:\n for split_i in range(n_splits):\n # Uses closure to alter the results\n results[\"split%d_%s\"\n % (split_i, key_name)] = array[:, split_i]\n\n array_means = np.mean(array, axis=1)\n results['mean_%s' % key_name] = array_means\n array_stds = np.std(array, axis=1)\n results['std_%s' % key_name] = array_stds\n\n if rank:\n results[\"rank_%s\" % key_name] = np.asarray(\n rankdata(-array_means, method='min'), dtype=np.int32)\n\n # Use one MaskedArray and mask all the places where the param is not\n # applicable for that candidate. Use defaultdict as each candidate may\n # not contain all the params\n param_results = defaultdict(partial(MaskedArray,\n np.empty(n_candidates, ),\n mask=True,\n dtype=object))\n for cand_i, params in enumerate(candidate_params):\n for name, value in params.items():\n # An all masked empty array gets created for the key\n # `\"param_%s\" % name` at the first occurrence of `name`.\n # Setting the value at an index also unmasks that index\n param_results[\"param_%s\" % name][cand_i] = value\n\n results.update(param_results)\n # Store a list of param dicts at the key 'params'\n results['params'] = candidate_params\n\n for scorer_name in scorers.keys():\n _store('test_%s' % scorer_name, test_scores[scorer_name],\n splits=True, rank=True)\n\n return results\n\n def _infer_scorers(self):\n estimator = self.estimator\n scoring = self.scoring\n refit = self.refit\n if scoring is None or callable(scoring):\n scorers = {\"score\": check_scorer(estimator, scoring)}\n refit_metric = 'score'\n self.multimetric_ = False\n elif isinstance(scoring, dict):\n scorers = {key: check_scorer(estimator, scorer)\n for key, scorer in scoring.items()}\n if refit is not False and (\n not isinstance(refit, str) or\n refit not in scorers) and not callable(refit):\n raise ValueError(\"For multi-metric scoring, the parameter \"\n \"refit must be set to a scorer key or a \"\n \"callable to refit an estimator with the \"\n \"best parameter setting on the whole \"\n \"data and make the best_* attributes \"\n \"available for that metric. If this is \"\n \"not needed, refit should be set to \"\n \"False explicitly. 
%r was passed.\"\n % refit)\n refit_metric = refit\n self.multimetric_ = True\n else:\n raise ValueError('scoring is not valid')\n\n return scorers, refit_metric\n\n\nclass GridSearchCV(BaseSearchCV):\n \"\"\"Exhaustive search over specified parameter values for an estimator.\n\n GridSearchCV implements a \"fit\" and a \"score\" method.\n\n The parameters of the estimator used to apply these methods are optimized\n by cross-validated grid-search over a parameter grid.\n\n Parameters\n ----------\n estimator : estimator object.\n This is assumed to implement the scikit-learn estimator interface.\n Either estimator needs to provide a ``score`` function,\n or ``scoring`` must be passed.\n param_grid : dict or list of dictionaries\n Dictionary with parameters names (string) as keys and lists of\n parameter settings to try as values, or a list of such\n dictionaries, in which case the grids spanned by each dictionary\n in the list are explored. This enables searching over any sequence\n of parameter settings.\n scoring : callable, dict or None, optional (default=None)\n A callable to evaluate the predictions on the test set. It should take\n 3 parameters, estimator, x and y, and return a score (higher meaning\n better). For evaluating multiple metrics, give a dict with names as\n keys and callables as values. If None, the estimator's score method is\n used.\n cv : int or cv generator, optional (default=None)\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 5-fold cross validation,\n - integer, to specify the number of folds in a `KFold`,\n - custom cv generator.\n refit : boolean, string, or callable, optional (default=True)\n Refit an estimator using the best found parameters on the whole\n dataset.\n For multiple metric evaluation, this needs to be a string denoting the\n scorer that would be used to find the best parameters for refitting\n the estimator at the end.\n Where there are considerations other than maximum score in\n choosing a best estimator, ``refit`` can be set to a function which\n returns the selected ``best_index_`` given ``cv_results_``.\n The refitted estimator is made available at the ``best_estimator_``\n attribute and permits using ``predict`` directly on this\n ``GridSearchCV`` instance.\n Also for multiple metric evaluation, the attributes ``best_index_``,\n ``best_score_`` and ``best_params_`` will only be available if\n ``refit`` is set and all of them will be determined w.r.t this specific\n scorer. 
``best_score_`` is not returned if refit is callable.\n See ``scoring`` parameter to know more about multiple metric\n evaluation.\n\n Examples\n --------\n >>> import dislib as ds\n >>> from dislib.model_selection import GridSearchCV\n >>> from dislib.classification import RandomForestClassifier\n >>> import numpy as np\n >>> from sklearn import datasets\n >>>\n >>>\n >>> if __name__ == '__main__':\n >>> x_np, y_np = datasets.load_iris(return_X_y=True)\n >>> x = ds.array(x_np, (30, 4))\n >>> y = ds.array(y_np[:, np.newaxis], (30, 1))\n >>> param_grid = {'n_estimators': (2, 4), 'max_depth': range(3, 5)}\n >>> rf = RandomForestClassifier()\n >>> searcher = GridSearchCV(rf, param_grid)\n >>> searcher.fit(x, y)\n >>> searcher.cv_results_\n\n Attributes\n ----------\n cv_results_ : dict of numpy (masked) ndarrays\n A dict with keys as column headers and values as columns, that can be\n imported into a pandas ``DataFrame``.\n For instance the below given table:\n\n +------------+------------+-----------------+---+---------+\n |param_kernel|param_degree|split0_test_score|...|rank_t...|\n +============+============+=================+===+=========+\n | 'poly' | 2 | 0.80 |...| 2 |\n +------------+------------+-----------------+---+---------+\n | 'poly' | 3 | 0.70 |...| 4 |\n +------------+------------+-----------------+---+---------+\n | 'rbf' | -- | 0.80 |...| 3 |\n +------------+------------+-----------------+---+---------+\n | 'rbf' | -- | 0.93 |...| 1 |\n +------------+------------+-----------------+---+---------+\n\n will be represented by a ``cv_results_`` dict of::\n\n {\n 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],\n mask = [False False False False]...),\n 'param_degree': masked_array(data = [2.0 3.0 -- --],\n mask = [False False True True]...),\n 'split0_test_score' : [0.80, 0.70, 0.80, 0.93],\n 'split1_test_score' : [0.82, 0.50, 0.68, 0.78],\n 'split2_test_score' : [0.79, 0.55, 0.71, 0.93],\n ...\n 'mean_test_score' : [0.81, 0.60, 0.75, 0.85],\n 'std_test_score' : [0.01, 0.10, 0.05, 0.08],\n 'rank_test_score' : [2, 4, 3, 1],\n 'params' : [{'kernel': 'poly', 'degree': 2}, ...],\n }\n\n NOTES:\n\n The key ``'params'`` is used to store a list of parameter\n settings dicts for all the parameter candidates.\n\n The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and\n ``std_score_time`` are all in seconds.\n\n For multi-metric evaluation, the scores for all the scorers are\n available in the ``cv_results_`` dict at the keys ending with that\n scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown\n above ('split0_test_precision', 'mean_train_precision' etc.).\n\n best_estimator_ : estimator or dict\n Estimator that was chosen by the search, i.e. estimator\n which gave highest score (or smallest loss if specified)\n on the left out data. 
Not available if ``refit=False``.\n See ``refit`` parameter for more information on allowed values.\n best_score_ : float\n Mean cross-validated score of the best_estimator\n For multi-metric evaluation, this is present only if ``refit`` is\n specified.\n best_params_ : dict\n Parameter setting that gave the best results on the hold out data.\n For multi-metric evaluation, this is present only if ``refit`` is\n specified.\n best_index_ : int\n The index (of the ``cv_results_`` arrays) which corresponds to the best\n candidate parameter setting.\n The dict at ``search.cv_results_['params'][search.best_index_]`` gives\n the parameter setting for the best model, that gives the highest\n mean score (``search.best_score_``).\n For multi-metric evaluation, this is present only if ``refit`` is\n specified.\n scorer_ : function or a dict\n Scorer function used on the held out data to choose the best\n parameters for the model.\n For multi-metric evaluation, this attribute holds the validated\n ``scoring`` dict which maps the scorer key to the scorer callable.\n n_splits_ : int\n The number of cross-validation splits (folds/iterations).\n \"\"\"\n\n def __init__(self, estimator, param_grid, scoring=None, cv=None,\n refit=True):\n super().__init__(estimator=estimator, scoring=scoring, cv=cv,\n refit=refit)\n self.param_grid = param_grid\n self._check_param_grid(param_grid)\n\n def _run_search(self, evaluate_candidates):\n evaluate_candidates(ParameterGrid(self.param_grid))\n\n @staticmethod\n def _check_param_grid(param_grid):\n if hasattr(param_grid, 'items'):\n param_grid = [param_grid]\n\n for p in param_grid:\n for name, v in p.items():\n if isinstance(v, np.ndarray) and v.ndim > 1:\n raise ValueError(\"Parameter array should be \"\n \"one-dimensional.\")\n\n if (isinstance(v, str) or\n not isinstance(v, (np.ndarray, Sequence))):\n raise ValueError(\n \"Parameter values for parameter ({0}) need \"\n \"to be a sequence (but not a string) or\"\n \" np.ndarray.\".format(name))\n\n if len(v) == 0:\n raise ValueError(\n \"Parameter values for parameter ({0}) need \"\n \"to be a non-empty sequence.\".format(name))\n\n\nclass RandomizedSearchCV(BaseSearchCV):\n \"\"\"Randomized search on hyper parameters.\n\n RandomizedSearchCV implements a \"fit\" and a \"score\" method.\n\n The parameters of the estimator used to apply these methods are optimized\n by cross-validated search over parameter settings.\n\n In contrast to GridSearchCV, not all parameter values are tried out, but\n rather a fixed number of parameter settings is sampled from the specified\n distributions. The number of parameter settings that are tried is\n given by n_iter.\n\n If all parameters are presented as a list,\n sampling without replacement is performed. If at least one parameter\n is given as a distribution, sampling with replacement is used.\n\n Parameters\n ----------\n estimator : estimator object.\n This is assumed to implement the scikit-learn estimator interface.\n Either estimator needs to provide a ``score`` function,\n or ``scoring`` must be passed.\n\n param_distributions : dict\n Dictionary with parameters names (string) as keys and distributions\n or lists of parameters to try. 
Distributions must provide a ``rvs``\n method for sampling (such as those from scipy.stats.distributions).\n If a list is given, it is sampled uniformly.\n\n n_iter : int, optional (default=10)\n Number of parameter settings that are sampled.\n\n scoring : callable, dict or None, optional (default=None)\n A callable to evaluate the predictions on the test set. It should take\n 3 parameters, estimator, x and y, and return a score (higher meaning\n better). For evaluating multiple metrics, give a dict with names as\n keys and callables as values. If None, the estimator's score method is\n used.\n\n cv : int or cv generator, optional (default=None)\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 5-fold cross validation,\n - integer, to specify the number of folds in a `KFold`,\n - custom cv generator.\n\n refit : boolean, string, or callable, optional (default=True)\n Refit an estimator using the best found parameters on the whole\n dataset.\n For multiple metric evaluation, this needs to be a string denoting the\n scorer that would be used to find the best parameters for refitting\n the estimator at the end.\n Where there are considerations other than maximum score in\n choosing a best estimator, ``refit`` can be set to a function which\n returns the selected ``best_index_`` given ``cv_results_``.\n The refitted estimator is made available at the ``best_estimator_``\n attribute and permits using ``predict`` directly on this\n ``GridSearchCV`` instance.\n Also for multiple metric evaluation, the attributes ``best_index_``,\n ``best_score_`` and ``best_params_`` will only be available if\n ``refit`` is set and all of them will be determined w.r.t this specific\n scorer. ``best_score_`` is not returned if refit is callable.\n See ``scoring`` parameter to know more about multiple metric\n evaluation.\n\n random_state : int, RandomState instance or None, optional, default=None\n Pseudo random number generator state used for random sampling of params\n in param_distributions. 
This is not passed to each estimator.\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Examples\n --------\n >>> import dislib as ds\n >>> from dislib.model_selection import RandomizedSearchCV\n >>> from dislib.classification import CascadeSVM\n >>> import numpy as np\n >>> import scipy.stats as stats\n >>> from sklearn import datasets\n >>>\n >>>\n >>> if __name__ == '__main__':\n >>> x_np, y_np = datasets.load_iris(return_X_y=True)\n >>> # Pre-shuffling required for CSVM\n >>> p = np.random.permutation(len(x_np))\n >>> x = ds.array(x_np[p], (30, 4))\n >>> y = ds.array((y_np[p] == 0)[:, np.newaxis], (30, 1))\n >>> param_distributions = {'c': stats.expon(scale=0.5),\n >>> 'gamma': stats.expon(scale=10)}\n >>> csvm = CascadeSVM()\n >>> searcher = RandomizedSearchCV(csvm, param_distributions, n_iter=10)\n >>> searcher.fit(x, y)\n >>> searcher.cv_results_\n\n Attributes\n ----------\n cv_results_ : dict of numpy (masked) ndarrays\n A dict with keys as column headers and values as columns, that can be\n imported into a pandas ``DataFrame``.\n\n For instance the below given table\n\n +---------+-------------+-------------------+---+---------------+\n | param_c | param_gamma | split0_test_score |...|rank_test_score|\n +=========+=============+===================+===+===============+\n | 0.193 | 1.883 | 0.82 |...| 3 |\n +---------+-------------+-------------------+---+---------------+\n | 1.452 | 0.327 | 0.81 |...| 2 |\n +---------+-------------+-------------------+---+---------------+\n | 0.926 | 3.452 | 0.94 |...| 1 |\n +---------+-------------+-------------------+---+---------------+\n\n will be represented by a ``cv_results_`` dict of::\n\n {\n 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],\n mask = False),\n 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),\n 'split0_test_score' : [0.82, 0.81, 0.94],\n 'split1_test_score' : [0.66, 0.75, 0.79],\n 'split2_test_score' : [0.82, 0.87, 0.84],\n ...\n 'mean_test_score' : [0.76, 0.84, 0.86],\n 'std_test_score' : [0.01, 0.20, 0.04],\n 'rank_test_score' : [3, 2, 1],\n 'params' : [{'c' : 0.193, 'gamma' : 1.883}, ...],\n }\n\n NOTE\n\n The key ``'params'`` is used to store a list of parameter\n settings dicts for all the parameter candidates.\n\n The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and\n ``std_score_time`` are all in seconds.\n\n For multi-metric evaluation, the scores for all the scorers are\n available in the ``cv_results_`` dict at the keys ending with that\n scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown\n above. ('split0_test_precision', 'mean_train_precision' etc.)\n\n best_estimator_ : estimator or dict\n Estimator that was chosen by the search, i.e. estimator\n which gave highest score (or smallest loss if specified)\n on the left out data. Not available if ``refit=False``.\n\n For multi-metric evaluation, this attribute is present only if\n ``refit`` is specified.\n\n See ``refit`` parameter for more information on allowed values.\n\n best_score_ : float\n Mean cross-validated score of the best_estimator.\n\n For multi-metric evaluation, this is not available if ``refit`` is\n ``False``. 
See ``refit`` parameter for more information.\n\n best_params_ : dict\n Parameter setting that gave the best results on the hold out data.\n\n For multi-metric evaluation, this is not available if ``refit`` is\n ``False``. See ``refit`` parameter for more information.\n\n best_index_ : int\n The index (of the ``cv_results_`` arrays) which corresponds to the best\n candidate parameter setting.\n\n The dict at ``search.cv_results_['params'][search.best_index_]`` gives\n the parameter setting for the best model, that gives the highest\n mean score (``search.best_score_``).\n\n For multi-metric evaluation, this is not available if ``refit`` is\n ``False``. See ``refit`` parameter for more information.\n\n scorer_ : function or a dict\n Scorer function used on the held out data to choose the best\n parameters for the model.\n\n For multi-metric evaluation, this attribute holds the validated\n ``scoring`` dict which maps the scorer key to the scorer callable.\n\n n_splits_ : int\n The number of cross-validation splits (folds/iterations).\n \"\"\"\n def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,\n cv=None, refit=True, random_state=None):\n super().__init__(estimator=estimator, scoring=scoring, cv=cv,\n refit=refit)\n self.param_distributions = param_distributions\n self.n_iter = n_iter\n self.random_state = random_state\n\n def _run_search(self, evaluate_candidates):\n \"\"\"Search n_iter candidates from param_distributions\"\"\"\n ps = ParameterSampler(self.param_distributions, self.n_iter,\n random_state=self.random_state)\n evaluate_candidates(ps)\n"
] | [
[
"numpy.array"
],
[
"numpy.array_equal",
"numpy.array",
"numpy.bincount",
"numpy.random.RandomState"
],
[
"numpy.ceil"
],
[
"sklearn.model_selection.ParameterSampler",
"scipy.stats.rankdata",
"sklearn.clone",
"sklearn.model_selection.ParameterGrid",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
JeffreyLuu/apexe3-api | [
"081da51370e8b01b1e8169e4137a99636ea71866"
] | [
"examples/python/real_time_insights.py"
] | [
"'''\n/**\n * real_time_insights.py\n * \n * Streams a real-time insights for the supplied pair\n * An example of the real-time insights is available here:\n * https://app.ae3platform.com/insights\n * \n * Disclaimer:\n * APEX:E3 is a financial technology company based in the United Kingdom https://www.apexe3.com\n * \n * None of this code constitutes financial advice. APEX:E3 is not \n * liable for any loss resulting from the use of this code or the API. \n * \n * This code is governed by The MIT License (MIT)\n * \n * Copyright (c) 2020 APEX:E3 Team\n * \n **/\n'''\nimport sys\nsys.path.append('..')\nfrom apexe3.apexe3 import initialise\nfrom apexe3.apexe3 import initialise_stream\nfrom apexe3.apexe3 import initialise_insights_for_pair\n\nimport pandas as pd\n\n#Change these values to a base or quote you are interested in\nbase = 'btc'\nquote = 'usdt'\n\ndef process_spread(event):\n print('Best spreads for ' + str(base) +' '+ str(quote))\n table=pd.DataFrame(event[\"values\"])\n table.columns = ['exchange','base','quote','misc','strSpread', 'spread']\n table = table[['exchange','spread']]\n print(table)\n print('------------------------------------------')\n\ndef process_arbitrage(event):\n print('Arbitrage Opportunity For ' + str(base) +' '+ str(quote))\n table=pd.DataFrame(event[\"values\"])\n table.columns = ['exchange','base','quote','misc','strSpread', 'spread']\n table = table[['base','quote','spread']]\n print(table)\n print('------------------------------------------') \n\ndef process_whales(event):\n print('Largest whales for ' + str(base) +' '+ str(quote))\n table=pd.DataFrame(event[\"values\"])\n table.columns = ['exchange','base','quote','misc','strSize (USD)', ' size (usd)']\n table = table[['exchange','size (usd)']]\n print(table)\n print('------------------------------------------')\n\ndef process_bid_imbalances(event):\n table=pd.DataFrame(event[\"values\"])\n print('bid imbalance for ' + str(base) +' '+ str(quote))\n table.columns = ['exchange','base','quote','misc','strStrength (USD)', 'bid imbalance']\n table = table[['exchange','bid imbalance']]\n print(table)\n print('------------------------------------------')\n\ndef process_ask_imbalances(event):\n table=pd.DataFrame(event[\"values\"])\n print('ask imbalance for ' + str(base) +' '+str(quote))\n table.columns = ['exchange','base','quote','misc','strStrength (USD)', 'ask imbalance']\n table = table[['exchange','ask imbalance']]\n print(table)\n print('------------------------------------------') \n\n\ndef init():\n with open('./../secret.txt', 'r') as f:\n clientId = f.readline().strip()\n clientSecret = f.readline().strip()\n f.close()\n emitter = initialise(clientId, clientSecret)\n emitter.on('SPREAD', process_spread)\n \n #UNCOMMENT TO RECIEVE UPDATES FOR THESE ANALYTICS\n #emitter.on('WHALE', process_whales)\n #emitter.on('VOI_BID', process_bid_imbalances)\n #emitter.on('VOI_ASK', process_ask_imbalances)\n #emitter.on('ARBITRAGE', process_arbitrage)\n\n\nif __name__ == \"__main__\":\n init()\n initialise_insights_for_pair(base, quote)"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
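For reference, a hedged sketch of the `pandas.DataFrame` pattern used by the `process_*` callbacks in the file above: build a frame from the event payload, assign column names, and keep only the columns of interest. The row values below are invented placeholders, not real APEX:E3 event data.

# Hedged sketch of the DataFrame pattern in the process_* callbacks above.
# The rows below are invented placeholders, not real event payloads.
import pandas as pd

values = [['exchangeA', 'btc', 'usdt', None, '0.01%', 0.0001],
          ['exchangeB', 'btc', 'usdt', None, '0.02%', 0.0002]]
table = pd.DataFrame(values)
table.columns = ['exchange', 'base', 'quote', 'misc', 'strSpread', 'spread']
print(table[['exchange', 'spread']])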
Ravi-0809/question-generation | [
"9065a3b47293b8a69a0548af1f6bedd4a4aa7f9c",
"9065a3b47293b8a69a0548af1f6bedd4a4aa7f9c"
] | [
"src/discriminator/instance.py",
"src/qa/qanet/model.py"
] | [
"import sys,json,time,os\nsys.path.insert(0, \"/Users/tom/Dropbox/msc-ml/project/src/\")\nsys.path.insert(0, \"/cs/student/msc/ml/2017/thosking/dev/msc-project/src/\")\n\nimport tensorflow as tf\nimport numpy as np\n\nimport discriminator.config\nfrom discriminator.model import Model\nfrom discriminator.prepro import convert_to_features, word_tokenize\nimport helpers.loader as loader\nimport flags\n\nmem_limit=1\n\n\n# This provides a somewhat normalised interface to a pre-trained QANet model - some tweaks have been made to get it to play nicely when other models are spun up\nclass DiscriminatorInstance():\n def __init__(self, trainable=False, path=None, log_slug=None, force_init=False):\n config = tf.app.flags.FLAGS\n self.run_id = str(int(time.time())) + (\"-\"+log_slug if log_slug is not None else \"\")\n self.trainable = trainable\n self.load_from_chkpt(path, force_init)\n if trainable:\n self.summary_writer = tf.summary.FileWriter(config.log_dir+'disc/'+self.run_id, self.model.graph)\n def __del__(self):\n self.sess.close()\n\n\n def load_from_chkpt(self, path=None, force_init=False):\n\n config = tf.app.flags.FLAGS\n with open(config.disc_word_emb_file, \"r\") as fh:\n word_mat = np.array(json.load(fh), dtype=np.float32)\n with open(config.disc_char_emb_file, \"r\") as fh:\n char_mat = np.array(json.load(fh), dtype=np.float32)\n # with open(config.disc_test_meta, \"r\") as fh:\n # meta = json.load(fh)\n\n with open(config.disc_word_dictionary, \"r\") as fh:\n self.word_dictionary = json.load(fh)\n with open(config.disc_char_dictionary, \"r\") as fh:\n self.char_dictionary = json.load(fh)\n\n\n self.model = Model(config, None, word_mat, char_mat, trainable=self.trainable, demo = True, opt=False)\n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_limit,allow_growth = True,visible_device_list='0')\n self.sess = tf.Session(graph=self.model.graph, config=tf.ConfigProto(gpu_options=gpu_options,allow_soft_placement=True))\n\n with self.model.graph.as_default():\n self.saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)\n if force_init and path is not None:\n chkpt_path = tf.train.latest_checkpoint(path)\n print(\"Loading discriminator from \", chkpt_path)\n\n restore_vars= [v for v in tf.trainable_variables() if v.name[:13] != 'Output_Layer/']\n self.sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(restore_vars)\n saver.restore(self.sess, chkpt_path)\n elif path is not None:\n\n\n chkpt_path = tf.train.latest_checkpoint(path)\n print(\"Loading discriminator from \", chkpt_path)\n self.saver.restore(self.sess, chkpt_path)\n if config.disc_decay < 1.0:\n self.sess.run(self.model.assign_vars)\n else:\n\n os.makedirs(config.model_dir+'disc/'+self.run_id)\n self.sess.run(tf.global_variables_initializer())\n\n\n\n def save_to_chkpt(self, path, step):\n self.saver.save(self.sess, path+'disc/'+self.run_id+'/model.checkpoint', global_step=step)\n\n def char_pos_to_word(self, text, tokens, char_pos):\n ix=0\n for t,token in enumerate(tokens):\n # print(token, t, ix, char_pos)\n for char in token:\n ix = text.find(char, ix)\n # ix += 1\n if ix >= char_pos:\n # print(\"***\", token, char, t, ix, char_pos)\n return t\n\n def prepro(self,contexts, questions, ans_text, ans_pos):\n config = tf.app.flags.FLAGS\n\n\n # query = zip(contexts, questions)\n toks = [word_tokenize(ctxt.replace(\"''\", '\" ').replace(\"``\", '\" ').lower()) for ctxt in contexts]\n ans_tok_pos = [self.char_pos_to_word(contexts[ix].lower(), toks[ix], ans_pos[ix]) for ix in 
range(len(toks))]\n ans_lens = [len(word_tokenize(ans)) for ans in ans_text]\n ans_toks = [toks[ix][ans:ans+ans_lens[ix]] for ix,ans in enumerate(ans_tok_pos)]\n\n # print(ans_pos)\n # print(ans_toks)\n # print(toks)\n # exit()\n # ans_start = [toks[i].index(ans_tok[0]) for i,ans_tok in enumerate(ans_toks)]\n # ans_end = [ans_start[i] + len(ans_toks[i])-1 for i in range(len(ans_toks))]\n ans_start = ans_pos\n ans_end = [ans+ans_lens[ix]-1 for ix,ans in enumerate(ans_pos)]\n questions = [q.replace(loader.PAD,\"\").replace(loader.EOS,\"\") for q in questions]\n query = list(zip(contexts, questions))\n\n # # the QANet code has fixed batch sizes - so pad it\n # length=config.batch_size\n # if len(query) < config.batch_size:\n # length=len(query)\n # query += [[\"blank\",\"blank\"] for i in range(config.batch_size-length)]\n # ans_start += [0 for i in range(config.batch_size-length)]\n # ans_end += [0 for i in range(config.batch_size-length)]\n\n feats=[convert_to_features(config, q, self.word_dictionary, self.char_dictionary)+(ans_start[ix],ans_end[ix]) for ix,q in enumerate(query)]\n return feats\n\n def get_pred(self, contexts, questions, ans_text, ans_pos):\n length = len(contexts)\n\n feats = self.prepro(contexts,questions,ans_text,ans_pos)\n c,ch,q,qh,ans_start,ans_end = zip(*feats)\n fd = {'context:0': c,\n 'question:0': q,\n 'context_char:0': ch,\n 'question_char:0': qh,\n 'answer_index1:0': ans_start,\n 'answer_index2:0': ans_end}\n\n pred = self.sess.run(self.model.probs, feed_dict = fd)\n\n return pred[:length]\n\n def get_nll(self, contexts, questions, ans_text, ans_pos, gold_labels):\n length = len(contexts)\n\n feats = self.prepro(contexts,questions,ans_text,ans_pos)\n c,ch,q,qh,ans_start,ans_end = zip(*feats)\n fd = {'context:0': c,\n 'question:0': q,\n 'context_char:0': ch,\n 'question_char:0': qh,\n 'answer_index1:0': ans_start,\n 'answer_index2:0': ans_end,\n 'gold_class:0': gold_labels}\n\n nll = self.sess.run(self.model.nll, feed_dict = fd)\n\n return nll[:length]\n\n def train_step(self, contexts, questions, ans_text, ans_pos, gold_labels, step):\n if not self.trainable:\n exit('train_step called on non-trainable discriminator!')\n config = tf.app.flags.FLAGS\n\n length = len(contexts)\n gold_labels = gold_labels\n feats = self.prepro(contexts,questions,ans_text,ans_pos)\n c,ch,q,qh,ans_start,ans_end = zip(*feats)\n fd = {'context:0': c,\n 'question:0': q,\n 'context_char:0': ch,\n 'question_char:0': qh,\n 'answer_index1:0': ans_start,\n 'answer_index2:0': ans_end,\n 'gold_class:0': gold_labels,\n self.model.dropout: config.disc_dropout}\n\n _,summ,loss = self.sess.run([self.model.train_op, self.model.train_summary, self.model.loss], feed_dict = fd)\n\n # if step % 25 ==0:\n # print(gold_labels, questions)\n\n self.summary_writer.add_summary(summ, global_step=step)\n\n return loss\n\n\ndef main(_):\n\n from tqdm import tqdm\n import matplotlib.pyplot as plt\n import numpy as np\n from sklearn.metrics import confusion_matrix\n import itertools\n from sklearn.metrics import roc_curve, auc\n\n # squad = loader.load_squad_triples(path=\"./data/\", dev=True, v2=True, as_dict=True)\n # with open(\"./data/squad2_dev_official_output_fixed.json\") as dataset_file:\n # ans_preds = json.load(dataset_file)\n with open(\"./results/out_eval_MALUUBA-CROP-LATENT-GLOVE_test.json\") as dataset_file:\n results = json.load(dataset_file)['results']\n\n\n\n# 1535473379-MALUUBA-CROP-LATENT-GLOVE_train\n# 1535474306-MALUUBA-CROP-LATENT-GLOVE_train_QAINIT\n # disc_path = 
\"./models/saved/discriminator-trained\"\n disc_path = \"./models/saved2/1535473379-MALUUBA-CROP-LATENT-GLOVE_train\"\n # disc_path = \"./models/saved2/1535474306-MALUUBA-CROP-LATENT-GLOVE_train_QAINIT\"\n\n\n disc = DiscriminatorInstance(path=disc_path)\n # disc = DiscriminatorInstance(path=\"./models/disc/1533307366-SQUAD-QANETINIT\")\n\n # output={}\n # for id,candidates in tqdm(ans_preds.items()):\n # ctxt, q, ans_gold, ans_gold_pos, label_gold = squad[id]\n #\n # scores=[]\n # for candidate in candidates:\n # scores.append( disc.get_pred([ctxt], [q], [candidate['text']], [candidate['answer_start']]).tolist()[0] )\n # cand_ix = np.argmax(scores)\n #\n # pred_ans = candidates[cand_ix]['text']\n # pred_score = scores[cand_ix]\n # output[id] = pred_ans if pred_score > 0.5 else \"\"\n #\n # with open(\"./logs/squad2_dev_filtered.json\",\"w\") as fh:\n # json.dump(output, fh)\n\n gold_labels=[]\n pred_labels=[]\n scores=[]\n\n for res in tqdm(results[:3000]):\n # print(res['q_gold'], res['q_pred'])\n gold_score = disc.get_pred([res['c']], [res['q_gold']],[res['a_text']],[res['a_pos']])\n pred_score = disc.get_pred([res['c']], [res['q_pred']],[res['a_text']],[res['a_pos']])\n\n\n gold_labels.append(1)\n gold_labels.append(0)\n pred_labels.append(1.0 * (gold_score[0] > 0.5))\n pred_labels.append(1.0 * (pred_score[0] > 0.5))\n scores.append(gold_score[0])\n scores.append(pred_score[0])\n\n\n print(disc_path)\n print(\"Acc: \", np.mean(np.equal(gold_labels, pred_labels)))\n\n\n\n # oh_labels =np.eye(2)[gold_labels]\n ### disc conf mat\n # gold_labels =['Generated' if l==0 else 'Ground truth' for l in gold_labels]\n # pred_labels =['Generated' if l==0 else 'Ground truth' for l in pred_labels]\n plt.figure(figsize=(4.5,3.5))\n cm = confusion_matrix(gold_labels, pred_labels)\n mat = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(mat)\n plt.imshow(mat, cmap=plt.cm.Blues)\n plt.colorbar()\n tick_marks = np.arange(2)\n plt.xticks(tick_marks,['Generated','Ground truth'], rotation=0)\n plt.yticks(tick_marks, ['Generated','Ground truth'], rotation=90)\n fmt = '.2f'\n thresh = mat.max() / 2.\n for i, j in itertools.product(range(mat.shape[0]), range(mat.shape[1])):\n plt.text(j, i, format(mat[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if mat[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('Actual Source', fontsize=14)\n plt.xlabel('Predicted source', fontsize=14)\n plt.savefig(\"/users/Tom/Dropbox/Apps/Overleaf/Question Generation/figures/disc_cm_latent.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n # plt.show()\n # exit()\n\n\n\n ### disc Roc curves\n fpr, tpr, _ = roc_curve(gold_labels, scores)\n roc_auc = auc(fpr, tpr)\n plt.figure(figsize=(4.5,3.5))\n lw = 2\n plt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Discriminator RoC curve')\n plt.legend(loc=\"lower right\")\n plt.savefig(\"/users/Tom/Dropbox/Apps/Overleaf/Question Generation/figures/disc_roc_latent.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n plt.show()\n exit()\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n",
"import tensorflow as tf\nfrom qa.qanet.layers import initializer, regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, optimized_trilinear_for_attention\n\nclass Model(object):\n def __init__(self, config, batch, word_mat=None, char_mat=None, trainable=True, opt=True, demo = False, graph = None):\n self.config = config\n self.demo = demo\n self.graph = graph if graph is not None else tf.Graph()\n with self.graph.as_default():\n\n self.global_step = tf.get_variable('global_step', shape=[], dtype=tf.int32,\n initializer=tf.constant_initializer(0), trainable=False)\n self.dropout = tf.placeholder_with_default(0.0, (), name=\"dropout\")\n if self.demo:\n self.c = tf.placeholder(tf.int32, [None, config.test_para_limit],\"context\")\n self.q = tf.placeholder(tf.int32, [None, config.test_ques_limit],\"question\")\n self.ch = tf.placeholder(tf.int32, [None, config.test_para_limit, config.char_limit],\"context_char\")\n self.qh = tf.placeholder(tf.int32, [None, config.test_ques_limit, config.char_limit],\"question_char\")\n self.y1 = tf.placeholder(tf.int32, [None, config.test_para_limit],\"answer_index1\")\n self.y2 = tf.placeholder(tf.int32, [None, config.test_para_limit],\"answer_index2\")\n else:\n self.c, self.q, self.ch, self.qh, self.y1, self.y2, self.qa_id = batch.get_next()\n\n # self.word_unk = tf.get_variable(\"word_unk\", shape = [config.glove_dim], initializer=initializer())\n self.word_mat = tf.get_variable(\"word_mat\", initializer=tf.constant(\n word_mat, dtype=tf.float32), trainable=False)\n self.char_mat = tf.get_variable(\n \"char_mat\", initializer=tf.constant(char_mat, dtype=tf.float32))\n\n self.c_mask = tf.cast(self.c, tf.bool)\n self.q_mask = tf.cast(self.q, tf.bool)\n self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)\n self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)\n\n if opt:\n # we have to hardcode the max batch size here! use the batch size from the generator as this will be used for PG\n N, CL = config.batch_size if not self.demo else config.batch_size, config.char_limit\n self.c_maxlen = tf.reduce_max(self.c_len)\n self.q_maxlen = tf.reduce_max(self.q_len)\n self.c = tf.slice(self.c, [0, 0], [N, self.c_maxlen])\n self.q = tf.slice(self.q, [0, 0], [N, self.q_maxlen])\n self.c_mask = tf.slice(self.c_mask, [0, 0], [N, self.c_maxlen])\n self.q_mask = tf.slice(self.q_mask, [0, 0], [N, self.q_maxlen])\n self.ch = tf.slice(self.ch, [0, 0, 0], [N, self.c_maxlen, CL])\n self.qh = tf.slice(self.qh, [0, 0, 0], [N, self.q_maxlen, CL])\n self.y1 = tf.argmax(tf.slice(self.y1, [0, 0], [N, self.c_maxlen]),axis=-1)\n self.y2 = tf.argmax(tf.slice(self.y2, [0, 0], [N, self.c_maxlen]),axis=-1)\n else:\n self.c_maxlen, self.q_maxlen = config.para_limit, config.ques_limit\n\n self.ch_len = tf.reshape(tf.reduce_sum(\n tf.cast(tf.cast(self.ch, tf.bool), tf.int32), axis=2), [-1])\n self.qh_len = tf.reshape(tf.reduce_sum(\n tf.cast(tf.cast(self.qh, tf.bool), tf.int32), axis=2), [-1])\n\n self.forward()\n total_params()\n\n if trainable:\n self.lr = tf.minimum(config.learning_rate, 0.001 / tf.log(999.) 
* tf.log(tf.cast(self.global_step, tf.float32) + 1))\n self.opt = tf.train.AdamOptimizer(learning_rate = self.lr, beta1 = 0.8, beta2 = 0.999, epsilon = 1e-7)\n grads = self.opt.compute_gradients(self.loss)\n gradients, variables = zip(*grads)\n capped_grads, _ = tf.clip_by_global_norm(\n gradients, config.grad_clip)\n self.train_op = self.opt.apply_gradients(\n zip(capped_grads, variables), global_step=self.global_step)\n\n def forward(self):\n config = self.config\n N, PL, QL, CL, d, dc, nh = config.batch_size if not self.demo else config.batch_size, self.c_maxlen, self.q_maxlen, config.char_limit, config.hidden, config.char_dim, config.num_heads\n\n with tf.variable_scope(\"Input_Embedding_Layer\"):\n ch_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_mat, self.ch), [N * PL, CL, dc])\n qh_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_mat, self.qh), [N * QL, CL, dc])\n ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout)\n qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)\n\n\t\t\t# Bidaf style conv-highway encoder\n ch_emb = conv(ch_emb, d,\n bias = True, activation = tf.nn.relu, kernel_size = 5, name = \"char_conv\", reuse = None)\n qh_emb = conv(qh_emb, d,\n bias = True, activation = tf.nn.relu, kernel_size = 5, name = \"char_conv\", reuse = True)\n\n ch_emb = tf.reduce_max(ch_emb, axis = 1)\n qh_emb = tf.reduce_max(qh_emb, axis = 1)\n\n ch_emb = tf.reshape(ch_emb, [N, PL, ch_emb.shape[-1]])\n qh_emb = tf.reshape(qh_emb, [N, QL, ch_emb.shape[-1]])\n\n c_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout)\n q_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout)\n\n c_emb = tf.concat([c_emb, ch_emb], axis=2)\n q_emb = tf.concat([q_emb, qh_emb], axis=2)\n\n c_emb = highway(c_emb, size = d, scope = \"highway\", dropout = self.dropout, reuse = None)\n q_emb = highway(q_emb, size = d, scope = \"highway\", dropout = self.dropout, reuse = True)\n\n with tf.variable_scope(\"Embedding_Encoder_Layer\"):\n c = residual_block(c_emb,\n num_blocks = 1,\n num_conv_layers = 4,\n kernel_size = 7,\n mask = self.c_mask,\n num_filters = d,\n num_heads = nh,\n seq_len = self.c_len,\n scope = \"Encoder_Residual_Block\",\n bias = False,\n dropout = self.dropout)\n q = residual_block(q_emb,\n num_blocks = 1,\n num_conv_layers = 4,\n kernel_size = 7,\n mask = self.q_mask,\n num_filters = d,\n num_heads = nh,\n seq_len = self.q_len,\n scope = \"Encoder_Residual_Block\",\n reuse = True, # Share the weights between passage and question\n bias = False,\n dropout = self.dropout)\n\n with tf.variable_scope(\"Context_to_Query_Attention_Layer\"):\n # C = tf.tile(tf.expand_dims(c,2),[1,1,self.q_maxlen,1])\n # Q = tf.tile(tf.expand_dims(q,1),[1,self.c_maxlen,1,1])\n # S = trilinear([C, Q, C*Q], input_keep_prob = 1.0 - self.dropout)\n S = optimized_trilinear_for_attention([c, q], self.c_maxlen, self.q_maxlen, input_keep_prob = 1.0 - self.dropout)\n mask_q = tf.expand_dims(self.q_mask, 1)\n S_ = tf.nn.softmax(mask_logits(S, mask = mask_q))\n mask_c = tf.expand_dims(self.c_mask, 2)\n S_T = tf.transpose(tf.nn.softmax(mask_logits(S, mask = mask_c), dim = 1),(0,2,1))\n self.c2q = tf.matmul(S_, q)\n self.q2c = tf.matmul(tf.matmul(S_, S_T), c)\n attention_outputs = [c, self.c2q, c * self.c2q, c * self.q2c]\n\n with tf.variable_scope(\"Model_Encoder_Layer\"):\n inputs = tf.concat(attention_outputs, axis = -1)\n self.enc = [conv(inputs, d, name = \"input_projection\")]\n for i in range(3):\n if i % 2 == 0: # dropout every 2 blocks\n 
self.enc[i] = tf.nn.dropout(self.enc[i], 1.0 - self.dropout)\n self.enc.append(\n residual_block(self.enc[i],\n num_blocks = 7,\n num_conv_layers = 2,\n kernel_size = 5,\n mask = self.c_mask,\n num_filters = d,\n num_heads = nh,\n seq_len = self.c_len,\n scope = \"Model_Encoder\",\n bias = False,\n reuse = True if i > 0 else None,\n dropout = self.dropout)\n )\n\n with tf.variable_scope(\"Output_Layer\"):\n start_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[2]],axis = -1),1, bias = False, name = \"start_pointer\"),-1)\n end_logits = tf.squeeze(conv(tf.concat([self.enc[1], self.enc[3]],axis = -1),1, bias = False, name = \"end_pointer\"), -1)\n self.logits = [mask_logits(start_logits, mask = self.c_mask),\n mask_logits(end_logits, mask = self.c_mask)]\n\n logits1, logits2 = [l for l in self.logits]\n\n outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2),\n tf.expand_dims(tf.nn.softmax(logits2), axis=1))\n outer = tf.matrix_band_part(outer, 0, config.ans_limit)\n self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)\n self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits1, labels=self.y1)\n losses2 = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits2, labels=self.y2)\n self.loss = tf.reduce_mean(losses + losses2)\n\n if config.l2_norm is not None:\n variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables)\n self.loss += l2_loss\n\n if config.decay is not None:\n self.var_ema = tf.train.ExponentialMovingAverage(config.decay)\n ema_op = self.var_ema.apply(tf.trainable_variables())\n with tf.control_dependencies([ema_op]):\n self.loss = tf.identity(self.loss)\n\n self.assign_vars = []\n for var in tf.global_variables():\n v = self.var_ema.average(var)\n if v:\n self.assign_vars.append(tf.assign(var,v))\n\n def get_loss(self):\n return self.loss\n\n def get_global_step(self):\n return self.global_step\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.plot",
"tensorflow.GPUOptions",
"numpy.arange",
"tensorflow.ConfigProto",
"tensorflow.train.Saver",
"tensorflow.trainable_variables",
"tensorflow.app.run",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"sklearn.metrics.roc_curve",
"tensorflow.global_variables_initializer",
"numpy.equal",
"sklearn.metrics.auc",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"tensorflow.summary.FileWriter",
"tensorflow.train.latest_checkpoint",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks"
],
[
"tensorflow.concat",
"tensorflow.matrix_band_part",
"tensorflow.contrib.layers.apply_regularization",
"tensorflow.control_dependencies",
"tensorflow.cast",
"tensorflow.global_variables",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.train.AdamOptimizer",
"tensorflow.Graph",
"tensorflow.get_collection",
"tensorflow.placeholder_with_default",
"tensorflow.trainable_variables",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.nn.embedding_lookup",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.slice",
"tensorflow.reshape",
"tensorflow.assign",
"tensorflow.expand_dims",
"tensorflow.constant_initializer",
"tensorflow.clip_by_global_norm",
"tensorflow.log",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
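Below is a hedged, self-contained sketch of the ROC evaluation performed in the discriminator's `main()` above, using `sklearn.metrics.roc_curve`/`auc` with matplotlib. The labels and scores are made-up stand-ins for the discriminator outputs, and the styling is reduced to the essentials.

# Hedged sketch of the ROC evaluation in the discriminator's main() above;
# gold_labels and scores are made-up stand-ins for the model outputs.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc

gold_labels = np.array([1, 0, 1, 0, 1, 0])
scores = np.array([0.9, 0.4, 0.8, 0.6, 0.7, 0.2])

fpr, tpr, _ = roc_curve(gold_labels, scores)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=2, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], lw=2, linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='lower right')
plt.show()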
ProhardONE/python_primer | [
"211e37c1f2fd169269fc4f3c08e8b7e5225f2ad0",
"211e37c1f2fd169269fc4f3c08e8b7e5225f2ad0"
] | [
"ch_5/plot_w.py",
"ch_7/PiecewiseConstant2.py"
] | [
"# Exercise 5.35\n# Author: Noah Waterfield Price\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef f(x):\n r = np.zeros(len(x))\n r[x < 0] = -x[x < 0] - 5\n r[x >= 0] = x[x >= 0] - 5\n return abs(r)\n\nx = np.linspace(-10, 10, 101)\nplt.plot(x, f(x))\nplt.show()\n",
"# Exercise 7.24\n# Author: Noah Waterfield Price\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport operator\n\n\nclass PiecewiseConstant:\n\n def __init__(self, data, xmax):\n self.data = data + [(None, xmax)]\n\n def __call__(self, x):\n if isinstance(x, (float, int)):\n return self.piecewise(x, self.data)\n else:\n return self.piecewise_vec(x, self.data)\n\n def plot(self):\n data = self.data\n # create lists of points to exactly reproduce discontinuities\n x = [data[0][1]]\n y = [data[0][0], data[0][0]]\n for i in range(1, len(data) - 1):\n x.append(data[i][1])\n x.append(data[i][1])\n y.append(data[i][0])\n y.append(data[i][0])\n x.append(data[-1][1])\n return x, y\n\n @staticmethod\n def piecewise(x, data):\n for i in range(len(data) - 1):\n if data[i][1] <= x < data[i + 1][1] or x == data[-1][1]:\n return data[i][0]\n\n @staticmethod\n def piecewise_vec(x, data):\n r = np.zeros(len(x))\n for i in xrange(len(data) - 1):\n cond = operator.and_(data[i][1] <= x, x < data[i + 1][1])\n cond = operator.or_(cond, x == data[-1][1])\n r[cond] = data[i][0]\n return r\n\n\nf = PiecewiseConstant([(0.4, 1), (0.2, 1.5), (0.1, 3)], xmax=4)\nx, y = f.plot()\nplt.plot(x, y)\n# set appropriate y limits\nrange = max(y) - min(y)\nplt.ylim([min(y) - 0.1 * range, max(y) + 0.1 * range])\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Piecewise constant function')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"numpy.linspace"
],
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
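A note on `PiecewiseConstant2.py` above: `piecewise_vec` relies on Python 2's `xrange`. A Python 3 rendering of the same boolean-mask idea, with a small invented data list, might look like the sketch below; `data` holds `(value, left_edge)` pairs with a `(None, xmax)` sentinel appended, as in the class constructor.

# piecewise_vec in PiecewiseConstant2.py above uses Python 2's xrange; a
# Python 3 rendering of the same boolean-mask idea could look like this.
# `data` holds (value, left_edge) pairs with a (None, xmax) sentinel at the end.
import numpy as np

def piecewise_vec(x, data):
    r = np.zeros(len(x))
    for i in range(len(data) - 1):
        cond = np.logical_and(data[i][1] <= x, x < data[i + 1][1])
        cond = np.logical_or(cond, x == data[-1][1])
        r[cond] = data[i][0]
    return r

data = [(0.4, 1), (0.2, 1.5), (0.1, 3), (None, 4)]
print(piecewise_vec(np.array([1.2, 2.0, 3.5, 4.0]), data))  # [0.4 0.2 0.1 0.1]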
KevinMMendez/cimcb_lite | [
"1e6cf7137cd04d6be4ad1ba6fd317077ace08ee8"
] | [
"cimcb_lite/model/BaseModel.py"
] | [
"from abc import ABC, abstractmethod, abstractproperty\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom bokeh.layouts import widgetbox, gridplot, column, row, layout\nfrom bokeh.models import HoverTool, Band\nfrom bokeh.models.widgets import DataTable, Div, TableColumn\nfrom bokeh.models.annotations import Title\nfrom bokeh.plotting import ColumnDataSource, figure, output_notebook, show\nfrom scipy import interp\nfrom sklearn import metrics\nfrom sklearn.utils import resample\nfrom ..bootstrap import Perc, BC, BCA\nfrom ..plot import scatter, scatterCI, boxplot, distribution, permutation_test, roc_calculate, roc_plot\nfrom ..utils import binary_metrics\n\n\nclass BaseModel(ABC):\n \"\"\"Base class for models: PLS_SIMPLS.\"\"\"\n\n @abstractmethod\n def __init__(self):\n pass\n\n @abstractmethod\n def train(self):\n \"\"\"Trains the model.\"\"\"\n pass\n\n @abstractmethod\n def test(self):\n \"\"\"Tests the model.\"\"\"\n pass\n\n @abstractproperty\n def bootlist(self):\n \"\"\"A list of attributes for bootstrap resampling.\"\"\"\n pass\n\n def evaluate(self, testset=None, specificity=False, cutoffscore=False, bootnum=1000):\n \"\"\"Plots a figure containing a Violin plot, Distribution plot, ROC plot and Binary Metrics statistics.\n\n Parameters\n ----------\n testset : array-like, shape = [n_samples, 2] or None, (default None)\n If testset is None, use train Y and train Y predicted for evaluate. Alternatively, testset is used to evaluate model in the format [Ytest, Ypred].\n\n specificity : number or False, (default False)\n Use the specificity to draw error bar. When False, use the cutoff score of 0.5.\n\n cutoffscore : number or False, (default False)\n Use the cutoff score to draw error bar. When False, use the specificity selected.\n\n bootnum : a positive integer, (default 1000)\n The number of bootstrap samples used in the computation.\n \"\"\"\n Ytrue_train = self.Y\n Yscore_train = self.Y_pred.flatten()\n\n # Get Ytrue_test, Yscore_test from testset\n if testset is not None:\n Ytrue_test = np.array(testset[0])\n Yscore_test = np.array(testset[1])\n\n # Error checking\n if len(Ytrue_test) != len(Yscore_test):\n raise ValueError(\"evaluate can't be used as length of Ytrue does not match length of Yscore in test set.\")\n if len(np.unique(Ytrue_test)) != 2:\n raise ValueError(\"Ytrue_test needs to have 2 groups. 
There is {}\".format(len(np.unique(Y))))\n if np.sort(np.unique(Ytrue_test))[0] != 0:\n raise ValueError(\"Ytrue_test should only contain 0s and 1s.\")\n if np.sort(np.unique(Ytrue_test))[1] != 1:\n raise ValueError(\"Ytrue_test should only contain 0s and 1s.\")\n\n # Get Yscore_combined and Ytrue_combined_name (Labeled Ytrue)\n Yscore_combined = np.concatenate([Yscore_train, Yscore_test])\n Ytrue_combined = np.concatenate([Ytrue_train, Ytrue_test + 2]) # Each Ytrue per group is unique\n Ytrue_combined_name = Ytrue_combined.astype(np.str)\n Ytrue_combined_name[Ytrue_combined == 0] = \"Train (0)\"\n Ytrue_combined_name[Ytrue_combined == 1] = \"Train (1)\"\n Ytrue_combined_name[Ytrue_combined == 2] = \"Test (0)\"\n Ytrue_combined_name[Ytrue_combined == 3] = \"Test (1)\"\n\n # Expliclity states which metric and value is used for the error_bar\n if specificity is not False:\n metric = \"specificity\"\n val = specificity\n elif cutoffscore is not False:\n metric = \"cutoffscore\"\n val = cutoffscore\n else:\n metric = \"specificity\"\n val = 0.8\n\n # ROC plot\n tpr, fpr, tpr_ci, stats, stats_bootci = roc_calculate(Ytrue_train, Yscore_train, bootnum=100, metric=metric, val=val)\n roc_title = \"Specificity: {}\".format(np.round(stats[\"val_specificity\"], 2))\n roc_bokeh = roc_plot(tpr, fpr, tpr_ci, width=320, height=315, title=roc_title, errorbar=stats[\"val_specificity\"])\n if testset is not None:\n fpr_test, tpr_test, threshold_test = metrics.roc_curve(Ytrue_test, Yscore_test, pos_label=1, drop_intermediate=False)\n fpr_test = np.insert(fpr_test, 0, 0)\n tpr_test = np.insert(tpr_test, 0, 0)\n roc_bokeh.line(fpr_test, tpr_test, color=\"red\", line_width=3.5, alpha=0.6, legend=\"ROC Curve (Test)\") # Add ROC Curve(Test) to roc_bokeh\n\n # Violin plot\n violin_title = \"Cut-off: {}\".format(np.round(stats[\"val_cutoffscore\"], 2))\n if testset is None:\n violin_bokeh = boxplot(Yscore_train, Ytrue_train, xlabel=\"Class\", ylabel=\"Predicted Score\", violin=True, color=[\"#FFCCCC\", \"#CCE5FF\"], width=320, height=315, title=violin_title, font_size=\"11pt\")\n else:\n violin_bokeh = boxplot(Yscore_combined, Ytrue_combined_name, xlabel=\"Class\", ylabel=\"Predicted Score\", violin=True, color=[\"#fcaeae\", \"#aed3f9\", \"#FFCCCC\", \"#CCE5FF\"], width=320, height=315, group_name=[\"Train (0)\", \"Test (0)\", \"Train (1)\", \"Test (1)\"], group_name_sort=[\"Test (0)\", \"Test (1)\", \"Train (0)\", \"Train (1)\"], title=violin_title, font_size=\"11pt\")\n violin_bokeh.multi_line([[-100, 100]], [[stats[\"val_cutoffscore\"], stats[\"val_cutoffscore\"]]], line_color=\"black\", line_width=2, line_alpha=1.0, line_dash=\"dashed\")\n\n # Distribution plot\n if testset is None:\n dist_bokeh = distribution(Yscore_train, group=Ytrue_train, kde=True, title=\"\", xlabel=\"Predicted Score\", ylabel=\"p.d.f.\", width=320, height=315)\n else:\n dist_bokeh = distribution(Yscore_combined, group=Ytrue_combined_name, kde=True, title=\"\", xlabel=\"Predicted Score\", ylabel=\"p.d.f.\", width=320, height=315)\n dist_bokeh.multi_line([[stats[\"val_cutoffscore\"], stats[\"val_cutoffscore\"]]], [[-100, 100]], line_color=\"black\", line_width=2, line_alpha=1.0, line_dash=\"dashed\")\n\n # Man-Whitney U for Table (round and use scienitic notation if p-value > 0.001)\n manw_pval = scipy.stats.mannwhitneyu(Yscore_train[Ytrue_train == 0], Yscore_train[Ytrue_train == 1], alternative=\"two-sided\")[1]\n if manw_pval > 0.001:\n manw_pval_round = \"%0.2f\" % manw_pval\n else:\n manw_pval_round = \"%0.2e\" % manw_pval\n if testset 
is not None:\n testmanw_pval = scipy.stats.mannwhitneyu(Yscore_test[Ytrue_test == 0], Yscore_test[Ytrue_test == 1], alternative=\"two-sided\")[1]\n if testmanw_pval > 0.001:\n testmanw_pval_round = \"%0.2f\" % testmanw_pval\n else:\n testmanw_pval_round = \"%0.2e\" % testmanw_pval\n\n # Create a stats table for test\n if testset is not None:\n teststats = binary_metrics(Ytrue_test, Yscore_test, cut_off=stats[\"val_cutoffscore\"])\n teststats_round = {}\n for i in teststats.keys():\n teststats_round[i] = np.round(teststats[i], 2)\n\n # Round stats, and stats_bootci for Table\n stats_round = {}\n for i in stats.keys():\n stats_round[i] = np.round(stats[i], 2)\n bootci_round = {}\n for i in stats_bootci.keys():\n bootci_round[i] = np.round(stats_bootci[i], 2)\n\n # Create table\n tabledata = dict(\n evaluate=[[\"Train\"]],\n manw_pval=[[\"{}\".format(manw_pval_round)]],\n auc=[[\"{} ({}, {})\".format(stats_round[\"AUC\"], bootci_round[\"AUC\"][0], bootci_round[\"AUC\"][1])]],\n accuracy=[[\"{} ({}, {})\".format(stats_round[\"ACCURACY\"], bootci_round[\"ACCURACY\"][0], bootci_round[\"ACCURACY\"][1])]],\n precision=[[\"{} ({}, {})\".format(stats_round[\"PRECISION\"], bootci_round[\"PRECISION\"][0], bootci_round[\"PRECISION\"][1])]],\n sensitivity=[[\"{} ({}, {})\".format(stats_round[\"SENSITIVITY\"], bootci_round[\"SENSITIVITY\"][0], bootci_round[\"SENSITIVITY\"][1])]],\n specificity=[[\"{} ({}, {})\".format(stats_round[\"SPECIFICITY\"], bootci_round[\"SPECIFICITY\"][0], bootci_round[\"SPECIFICITY\"][1])]],\n F1score=[[\"{} ({}, {})\".format(stats_round[\"F1-SCORE\"], bootci_round[\"F1-SCORE\"][0], bootci_round[\"F1-SCORE\"][1])]],\n R2=[[\"{} ({}, {})\".format(stats_round[\"R²\"], bootci_round[\"R²\"][0], bootci_round[\"R²\"][1])]],\n )\n\n # Append test data\n if testset is not None:\n tabledata[\"evaluate\"].append([\"Test\"])\n tabledata[\"manw_pval\"].append([testmanw_pval_round])\n tabledata[\"auc\"].append([teststats_round[\"AUC\"]])\n tabledata[\"accuracy\"].append([teststats_round[\"ACCURACY\"]])\n tabledata[\"precision\"].append([teststats_round[\"PRECISION\"]])\n tabledata[\"sensitivity\"].append([teststats_round[\"SENSITIVITY\"]])\n tabledata[\"specificity\"].append([teststats_round[\"SPECIFICITY\"]])\n tabledata[\"F1score\"].append([teststats_round[\"F1-SCORE\"]])\n tabledata[\"R2\"].append([teststats_round[\"R²\"]])\n\n # Plot table\n source = ColumnDataSource(data=tabledata)\n columns = [TableColumn(field=\"evaluate\", title=\"Evaluate\"), TableColumn(field=\"manw_pval\", title=\"MW-U Pvalue\"), TableColumn(field=\"R2\", title=\"R2\"), TableColumn(field=\"auc\", title=\"AUC\"), TableColumn(field=\"accuracy\", title=\"Accuracy\"), TableColumn(field=\"precision\", title=\"Precision\"), TableColumn(field=\"sensitivity\", title=\"Sensitivity\"), TableColumn(field=\"F1score\", title=\"F1score\")]\n table_bokeh = widgetbox(DataTable(source=source, columns=columns, width=950, height=90), width=950, height=80)\n\n # Title\n if specificity is not False:\n title = \"Specificity fixed to: {}\".format(np.round(val, 2))\n elif cutoffscore is not False:\n title = \"Score cut-off fixed to: {}\".format(np.round(val, 2))\n else:\n title = \"Specificity fixed to: {}\".format(np.round(val, 2))\n title_bokeh = \"<h3>{}</h3>\".format(title)\n\n # Combine table, violin plot and roc plot into one figure\n fig = layout([[violin_bokeh, dist_bokeh, roc_bokeh], [table_bokeh]], toolbar_location=\"right\")\n output_notebook()\n show(column(Div(text=title_bokeh, width=900, height=50), fig))\n\n def 
calc_bootci(self, bootnum=100, type=\"bca\"):\n \"\"\"Calculates bootstrap confidence intervals based on bootlist.\n\n Parameters\n ----------\n bootnum : a positive integer, (default 100)\n The number of bootstrap samples used in the computation.\n\n type : 'bc', 'bca', 'perc', (default 'bca')\n Methods for bootstrap confidence intervals. 'bc' is bias-corrected bootstrap confidence intervals. 'bca' is bias-corrected and accelerated bootstrap confidence intervals. 'perc' is percentile confidence intervals.\n \"\"\"\n bootlist = self.bootlist\n if type is \"bca\":\n boot = BCA(self, self.X, self.Y, self.bootlist, bootnum=bootnum)\n if type is \"bc\":\n boot = BC(self, self.X, self.Y, self.bootlist, bootnum=bootnum)\n if type is \"perc\":\n boot = Perc(self, self.X, self.Y, self.bootlist, bootnum=bootnum)\n self.bootci = boot.run()\n\n def plot_featureimportance(self, PeakTable, peaklist=None, ylabel=\"Label\", sort=True):\n \"\"\"Plots feature importance metrics.\n\n Parameters\n ----------\n PeakTable : DataFrame\n Peak sheet with the required columns.\n\n peaklist : list or None, (default None)\n Peaks to include in plot (the default is to include all samples).\n\n ylabel : string, (default \"Label\")\n Name of column in PeakTable to use as the ylabel.\n\n sort : boolean, (default True)\n Whether to sort plots in absolute descending order.\n\n Returns\n -------\n Peaksheet : DataFrame\n New PeakTable with added \"Coef\" and \"VIP\" columns (+ \"Coef-95CI\" and \"VIP-95CI\" if calc_bootci is used prior to plot_featureimportance).\n \"\"\"\n if not hasattr(self, \"bootci\"):\n print(\"Use method calc_bootci prior to plot_featureimportance to add 95% confidence intervals to plots.\")\n ci_coef = None\n ci_vip = None\n else:\n ci_coef = self.bootci[\"model.coef_\"]\n ci_vip = self.bootci[\"model.vip_\"]\n\n # Remove rows from PeakTable if not in peaklist\n if peaklist is not None:\n PeakTable = PeakTable[PeakTable[\"Name\"].isin(peaklist)]\n peaklabel = PeakTable[ylabel]\n peaklabel = peaklabel.apply(str)\n\n # Plot\n fig_1 = scatterCI(self.model.coef_, ci=ci_coef, label=peaklabel, hoverlabel=PeakTable[[\"Idx\", \"Name\", \"Label\"]], hline=0, col_hline=True, title=\"Coefficient Plot\", sort_abs=sort)\n fig_2 = scatterCI(self.model.vip_, ci=ci_vip, label=peaklabel, hoverlabel=PeakTable[[\"Idx\", \"Name\", \"Label\"]], hline=1, col_hline=False, title=\"Variable Importance in Projection (VIP)\", sort_abs=sort)\n fig = layout([[fig_1], [fig_2]])\n output_notebook()\n show(fig)\n\n # Return table with: Idx, Name, Label, Coefficient, 95CI, VIP, 95CI\n if not hasattr(self, \"bootci\"):\n coef = pd.DataFrame([self.model.coef_]).T\n coef.rename(columns={0: \"Coef\"}, inplace=True)\n vip = pd.DataFrame([self.model.vip_]).T\n vip.rename(columns={0: \"VIP\"}, inplace=True)\n else:\n coef = pd.DataFrame([self.model.coef_, self.bootci[\"model.coef_\"]]).T\n coef.rename(columns={0: \"Coef\", 1: \"Coef-95CI\"}, inplace=True)\n vip = pd.DataFrame([self.model.vip_, self.bootci[\"model.vip_\"]]).T\n vip.rename(columns={0: \"VIP\", 1: \"VIP-95CI\"}, inplace=True)\n\n Peaksheet = PeakTable.copy()\n Peaksheet[\"Coef\"] = coef[\"Coef\"].values\n Peaksheet[\"VIP\"] = vip[\"VIP\"].values\n if hasattr(self, \"bootci\"):\n Peaksheet[\"Coef-95CI\"] = coef[\"Coef-95CI\"].values\n Peaksheet[\"VIP-95CI\"] = vip[\"VIP-95CI\"].values\n return Peaksheet\n\n def permutation_test(self, nperm=100):\n \"\"\"Plots permutation test figures.\n\n Parameters\n ----------\n nperm : positive integer, (default 100)\n Number of 
permutations.\n \"\"\"\n fig = permutation_test(self, self.X, self.Y, nperm=nperm)\n output_notebook()\n show(fig)\n"
] | [
[
"numpy.unique",
"sklearn.metrics.roc_curve",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.round",
"scipy.stats.mannwhitneyu",
"numpy.insert",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
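A short sketch (with invented scores) of the Mann-Whitney U step in `evaluate()` above: a two-sided `scipy.stats.mannwhitneyu` test between the two classes, followed by the same conditional p-value formatting used for the results table.

# Hedged sketch (invented scores) of the Mann-Whitney U step in evaluate()
# above: two-sided test between classes, then the same conditional formatting.
import numpy as np
import scipy.stats

yscore = np.array([0.1, 0.2, 0.3, 0.7, 0.8, 0.9])
ytrue = np.array([0, 0, 0, 1, 1, 1])

manw_pval = scipy.stats.mannwhitneyu(yscore[ytrue == 0], yscore[ytrue == 1],
                                     alternative="two-sided")[1]
manw_pval_round = "%0.2f" % manw_pval if manw_pval > 0.001 else "%0.2e" % manw_pval
print(manw_pval_round)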
qzchenwl/tensorboard | [
"e59ca8d45746f459d797f4e69377eda4433e1624",
"e59ca8d45746f459d797f4e69377eda4433e1624",
"e59ca8d45746f459d797f4e69377eda4433e1624",
"e59ca8d45746f459d797f4e69377eda4433e1624",
"e59ca8d45746f459d797f4e69377eda4433e1624"
] | [
"tensorboard/util/test_util.py",
"tensorboard/plugins/debugger/session_debug_test.py",
"tensorboard/plugins/debugger/debugger_server_test.py",
"tensorboard/plugins/beholder/visualizer.py",
"tensorboard/plugins/pr_curve/summary_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TensorBoard testing helper routine module.\n\nThis module is basically a dumpster for really generic succinct helper\nroutines that exist solely for test code.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\nimport unittest\n\nimport tensorflow as tf\n\n# See discussion on issue #1996 for private module import justification.\nfrom tensorflow.python import tf2 as tensorflow_python_tf2\n\nfrom tensorboard.compat.proto import event_pb2\nfrom tensorboard.compat.proto import graph_pb2\nfrom tensorboard.compat.proto import meta_graph_pb2\nfrom tensorboard.compat.proto import summary_pb2\nfrom tensorboard.util import tb_logging\n\nlogger = tb_logging.get_logger()\n\n\nclass FileWriter(tf.compat.v1.summary.FileWriter):\n \"\"\"FileWriter for test.\n\n TensorFlow FileWriter uses TensorFlow's Protobuf Python binding\n which is largely discouraged in TensorBoard. We do not want a\n TB.Writer but require one for testing in integrational style\n (writing out event files and use the real event readers).\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # Briefly enter graph mode context so this testing FileWriter can be\n # created from an eager mode context without triggering a usage error.\n with tf.compat.v1.Graph().as_default():\n super(FileWriter, self).__init__(*args, **kwargs)\n\n def add_test_summary(self, tag, simple_value=1.0, step=None):\n \"\"\"Convenience for writing a simple summary for a given tag.\"\"\"\n value = summary_pb2.Summary.Value(tag=tag, simple_value=simple_value)\n summary = summary_pb2.Summary(value=[value])\n self.add_summary(summary, global_step=step)\n\n def add_event(self, event):\n if isinstance(event, event_pb2.Event):\n tf_event = tf.compat.v1.Event.FromString(event.SerializeToString())\n else:\n logger.warn(\n \"Added TensorFlow event proto. \"\n \"Please prefer TensorBoard copy of the proto\"\n )\n tf_event = event\n super(FileWriter, self).add_event(tf_event)\n\n def add_summary(self, summary, global_step=None):\n if isinstance(summary, summary_pb2.Summary):\n tf_summary = tf.compat.v1.Summary.FromString(\n summary.SerializeToString()\n )\n else:\n logger.warn(\n \"Added TensorFlow summary proto. \"\n \"Please prefer TensorBoard copy of the proto\"\n )\n tf_summary = summary\n super(FileWriter, self).add_summary(tf_summary, global_step)\n\n def add_session_log(self, session_log, global_step=None):\n if isinstance(session_log, event_pb2.SessionLog):\n tf_session_log = tf.compat.v1.SessionLog.FromString(\n session_log.SerializeToString()\n )\n else:\n logger.warn(\n \"Added TensorFlow session_log proto. 
\"\n \"Please prefer TensorBoard copy of the proto\"\n )\n tf_session_log = session_log\n super(FileWriter, self).add_session_log(tf_session_log, global_step)\n\n def add_graph(self, graph, global_step=None, graph_def=None):\n if isinstance(graph_def, graph_pb2.GraphDef):\n tf_graph_def = tf.compat.v1.GraphDef.FromString(\n graph_def.SerializeToString()\n )\n else:\n tf_graph_def = graph_def\n\n super(FileWriter, self).add_graph(\n graph, global_step=global_step, graph_def=tf_graph_def\n )\n\n def add_meta_graph(self, meta_graph_def, global_step=None):\n if isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):\n tf_meta_graph_def = tf.compat.v1.MetaGraphDef.FromString(\n meta_graph_def.SerializeToString()\n )\n else:\n tf_meta_graph_def = meta_graph_def\n\n super(FileWriter, self).add_meta_graph(\n meta_graph_def=tf_meta_graph_def, global_step=global_step\n )\n\n\nclass FileWriterCache(object):\n \"\"\"Cache for TensorBoard test file writers.\"\"\"\n\n # Cache, keyed by directory.\n _cache = {}\n\n # Lock protecting _FILE_WRITERS.\n _lock = threading.RLock()\n\n @staticmethod\n def get(logdir):\n \"\"\"Returns the FileWriter for the specified directory.\n\n Args:\n logdir: str, name of the directory.\n\n Returns:\n A `FileWriter`.\n \"\"\"\n with FileWriterCache._lock:\n if logdir not in FileWriterCache._cache:\n FileWriterCache._cache[logdir] = FileWriter(\n logdir, graph=tf.compat.v1.get_default_graph()\n )\n return FileWriterCache._cache[logdir]\n\n\nclass FakeTime(object):\n \"\"\"Thread-safe fake replacement for the `time` module.\"\"\"\n\n def __init__(self, current=0.0):\n self._time = float(current)\n self._lock = threading.Lock()\n\n def time(self):\n with self._lock:\n return self._time\n\n def sleep(self, secs):\n with self._lock:\n self._time += secs\n\n\ndef ensure_tb_summary_proto(summary):\n \"\"\"Ensures summary is TensorBoard Summary proto.\n\n TB v1 summary API returns TF Summary proto. To make test for v1 and\n v2 API congruent, one can use this API to convert result of v1 API\n to TB Summary proto.\n \"\"\"\n if isinstance(summary, summary_pb2.Summary):\n return summary\n\n return summary_pb2.Summary.FromString(summary.SerializeToString())\n\n\ndef _run_conditionally(guard, name, default_reason=None):\n \"\"\"Create a decorator factory that skips a test when guard returns False.\n\n The factory raises ValueError when default_reason is None and reason is not\n passed to the factory.\n\n Args:\n guard: A lambda that returns True if a test should be executed.\n name: A human readable name for the decorator for an error message.\n default_reason: A string describing why a test should be skipped. If it\n is None, the decorator will make sure the reason is supplied by the\n consumer of the decorator. Default is None.\n\n Raises:\n ValueError when both reason and default_reason are None.\n\n Returns:\n A function that returns a decorator.\n \"\"\"\n\n def _impl(reason=None):\n if reason is None:\n if default_reason is None:\n raise ValueError(\"%s requires a reason for skipping.\" % name)\n reason = default_reason\n return unittest.skipUnless(guard(), reason)\n\n return _impl\n\n\nrun_v1_only = _run_conditionally(\n lambda: not tensorflow_python_tf2.enabled(), name=\"run_v1_only\"\n)\nrun_v2_only = _run_conditionally(\n lambda: tensorflow_python_tf2.enabled(),\n name=\"run_v2_only\",\n default_reason=\"Test only appropriate for TensorFlow v2\",\n)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests end-to-end debugger data server behavior by starting TensorBoard.\n\nThis test launches an instance of TensorBoard as a subprocess. In turn,\nTensorBoard (specifically its debugger plugin) starts a debugger data\nserver. The test then calls Session.run() using RunOptions pointing to\nthe grpc:// debug URL of the debugger data server. It then checks the\ncorrectness of the Event proto file created by the debugger data server.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport glob\nimport os\nimport shutil\nimport tempfile\nimport threading\nimport time\n\nimport numpy as np\nimport portpicker # pylint: disable=import-error\nimport tensorflow.compat.v1 as tf # pylint: disable=wrong-import-order\nfrom tensorflow.python import (\n debug as tf_debug,\n) # pylint: disable=wrong-import-order\n\nfrom tensorboard.plugins.debugger import constants\nfrom tensorboard.plugins.debugger import debugger_server_lib\nfrom tensorboard.util import test_util\n\n# These unit tests for Debugger Plugin V1 are tied to TF1.x behavior\n# (`tf.Session`s).\ntf.disable_v2_behavior()\n\n\nclass SessionDebugTestBase(tf.test.TestCase):\n def setUp(self):\n self._debugger_data_server_grpc_port = portpicker.pick_unused_port()\n self._debug_url = (\n \"grpc://localhost:%d\" % self._debugger_data_server_grpc_port\n )\n self._logdir = tempfile.mkdtemp(prefix=\"tensorboard_dds_\")\n\n self._debug_data_server = debugger_server_lib.DebuggerDataServer(\n self._debugger_data_server_grpc_port,\n self._logdir,\n always_flush=True,\n )\n self._server_thread = threading.Thread(\n target=self._debug_data_server.start_the_debugger_data_receiving_server\n )\n self._server_thread.start()\n\n self.assertTrue(self._poll_server_till_success(50, 0.2))\n\n def tearDown(self):\n self._debug_data_server.stop_server()\n self._server_thread.join()\n\n if os.path.isdir(self._logdir):\n shutil.rmtree(self._logdir)\n\n tf.reset_default_graph()\n\n def _poll_server_till_success(self, max_tries, poll_interval_seconds):\n for _ in range(max_tries):\n try:\n with tf.Session() as sess:\n a_init_val = np.array([42.0])\n a_init = tf.constant(a_init_val, shape=[1], name=\"a_init\")\n a = tf.Variable(a_init, name=\"a\")\n\n run_options = tf.RunOptions(output_partition_graphs=True)\n tf_debug.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugNumericSummary\"],\n debug_urls=[self._debug_url],\n )\n\n sess.run(a.initializer, options=run_options)\n return True\n except tf.errors.FailedPreconditionError as exc:\n time.sleep(poll_interval_seconds)\n\n return False\n\n def _compute_health_pill(self, x):\n x_clean = x[\n np.where(\n np.logical_and(\n np.logical_not(np.isnan(x)), np.logical_not(np.isinf(x))\n )\n )\n ]\n if np.size(x_clean):\n x_min = 
np.min(x_clean)\n x_max = np.max(x_clean)\n x_mean = np.mean(x_clean)\n x_var = np.var(x_clean)\n else:\n x_min = np.inf\n x_max = -np.inf\n x_mean = np.nan\n x_var = np.nan\n\n return np.array(\n [\n 1.0, # Assume is initialized.\n np.size(x),\n np.sum(np.isnan(x)),\n np.sum(x == -np.inf),\n np.sum(np.logical_and(x < 0.0, x != -np.inf)),\n np.sum(x == 0.0),\n np.sum(np.logical_and(x > 0.0, x != np.inf)),\n np.sum(x == np.inf),\n x_min,\n x_max,\n x_mean,\n x_var,\n float(tf.as_dtype(x.dtype).as_datatype_enum),\n float(len(x.shape)),\n ]\n + list(x.shape)\n )\n\n def _check_health_pills_in_events_file(\n self, events_file_path, debug_key_to_tensors\n ):\n reader = tf.python_io.tf_record_iterator(events_file_path)\n event_read = tf.Event()\n\n # The first event in the file should contain the events version, which is\n # important because without it, TensorBoard may purge health pill events.\n event_read.ParseFromString(next(reader))\n self.assertEqual(\"brain.Event:2\", event_read.file_version)\n\n health_pills = {}\n while True:\n next_event = next(reader, None)\n if not next_event:\n break\n event_read.ParseFromString(next_event)\n values = event_read.summary.value\n if values:\n if (\n values[0].metadata.plugin_data.plugin_name\n == constants.DEBUGGER_PLUGIN_NAME\n ):\n debug_key = values[0].node_name\n if debug_key not in health_pills:\n health_pills[debug_key] = [\n tf_debug.load_tensor_from_event(event_read)\n ]\n else:\n health_pills[debug_key].append(\n tf_debug.load_tensor_from_event(event_read)\n )\n\n for debug_key in debug_key_to_tensors:\n tensors = debug_key_to_tensors[debug_key]\n for i, tensor in enumerate(tensors):\n self.assertAllClose(\n self._compute_health_pill(tensor),\n health_pills[debug_key][i],\n )\n\n def testRunSimpleNetworkoWithInfAndNaNWorks(self):\n with tf.Session() as sess:\n x_init_val = np.array([[2.0], [-1.0]])\n y_init_val = np.array([[0.0], [-0.25]])\n z_init_val = np.array([[0.0, 3.0], [-1.0, 0.0]])\n\n x_init = tf.constant(x_init_val, shape=[2, 1], name=\"x_init\")\n x = tf.Variable(x_init, name=\"x\")\n y_init = tf.constant(y_init_val, shape=[2, 1])\n y = tf.Variable(y_init, name=\"y\")\n z_init = tf.constant(z_init_val, shape=[2, 2])\n z = tf.Variable(z_init, name=\"z\")\n\n u = tf.div(x, y, name=\"u\") # Produces an Inf.\n v = tf.matmul(z, u, name=\"v\") # Produces NaN and Inf.\n\n sess.run(x.initializer)\n sess.run(y.initializer)\n sess.run(z.initializer)\n\n run_options = tf.RunOptions(output_partition_graphs=True)\n tf_debug.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugNumericSummary\"],\n debug_urls=[self._debug_url],\n )\n\n result = sess.run(v, options=run_options)\n self.assertTrue(np.isnan(result[0, 0]))\n self.assertEqual(-np.inf, result[1, 0])\n\n # Debugger data is stored within a special directory within logdir.\n event_files = glob.glob(\n os.path.join(\n self._logdir,\n constants.DEBUGGER_DATA_DIRECTORY_NAME,\n \"events.debugger*\",\n )\n )\n self.assertEqual(1, len(event_files))\n\n self._check_health_pills_in_events_file(\n event_files[0],\n {\n \"x:0:DebugNumericSummary\": [x_init_val],\n \"y:0:DebugNumericSummary\": [y_init_val],\n \"z:0:DebugNumericSummary\": [z_init_val],\n \"u:0:DebugNumericSummary\": [x_init_val / y_init_val],\n \"v:0:DebugNumericSummary\": [\n np.matmul(z_init_val, x_init_val / y_init_val)\n ],\n },\n )\n\n report = self._debug_data_server.numerics_alert_report()\n self.assertEqual(2, len(report))\n self.assertTrue(report[0].device_name.lower().endswith(\"cpu:0\"))\n self.assertEqual(\"u:0\", 
report[0].tensor_name)\n self.assertGreater(report[0].first_timestamp, 0)\n self.assertEqual(0, report[0].nan_event_count)\n self.assertEqual(0, report[0].neg_inf_event_count)\n self.assertEqual(1, report[0].pos_inf_event_count)\n self.assertTrue(report[1].device_name.lower().endswith(\"cpu:0\"))\n self.assertEqual(\"u:0\", report[0].tensor_name)\n self.assertGreaterEqual(\n report[1].first_timestamp, report[0].first_timestamp\n )\n self.assertEqual(1, report[1].nan_event_count)\n self.assertEqual(1, report[1].neg_inf_event_count)\n self.assertEqual(0, report[1].pos_inf_event_count)\n\n def testMultipleInt32ValuesOverMultipleRunsAreRecorded(self):\n with tf.Session() as sess:\n x_init_val = np.array([10], dtype=np.int32)\n x_init = tf.constant(x_init_val, shape=[1], name=\"x_init\")\n x = tf.Variable(x_init, name=\"x\")\n\n x_inc_val = np.array([2], dtype=np.int32)\n x_inc = tf.constant(x_inc_val, name=\"x_inc\")\n inc_x = tf.assign_add(x, x_inc, name=\"inc_x\")\n\n sess.run(x.initializer)\n\n run_options = tf.RunOptions(output_partition_graphs=True)\n tf_debug.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugNumericSummary\"],\n debug_urls=[self._debug_url],\n )\n\n # Increase three times.\n for _ in range(3):\n sess.run(inc_x, options=run_options)\n\n # Debugger data is stored within a special directory within logdir.\n event_files = glob.glob(\n os.path.join(\n self._logdir,\n constants.DEBUGGER_DATA_DIRECTORY_NAME,\n \"events.debugger*\",\n )\n )\n self.assertEqual(1, len(event_files))\n\n self._check_health_pills_in_events_file(\n event_files[0],\n {\n \"x_inc:0:DebugNumericSummary\": [x_inc_val] * 3,\n \"x:0:DebugNumericSummary\": [\n x_init_val,\n x_init_val + x_inc_val,\n x_init_val + 2 * x_inc_val,\n ],\n },\n )\n\n def testConcurrentNumericsAlertsAreRegisteredCorrectly(self):\n num_threads = 3\n num_runs_per_thread = 2\n total_num_runs = num_threads * num_runs_per_thread\n\n # Before any Session runs, the report ought to be empty.\n self.assertEqual([], self._debug_data_server.numerics_alert_report())\n\n with tf.Session() as sess:\n x_init_val = np.array([[2.0], [-1.0]])\n y_init_val = np.array([[0.0], [-0.25]])\n z_init_val = np.array([[0.0, 3.0], [-1.0, 0.0]])\n\n x_init = tf.constant(x_init_val, shape=[2, 1], name=\"x_init\")\n x = tf.Variable(x_init, name=\"x\")\n y_init = tf.constant(y_init_val, shape=[2, 1])\n y = tf.Variable(y_init, name=\"y\")\n z_init = tf.constant(z_init_val, shape=[2, 2])\n z = tf.Variable(z_init, name=\"z\")\n\n u = tf.div(x, y, name=\"u\") # Produces an Inf.\n v = tf.matmul(z, u, name=\"v\") # Produces NaN and Inf.\n\n sess.run(x.initializer)\n sess.run(y.initializer)\n sess.run(z.initializer)\n\n run_options_list = []\n for i in range(num_threads):\n run_options = tf.RunOptions(output_partition_graphs=True)\n # Use different grpc:// URL paths so that each thread opens a separate\n # gRPC stream to the debug data server, simulating multi-worker setting.\n tf_debug.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugNumericSummary\"],\n debug_urls=[self._debug_url + \"/thread%d\" % i],\n )\n run_options_list.append(run_options)\n\n def run_v(thread_id):\n for _ in range(num_runs_per_thread):\n sess.run(v, options=run_options_list[thread_id])\n\n run_threads = []\n for thread_id in range(num_threads):\n thread = threading.Thread(\n target=functools.partial(run_v, thread_id)\n )\n thread.start()\n run_threads.append(thread)\n\n for thread in run_threads:\n thread.join()\n\n report = 
self._debug_data_server.numerics_alert_report()\n self.assertEqual(2, len(report))\n self.assertTrue(report[0].device_name.lower().endswith(\"cpu:0\"))\n self.assertEqual(\"u:0\", report[0].tensor_name)\n self.assertGreater(report[0].first_timestamp, 0)\n self.assertEqual(0, report[0].nan_event_count)\n self.assertEqual(0, report[0].neg_inf_event_count)\n self.assertEqual(total_num_runs, report[0].pos_inf_event_count)\n self.assertTrue(report[1].device_name.lower().endswith(\"cpu:0\"))\n self.assertEqual(\"u:0\", report[0].tensor_name)\n self.assertGreaterEqual(\n report[1].first_timestamp, report[0].first_timestamp\n )\n self.assertEqual(total_num_runs, report[1].nan_event_count)\n self.assertEqual(total_num_runs, report[1].neg_inf_event_count)\n self.assertEqual(0, report[1].pos_inf_event_count)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests the debugger data server, which receives and writes debugger\nevents.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\n\nimport tensorflow as tf\n\n# pylint: disable=ungrouped-imports, wrong-import-order\nfrom google.protobuf import json_format\nfrom tensorflow.core.debug import debugger_event_metadata_pb2\n\nfrom tensorboard.compat.proto import event_pb2\nfrom tensorboard.plugins.debugger import constants\nfrom tensorboard.plugins.debugger import debugger_server_lib\nfrom tensorboard.plugins.debugger import numerics_alert\nfrom tensorboard.util import tensor_util\n\n# pylint: enable=ungrouped-imports, wrong-import-order\n\n\nclass FakeEventsWriterManager(object):\n \"\"\"An events writer manager that tracks events that would be written.\n\n During normal usage, the debugger data server would write events to\n disk. Unfortunately, this test cannot depend on TensorFlow's record\n reader due to GRPC library conflicts (b/35006065). Hence, we use a\n fake EventsWriter that keeps track of events that would be written\n to disk.\n \"\"\"\n\n def __init__(self, events_output_list):\n \"\"\"Constructs a fake events writer, which appends events to a list.\n\n Args:\n events_output_list: The list to append events that would be written to\n disk.\n \"\"\"\n self.events_written = events_output_list\n\n def dispose(self):\n \"\"\"Does nothing.\n\n This implementation creates no file.\n \"\"\"\n\n def write_event(self, event):\n \"\"\"Pretends to write an event to disk.\n\n Args:\n event: The event proto.\n \"\"\"\n self.events_written.append(event)\n\n\nclass DebuggerDataServerTest(tf.test.TestCase):\n def setUp(self):\n self.events_written = []\n\n events_writer_manager = FakeEventsWriterManager(self.events_written)\n self.stream_handler = debugger_server_lib.DebuggerDataStreamHandler(\n events_writer_manager=events_writer_manager\n )\n self.stream_handler.on_core_metadata_event(event_pb2.Event())\n\n def tearDown(self):\n tf.compat.v1.test.mock.patch.stopall()\n\n def _create_event_with_float_tensor(\n self, node_name, output_slot, debug_op, list_of_values\n ):\n \"\"\"Creates event with float64 (double) tensors.\n\n Args:\n node_name: The string name of the op. 
This lacks both the output slot as\n well as the name of the debug op.\n output_slot: The number that is the output slot.\n debug_op: The name of the debug op to use.\n list_of_values: A python list of values within the tensor.\n Returns:\n A `Event` with a summary containing that node name and a float64\n tensor with those values.\n \"\"\"\n event = event_pb2.Event()\n value = event.summary.value.add(\n tag=node_name,\n node_name=\"%s:%d:%s\" % (node_name, output_slot, debug_op),\n tensor=tensor_util.make_tensor_proto(\n list_of_values, dtype=tf.float64, shape=[len(list_of_values)]\n ),\n )\n plugin_content = debugger_event_metadata_pb2.DebuggerEventMetadata(\n device=\"/job:localhost/replica:0/task:0/cpu:0\",\n output_slot=output_slot,\n )\n value.metadata.plugin_data.plugin_name = constants.DEBUGGER_PLUGIN_NAME\n value.metadata.plugin_data.content = tf.compat.as_bytes(\n json_format.MessageToJson(\n plugin_content, including_default_value_fields=True\n )\n )\n return event\n\n def _verify_event_lists_have_same_tensor_values(self, expected, gotten):\n \"\"\"Checks that two lists of events have the same tensor values.\n\n Args:\n expected: The expected list of events.\n gotten: The list of events we actually got.\n \"\"\"\n self.assertEqual(len(expected), len(gotten))\n\n # Compare the events one at a time.\n for expected_event, gotten_event in zip(expected, gotten):\n self.assertEqual(\n expected_event.summary.value[0].node_name,\n gotten_event.summary.value[0].node_name,\n )\n self.assertAllClose(\n tensor_util.make_ndarray(\n expected_event.summary.value[0].tensor\n ),\n tensor_util.make_ndarray(gotten_event.summary.value[0].tensor),\n )\n self.assertEqual(\n expected_event.summary.value[0].tag,\n gotten_event.summary.value[0].tag,\n )\n\n def testOnValueEventWritesHealthPill(self):\n \"\"\"Tests that the stream handler writes health pills in order.\"\"\"\n # The debugger stream handler receives 2 health pill events.\n received_events = [\n self._create_event_with_float_tensor(\n \"MatMul\", 0, \"DebugNumericSummary\", list(range(1, 15))\n ),\n self._create_event_with_float_tensor(\n \"add\", 0, \"DebugNumericSummary\", [x * x for x in range(1, 15)]\n ),\n self._create_event_with_float_tensor(\n \"MatMul\",\n 0,\n \"DebugNumericSummary\",\n [x + 42 for x in range(1, 15)],\n ),\n ]\n\n for event in received_events:\n self.stream_handler.on_value_event(event)\n\n # Verify that the stream handler wrote them to disk in order.\n self._verify_event_lists_have_same_tensor_values(\n received_events, self.events_written\n )\n\n def testOnValueEventIgnoresIrrelevantOps(self):\n \"\"\"Tests that non-DebugNumericSummary ops are ignored.\"\"\"\n # Receive a DebugNumericSummary event.\n numeric_summary_event = self._create_event_with_float_tensor(\n \"MatMul\", 42, \"DebugNumericSummary\", list(range(1, 15))\n )\n self.stream_handler.on_value_event(numeric_summary_event)\n\n # Receive a non-DebugNumericSummary event.\n self.stream_handler.on_value_event(\n self._create_event_with_float_tensor(\n \"add\", 0, \"DebugIdentity\", list(range(1, 15))\n )\n )\n\n # The stream handler should have only written the DebugNumericSummary event\n # to disk.\n self._verify_event_lists_have_same_tensor_values(\n [numeric_summary_event], self.events_written\n )\n\n def testCorrectStepIsWritten(self):\n events_written = []\n metadata_event = event_pb2.Event()\n metadata_event.log_message.message = json.dumps(\n {\"session_run_index\": 42}\n )\n stream_handler = debugger_server_lib.DebuggerDataStreamHandler(\n 
events_writer_manager=FakeEventsWriterManager(events_written)\n )\n stream_handler.on_core_metadata_event(metadata_event)\n\n # The server receives 2 events. It should assign both the correct step.\n stream_handler.on_value_event(\n self._create_event_with_float_tensor(\n \"MatMul\", 0, \"DebugNumericSummary\", list(range(1, 15))\n )\n )\n stream_handler.on_value_event(\n self._create_event_with_float_tensor(\n \"Add\", 0, \"DebugNumericSummary\", list(range(2, 16))\n )\n )\n self.assertEqual(42, events_written[0].step)\n self.assertEqual(42, events_written[1].step)\n\n def testSentinelStepValueAssignedWhenExecutorStepCountKeyIsMissing(self):\n events_written = []\n metadata_event = event_pb2.Event()\n metadata_event.log_message.message = json.dumps({})\n stream_handler = debugger_server_lib.DebuggerDataStreamHandler(\n events_writer_manager=FakeEventsWriterManager(events_written)\n )\n stream_handler.on_core_metadata_event(metadata_event)\n health_pill_event = self._create_event_with_float_tensor(\n \"MatMul\", 0, \"DebugNumericSummary\", list(range(1, 15))\n )\n stream_handler.on_value_event(health_pill_event)\n self.assertGreater(events_written[0].step, 0)\n\n def testSentinelStepValueAssignedWhenMetadataJsonIsInvalid(self):\n events_written = []\n metadata_event = event_pb2.Event()\n metadata_event.log_message.message = \"some invalid JSON string\"\n stream_handler = debugger_server_lib.DebuggerDataStreamHandler(\n events_writer_manager=FakeEventsWriterManager(events_written)\n )\n stream_handler.on_core_metadata_event(metadata_event)\n health_pill_event = self._create_event_with_float_tensor(\n \"MatMul\", 0, \"DebugNumericSummary\", list(range(1, 15))\n )\n stream_handler.on_value_event(health_pill_event)\n self.assertGreater(events_written[0].step, 0)\n\n def testAlertingEventCallback(self):\n numerics_alert_callback = tf.compat.v1.test.mock.Mock()\n stream_handler = debugger_server_lib.DebuggerDataStreamHandler(\n events_writer_manager=FakeEventsWriterManager(self.events_written),\n numerics_alert_callback=numerics_alert_callback,\n )\n stream_handler.on_core_metadata_event(event_pb2.Event())\n\n # The stream handler receives 1 good event and 1 with an NaN value.\n stream_handler.on_value_event(\n self._create_event_with_float_tensor(\n \"Add\", 0, \"DebugNumericSummary\", [0] * 14\n )\n )\n stream_handler.on_value_event(\n self._create_event_with_float_tensor(\n \"Add\",\n 0,\n \"DebugNumericSummary\",\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n )\n )\n\n # The second event should have triggered the callback.\n numerics_alert_callback.assert_called_once_with(\n numerics_alert.NumericsAlert(\n \"/job:localhost/replica:0/task:0/cpu:0\", \"Add:0\", 0, 1, 0, 0\n )\n )\n\n # The stream handler receives an event with a -Inf value.\n numerics_alert_callback.reset_mock()\n stream_handler.on_value_event(\n self._create_event_with_float_tensor(\n \"Add\",\n 0,\n \"DebugNumericSummary\",\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n )\n )\n numerics_alert_callback.assert_called_once_with(\n numerics_alert.NumericsAlert(\n \"/job:localhost/replica:0/task:0/cpu:0\", \"Add:0\", 0, 0, 1, 0\n )\n )\n\n # The stream handler receives an event with a +Inf value.\n numerics_alert_callback.reset_mock()\n stream_handler.on_value_event(\n self._create_event_with_float_tensor(\n \"Add\",\n 0,\n \"DebugNumericSummary\",\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n )\n )\n numerics_alert_callback.assert_called_once_with(\n numerics_alert.NumericsAlert(\n \"/job:localhost/replica:0/task:0/cpu:0\", 
\"Add:0\", 0, 0, 0, 1\n )\n )\n\n # The stream handler receives an event without any pathetic values.\n numerics_alert_callback.reset_mock()\n stream_handler.on_value_event(\n self._create_event_with_float_tensor(\n \"Add\",\n 0,\n \"DebugNumericSummary\",\n [0, 0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0],\n )\n )\n # assert_not_called is not available in Python 3.4.\n self.assertFalse(numerics_alert_callback.called)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import deque\nfrom math import floor, sqrt\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorboard.plugins.beholder import im_util\nfrom tensorboard.plugins.beholder.shared_config import (\n SECTION_HEIGHT,\n IMAGE_WIDTH,\n DEFAULT_CONFIG,\n SECTION_INFO_FILENAME,\n)\nfrom tensorboard.plugins.beholder.file_system_tools import write_pickle\n\nMIN_SQUARE_SIZE = 3\n\n\nclass Visualizer(object):\n def __init__(self, logdir):\n self.logdir = logdir\n self.sections_over_time = deque([], DEFAULT_CONFIG[\"window_size\"])\n self.config = dict(DEFAULT_CONFIG)\n self.old_config = dict(DEFAULT_CONFIG)\n\n def _reshape_conv_array(self, array, section_height, image_width):\n \"\"\"Reshape a rank 4 array to be rank 2, where each column of\n block_width is a filter, and each row of block height is an input\n channel. For example:\n\n [[[[ 11, 21, 31, 41],\n [ 51, 61, 71, 81],\n [ 91, 101, 111, 121]],\n [[ 12, 22, 32, 42],\n [ 52, 62, 72, 82],\n [ 92, 102, 112, 122]],\n [[ 13, 23, 33, 43],\n [ 53, 63, 73, 83],\n [ 93, 103, 113, 123]]],\n [[[ 14, 24, 34, 44],\n [ 54, 64, 74, 84],\n [ 94, 104, 114, 124]],\n [[ 15, 25, 35, 45],\n [ 55, 65, 75, 85],\n [ 95, 105, 115, 125]],\n [[ 16, 26, 36, 46],\n [ 56, 66, 76, 86],\n [ 96, 106, 116, 126]]],\n [[[ 17, 27, 37, 47],\n [ 57, 67, 77, 87],\n [ 97, 107, 117, 127]],\n [[ 18, 28, 38, 48],\n [ 58, 68, 78, 88],\n [ 98, 108, 118, 128]],\n [[ 19, 29, 39, 49],\n [ 59, 69, 79, 89],\n [ 99, 109, 119, 129]]]]\n\n should be reshaped to:\n\n [[ 11, 12, 13, 21, 22, 23, 31, 32, 33, 41, 42, 43],\n [ 14, 15, 16, 24, 25, 26, 34, 35, 36, 44, 45, 46],\n [ 17, 18, 19, 27, 28, 29, 37, 38, 39, 47, 48, 49],\n [ 51, 52, 53, 61, 62, 63, 71, 72, 73, 81, 82, 83],\n [ 54, 55, 56, 64, 65, 66, 74, 75, 76, 84, 85, 86],\n [ 57, 58, 59, 67, 68, 69, 77, 78, 79, 87, 88, 89],\n [ 91, 92, 93, 101, 102, 103, 111, 112, 113, 121, 122, 123],\n [ 94, 95, 96, 104, 105, 106, 114, 115, 116, 124, 125, 126],\n [ 97, 98, 99, 107, 108, 109, 117, 118, 119, 127, 128, 129]]\n \"\"\"\n\n # E.g. [100, 24, 24, 10]: this shouldn't be reshaped like normal.\n if (\n array.shape[1] == array.shape[2]\n and array.shape[0] != array.shape[1]\n ):\n array = np.rollaxis(np.rollaxis(array, 2), 2)\n\n block_height, block_width, in_channels = array.shape[:3]\n rows = []\n\n max_element_count = section_height * int(image_width / MIN_SQUARE_SIZE)\n element_count = 0\n\n for i in range(in_channels):\n rows.append(array[:, :, i, :].reshape(block_height, -1, order=\"F\"))\n\n # This line should be left in this position. 
Gives it one extra row.\n if (\n element_count >= max_element_count\n and not self.config[\"show_all\"]\n ):\n break\n\n element_count += block_height * in_channels * block_width\n\n return np.vstack(rows)\n\n def _reshape_irregular_array(self, array, section_height, image_width):\n \"\"\"Reshapes arrays of ranks not in {1, 2, 4}\"\"\"\n section_area = section_height * image_width\n flattened_array = np.ravel(array)\n\n if not self.config[\"show_all\"]:\n flattened_array = flattened_array[\n : int(section_area / MIN_SQUARE_SIZE)\n ]\n\n cell_count = np.prod(flattened_array.shape)\n cell_area = section_area / cell_count\n\n cell_side_length = max(1, floor(sqrt(cell_area)))\n row_count = max(1, int(section_height / cell_side_length))\n col_count = int(cell_count / row_count)\n\n # Reshape the truncated array so that it has the same aspect ratio as\n # the section.\n\n # Truncate whatever remaining values there are that don't fit. Hopefully\n # it doesn't matter that the last few (< section count) aren't there.\n section = np.reshape(\n flattened_array[: row_count * col_count], (row_count, col_count)\n )\n\n return section\n\n def _determine_image_width(self, arrays, show_all):\n final_width = IMAGE_WIDTH\n\n if show_all:\n for array in arrays:\n rank = len(array.shape)\n\n if rank == 1:\n width = len(array)\n elif rank == 2:\n width = array.shape[1]\n elif rank == 4:\n width = array.shape[1] * array.shape[3]\n else:\n width = IMAGE_WIDTH\n\n if width > final_width:\n final_width = width\n\n return final_width\n\n def _determine_section_height(self, array, show_all):\n rank = len(array.shape)\n height = SECTION_HEIGHT\n\n if show_all:\n if rank == 1:\n height = SECTION_HEIGHT\n if rank == 2:\n height = max(SECTION_HEIGHT, array.shape[0])\n elif rank == 4:\n height = max(SECTION_HEIGHT, array.shape[0] * array.shape[2])\n else:\n height = max(\n SECTION_HEIGHT, np.prod(array.shape) // IMAGE_WIDTH\n )\n\n return height\n\n def _arrays_to_sections(self, arrays):\n \"\"\"\n input: unprocessed numpy arrays.\n returns: columns of the size that they will appear in the image, not scaled\n for display. That needs to wait until after variance is computed.\n \"\"\"\n sections = []\n sections_to_resize_later = {}\n show_all = self.config[\"show_all\"]\n image_width = self._determine_image_width(arrays, show_all)\n\n for array_number, array in enumerate(arrays):\n rank = len(array.shape)\n section_height = self._determine_section_height(array, show_all)\n\n if rank == 1:\n section = np.atleast_2d(array)\n elif rank == 2:\n section = array\n elif rank == 4:\n section = self._reshape_conv_array(\n array, section_height, image_width\n )\n else:\n section = self._reshape_irregular_array(\n array, section_height, image_width\n )\n # Only calculate variance for what we have to. 
In some cases (biases),\n # the section is larger than the array, so we don't want to calculate\n # variance for the same value over and over - better to resize later.\n # About a 6-7x speedup for a big network with a big variance window.\n section_size = section_height * image_width\n array_size = np.prod(array.shape)\n\n if section_size > array_size:\n sections.append(section)\n sections_to_resize_later[array_number] = section_height\n else:\n sections.append(\n im_util.resize(section, section_height, image_width)\n )\n\n self.sections_over_time.append(sections)\n\n if self.config[\"mode\"] == \"variance\":\n sections = self._sections_to_variance_sections(\n self.sections_over_time\n )\n\n for array_number, height in sections_to_resize_later.items():\n sections[array_number] = im_util.resize(\n sections[array_number], height, image_width\n )\n return sections\n\n def _sections_to_variance_sections(self, sections_over_time):\n \"\"\"Computes the variance of corresponding sections over time.\n\n Returns:\n a list of np arrays.\n \"\"\"\n variance_sections = []\n\n for i in range(len(sections_over_time[0])):\n time_sections = [sections[i] for sections in sections_over_time]\n variance = np.var(time_sections, axis=0)\n variance_sections.append(variance)\n\n return variance_sections\n\n def _sections_to_image(self, sections):\n padding_size = 5\n\n sections = im_util.scale_sections(sections, self.config[\"scaling\"])\n\n final_stack = [sections[0]]\n padding = np.zeros((padding_size, sections[0].shape[1]))\n\n for section in sections[1:]:\n final_stack.append(padding)\n final_stack.append(section)\n\n return np.vstack(final_stack).astype(np.uint8)\n\n def _maybe_clear_deque(self):\n \"\"\"Clears the deque if certain parts of the config have changed.\"\"\"\n\n for config_item in [\"values\", \"mode\", \"show_all\"]:\n if self.config[config_item] != self.old_config[config_item]:\n self.sections_over_time.clear()\n break\n\n self.old_config = self.config\n\n window_size = self.config[\"window_size\"]\n if window_size != self.sections_over_time.maxlen:\n self.sections_over_time = deque(\n self.sections_over_time, window_size\n )\n\n def _save_section_info(self, arrays, sections):\n infos = []\n\n if self.config[\"values\"] == \"trainable_variables\":\n names = [x.name for x in tf.compat.v1.trainable_variables()]\n else:\n names = range(len(arrays))\n\n for array, section, name in zip(arrays, sections, names):\n info = {}\n\n info[\"name\"] = name\n info[\"shape\"] = str(array.shape)\n info[\"min\"] = \"{:.3e}\".format(section.min())\n info[\"mean\"] = \"{:.3e}\".format(section.mean())\n info[\"max\"] = \"{:.3e}\".format(section.max())\n info[\"range\"] = \"{:.3e}\".format(section.max() - section.min())\n info[\"height\"] = section.shape[0]\n\n infos.append(info)\n\n write_pickle(infos, \"{}/{}\".format(self.logdir, SECTION_INFO_FILENAME))\n\n def build_frame(self, arrays):\n self._maybe_clear_deque()\n\n arrays = arrays if isinstance(arrays, list) else [arrays]\n\n sections = self._arrays_to_sections(arrays)\n self._save_section_info(arrays, sections)\n final_image = self._sections_to_image(sections)\n final_image = im_util.apply_colormap(\n final_image, self.config[\"colormap\"]\n )\n\n return final_image\n\n def update(self, config):\n self.config = config\n",
"# -*- coding: utf-8 -*-\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests the op that generates pr_curve summaries.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorboard.compat.proto import summary_pb2\nfrom tensorboard.plugins.pr_curve import metadata\nfrom tensorboard.plugins.pr_curve import summary\nfrom tensorboard.util import tensor_util\nfrom tensorboard.util import test_util\n\ntf.compat.v1.disable_v2_behavior()\n\n\nclass PrCurveTest(tf.test.TestCase):\n def setUp(self):\n super(PrCurveTest, self).setUp()\n tf.compat.v1.reset_default_graph()\n np.random.seed(42)\n\n def pb_via_op(self, summary_op, feed_dict=None):\n with tf.compat.v1.Session() as sess:\n actual_pbtxt = sess.run(summary_op, feed_dict=feed_dict or {})\n actual_proto = summary_pb2.Summary()\n actual_proto.ParseFromString(actual_pbtxt)\n return actual_proto\n\n def normalize_summary_pb(self, pb):\n \"\"\"Pass `pb`'s `TensorProto` through a marshalling roundtrip.\n\n `TensorProto`s can be equal in value even if they are not\n identical in representation, because data can be stored in\n either the `tensor_content` field or the `${dtype}_value` field.\n This normalization ensures a canonical form, and should be used\n before comparing two `Summary`s for equality.\n \"\"\"\n result = summary_pb2.Summary()\n if not isinstance(pb, summary_pb2.Summary):\n # pb can come from `pb_via_op` which creates a TB Summary.\n pb = test_util.ensure_tb_summary_proto(pb)\n result.MergeFrom(pb)\n for value in result.value:\n if value.HasField(\"tensor\"):\n new_tensor = tensor_util.make_tensor_proto(\n tensor_util.make_ndarray(value.tensor)\n )\n value.ClearField(\"tensor\")\n value.tensor.MergeFrom(new_tensor)\n return result\n\n def compute_and_check_summary_pb(\n self,\n name,\n labels,\n predictions,\n num_thresholds,\n weights=None,\n display_name=None,\n description=None,\n feed_dict=None,\n ):\n \"\"\"Use both `op` and `pb` to get a summary, asserting equality.\n\n Returns:\n a `Summary` protocol buffer\n \"\"\"\n labels_tensor = tf.constant(labels)\n predictions_tensor = tf.constant(predictions)\n weights_tensor = None if weights is None else tf.constant(weights)\n op = summary.op(\n name=name,\n labels=labels_tensor,\n predictions=predictions_tensor,\n num_thresholds=num_thresholds,\n weights=weights_tensor,\n display_name=display_name,\n description=description,\n )\n pb = self.normalize_summary_pb(\n summary.pb(\n name=name,\n labels=labels,\n predictions=predictions,\n num_thresholds=num_thresholds,\n weights=weights,\n display_name=display_name,\n description=description,\n )\n )\n pb_via_op = self.normalize_summary_pb(\n self.pb_via_op(op, feed_dict=feed_dict)\n )\n self.assertProtoEquals(pb, pb_via_op)\n return pb\n\n def 
verify_float_arrays_are_equal(self, expected, actual):\n # We use an absolute error instead of a relative one because the expected\n # values are small. The default relative error (trol) of 1e-7 yields many\n # undesired test failures.\n np.testing.assert_allclose(expected, actual, rtol=0, atol=1e-7)\n\n def test_metadata(self):\n pb = self.compute_and_check_summary_pb(\n name=\"foo\",\n labels=np.array([True]),\n predictions=np.float32([0.42]),\n num_thresholds=3,\n )\n summary_metadata = pb.value[0].metadata\n plugin_data = summary_metadata.plugin_data\n self.assertEqual(\"foo\", summary_metadata.display_name)\n self.assertEqual(\"\", summary_metadata.summary_description)\n self.assertEqual(metadata.PLUGIN_NAME, plugin_data.plugin_name)\n plugin_data = metadata.parse_plugin_metadata(\n summary_metadata.plugin_data.content\n )\n self.assertEqual(3, plugin_data.num_thresholds)\n\n def test_all_true_positives(self):\n pb = self.compute_and_check_summary_pb(\n name=\"foo\",\n labels=np.array([True]),\n predictions=np.float32([1]),\n num_thresholds=3,\n )\n expected = [\n [1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n values = tensor_util.make_ndarray(pb.value[0].tensor)\n self.verify_float_arrays_are_equal(expected, values)\n\n def test_all_true_negatives(self):\n pb = self.compute_and_check_summary_pb(\n name=\"foo\",\n labels=np.array([False]),\n predictions=np.float32([0]),\n num_thresholds=3,\n )\n expected = [\n [0.0, 0.0, 0.0],\n [1.0, 0.0, 0.0],\n [0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n values = tensor_util.make_ndarray(pb.value[0].tensor)\n self.verify_float_arrays_are_equal(expected, values)\n\n def test_all_false_positives(self):\n pb = self.compute_and_check_summary_pb(\n name=\"foo\",\n labels=np.array([False]),\n predictions=np.float32([1]),\n num_thresholds=3,\n )\n expected = [\n [0.0, 0.0, 0.0],\n [1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n values = tensor_util.make_ndarray(pb.value[0].tensor)\n self.verify_float_arrays_are_equal(expected, values)\n\n def test_all_false_negatives(self):\n pb = self.compute_and_check_summary_pb(\n name=\"foo\",\n labels=np.array([True]),\n predictions=np.float32([0]),\n num_thresholds=3,\n )\n expected = [\n [1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 1.0, 1.0],\n [1.0, 0.0, 0.0],\n [1.0, 0.0, 0.0],\n ]\n values = tensor_util.make_ndarray(pb.value[0].tensor)\n self.verify_float_arrays_are_equal(expected, values)\n\n def test_many_values(self):\n pb = self.compute_and_check_summary_pb(\n name=\"foo\",\n labels=np.array([True, False, False, True, True, True]),\n predictions=np.float32([0.2, 0.3, 0.4, 0.6, 0.7, 0.8]),\n num_thresholds=3,\n )\n expected = [\n [4.0, 3.0, 0.0],\n [2.0, 0.0, 0.0],\n [0.0, 2.0, 2.0],\n [0.0, 1.0, 4.0],\n [2.0 / 3.0, 1.0, 0.0],\n [1.0, 0.75, 0.0],\n ]\n values = tensor_util.make_ndarray(pb.value[0].tensor)\n self.verify_float_arrays_are_equal(expected, values)\n\n def test_many_values_with_weights(self):\n pb = self.compute_and_check_summary_pb(\n name=\"foo\",\n labels=np.array([True, False, False, True, True, True]),\n predictions=np.float32([0.2, 0.3, 0.4, 0.6, 0.7, 0.8]),\n num_thresholds=3,\n weights=np.float32([0.0, 0.5, 2.0, 0.0, 0.5, 1.0]),\n )\n expected = [\n [1.5, 1.5, 0.0],\n [2.5, 0.0, 0.0],\n [0.0, 2.5, 2.5],\n [0.0, 0.0, 1.5],\n [0.375, 1.0, 0.0],\n [1.0, 1.0, 0.0],\n ]\n values = 
tensor_util.make_ndarray(pb.value[0].tensor)\n self.verify_float_arrays_are_equal(expected, values)\n\n def test_exhaustive_random_values(self):\n # Most other tests use small and crafted predictions and labels.\n # This test exhaustively generates many data points.\n data_points = 420\n pb = self.compute_and_check_summary_pb(\n name=\"foo\",\n labels=np.random.uniform(size=(data_points,)) > 0.5,\n predictions=np.float32(np.random.uniform(size=(data_points,))),\n num_thresholds=5,\n )\n expected = [\n [218.0, 162.0, 111.0, 55.0, 0.0],\n [202.0, 148.0, 98.0, 51.0, 0.0],\n [0.0, 54.0, 104.0, 151.0, 202.0],\n [0.0, 56.0, 107.0, 163.0, 218.0],\n [0.5190476, 0.5225806, 0.5311005, 0.5188679, 0.0],\n [1.0, 0.7431192, 0.5091743, 0.2522936, 0.0],\n ]\n values = tensor_util.make_ndarray(pb.value[0].tensor)\n self.verify_float_arrays_are_equal(expected, values)\n\n def test_counts_below_1(self):\n \"\"\"Tests support for counts below 1.\n\n Certain weights cause TP, FP, TN, FN counts to be below 1.\n \"\"\"\n pb = self.compute_and_check_summary_pb(\n name=\"foo\",\n labels=np.array([True, False, False, True, True, True]),\n predictions=np.float32([0.2, 0.3, 0.4, 0.6, 0.7, 0.8]),\n num_thresholds=3,\n weights=np.float32([0.0, 0.1, 0.2, 0.1, 0.1, 0.0]),\n )\n expected = [\n [0.2, 0.2, 0.0],\n [0.3, 0.0, 0.0],\n [0.0, 0.3, 0.3],\n [0.0, 0.0, 0.2],\n [0.4, 1.0, 0.0],\n [1.0, 1.0, 0.0],\n ]\n values = tensor_util.make_ndarray(pb.value[0].tensor)\n self.verify_float_arrays_are_equal(expected, values)\n\n def test_raw_data(self):\n # We pass these raw counts and precision/recall values.\n name = \"foo\"\n true_positive_counts = [75, 64, 21, 5, 0]\n false_positive_counts = [150, 105, 18, 0, 0]\n true_negative_counts = [0, 45, 132, 150, 150]\n false_negative_counts = [0, 11, 54, 70, 75]\n precision = [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0]\n recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]\n num_thresholds = 5\n display_name = \"some_raw_values\"\n description = \"We passed raw values into a summary op.\"\n\n op = summary.raw_data_op(\n name=name,\n true_positive_counts=tf.constant(true_positive_counts),\n false_positive_counts=tf.constant(false_positive_counts),\n true_negative_counts=tf.constant(true_negative_counts),\n false_negative_counts=tf.constant(false_negative_counts),\n precision=tf.constant(precision),\n recall=tf.constant(recall),\n num_thresholds=num_thresholds,\n display_name=display_name,\n description=description,\n )\n pb_via_op = self.normalize_summary_pb(self.pb_via_op(op))\n\n # Call the corresponding method that is decoupled from TensorFlow.\n pb = self.normalize_summary_pb(\n summary.raw_data_pb(\n name=name,\n true_positive_counts=true_positive_counts,\n false_positive_counts=false_positive_counts,\n true_negative_counts=true_negative_counts,\n false_negative_counts=false_negative_counts,\n precision=precision,\n recall=recall,\n num_thresholds=num_thresholds,\n display_name=display_name,\n description=description,\n )\n )\n\n # The 2 methods above should write summaries with the same data.\n self.assertProtoEquals(pb, pb_via_op)\n\n # Test the metadata.\n summary_metadata = pb.value[0].metadata\n self.assertEqual(\"some_raw_values\", summary_metadata.display_name)\n self.assertEqual(\n \"We passed raw values into a summary op.\",\n summary_metadata.summary_description,\n )\n self.assertEqual(\n metadata.PLUGIN_NAME, summary_metadata.plugin_data.plugin_name\n )\n\n plugin_data = metadata.parse_plugin_metadata(\n summary_metadata.plugin_data.content\n )\n self.assertEqual(5, 
plugin_data.num_thresholds)\n\n # Test the summary contents.\n values = tensor_util.make_ndarray(pb.value[0].tensor)\n self.verify_float_arrays_are_equal(\n [\n [75.0, 64.0, 21.0, 5.0, 0.0], # True positives.\n [150.0, 105.0, 18.0, 0.0, 0.0], # False positives.\n [0.0, 45.0, 132.0, 150.0, 150.0], # True negatives.\n [0.0, 11.0, 54.0, 70.0, 75.0], # False negatives.\n [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0], # Precision.\n [1.0, 0.8533334, 0.28, 0.0666667, 0.0], # Recall.\n ],\n values,\n )\n\n\nclass StreamingOpTest(tf.test.TestCase):\n def setUp(self):\n super(StreamingOpTest, self).setUp()\n tf.compat.v1.reset_default_graph()\n np.random.seed(1)\n\n def pb_via_op(self, summary_op):\n actual_pbtxt = summary_op.eval()\n actual_proto = summary_pb2.Summary()\n actual_proto.ParseFromString(actual_pbtxt)\n return actual_proto\n\n def tensor_via_op(self, summary_op):\n actual_pbtxt = summary_op.eval()\n actual_proto = summary_pb2.Summary()\n actual_proto.ParseFromString(actual_pbtxt)\n return actual_proto\n\n def test_matches_op(self):\n predictions = tf.constant([0.2, 0.4, 0.5, 0.6, 0.8], dtype=tf.float32)\n labels = tf.constant([False, True, True, False, True], dtype=tf.bool)\n\n pr_curve, update_op = summary.streaming_op(\n name=\"pr_curve\",\n predictions=predictions,\n labels=labels,\n num_thresholds=10,\n )\n expected_pr_curve = summary.op(\n name=\"pr_curve\",\n predictions=predictions,\n labels=labels,\n num_thresholds=10,\n )\n with self.test_session() as sess:\n sess.run(tf.compat.v1.local_variables_initializer())\n sess.run([update_op])\n\n proto = self.pb_via_op(pr_curve)\n expected_proto = self.pb_via_op(expected_pr_curve)\n\n # Need to detect and fix the automatic _1 appended to second namespace.\n self.assertEqual(proto.value[0].tag, \"pr_curve/pr_curves\")\n self.assertEqual(\n expected_proto.value[0].tag, \"pr_curve_1/pr_curves\"\n )\n expected_proto.value[0].tag = \"pr_curve/pr_curves\"\n\n self.assertProtoEquals(expected_proto, proto)\n\n def test_matches_op_with_updates(self):\n predictions = tf.constant([0.2, 0.4, 0.5, 0.6, 0.8], dtype=tf.float32)\n labels = tf.constant([False, True, True, False, True], dtype=tf.bool)\n pr_curve, update_op = summary.streaming_op(\n name=\"pr_curve\",\n predictions=predictions,\n labels=labels,\n num_thresholds=10,\n )\n\n complete_predictions = tf.tile(predictions, [3])\n complete_labels = tf.tile(labels, [3])\n expected_pr_curve = summary.op(\n name=\"pr_curve\",\n predictions=complete_predictions,\n labels=complete_labels,\n num_thresholds=10,\n )\n with self.test_session() as sess:\n sess.run(tf.compat.v1.local_variables_initializer())\n sess.run([update_op])\n sess.run([update_op])\n sess.run([update_op])\n\n proto = self.pb_via_op(pr_curve)\n expected_proto = self.pb_via_op(expected_pr_curve)\n\n # Need to detect and fix the automatic _1 appended to second namespace.\n self.assertEqual(proto.value[0].tag, \"pr_curve/pr_curves\")\n self.assertEqual(\n expected_proto.value[0].tag, \"pr_curve_1/pr_curves\"\n )\n expected_proto.value[0].tag = \"pr_curve/pr_curves\"\n\n self.assertProtoEquals(expected_proto, proto)\n\n def test_only_1_summary_generated(self):\n \"\"\"Tests that the streaming op only generates 1 summary for PR curves.\n\n This test was made in response to a bug in which calling the\n streaming op actually introduced 2 tags.\n \"\"\"\n predictions = tf.constant([0.2, 0.4, 0.5, 0.6, 0.8], dtype=tf.float32)\n labels = tf.constant([False, True, True, False, True], dtype=tf.bool)\n _, update_op = summary.streaming_op(\n 
name=\"pr_curve\",\n predictions=predictions,\n labels=labels,\n num_thresholds=10,\n )\n with self.test_session() as sess:\n sess.run(tf.compat.v1.local_variables_initializer())\n sess.run(update_op)\n summary_proto = summary_pb2.Summary()\n summary_proto.ParseFromString(\n sess.run(tf.compat.v1.summary.merge_all())\n )\n\n tags = [v.tag for v in summary_proto.value]\n # Only 1 tag should have been introduced.\n self.assertEqual([\"pr_curve/pr_curves\"], tags)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.Graph",
"tensorflow.python.tf2.enabled"
],
[
"tensorflow.compat.v1.test.main",
"numpy.max",
"numpy.mean",
"numpy.var",
"tensorflow.compat.v1.python_io.tf_record_iterator",
"tensorflow.compat.v1.constant",
"numpy.matmul",
"numpy.size",
"numpy.min",
"numpy.isnan",
"tensorflow.compat.v1.div",
"tensorflow.compat.v1.RunOptions",
"numpy.array",
"numpy.logical_and",
"numpy.sum",
"tensorflow.python.debug.load_tensor_from_event",
"tensorflow.compat.v1.as_dtype",
"tensorflow.compat.v1.Variable",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.python.debug.watch_graph",
"tensorflow.compat.v1.Event",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.assign_add",
"tensorflow.compat.v1.reset_default_graph",
"numpy.isinf"
],
[
"tensorflow.core.debug.debugger_event_metadata_pb2.DebuggerEventMetadata",
"tensorflow.compat.v1.test.mock.Mock",
"tensorflow.test.main",
"tensorflow.compat.v1.test.mock.patch.stopall"
],
[
"numpy.rollaxis",
"numpy.reshape",
"tensorflow.compat.v1.trainable_variables",
"numpy.atleast_2d",
"numpy.prod",
"numpy.var",
"numpy.ravel",
"numpy.zeros",
"numpy.vstack"
],
[
"tensorflow.compat.v1.summary.merge_all",
"tensorflow.constant",
"tensorflow.compat.v1.local_variables_initializer",
"numpy.random.seed",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.test.main",
"tensorflow.compat.v1.Session",
"numpy.float32",
"numpy.testing.assert_allclose",
"numpy.random.uniform",
"numpy.array",
"tensorflow.compat.v1.reset_default_graph",
"tensorflow.tile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
DragonMyth/MyDartEnv | [
"5a5c40d0104e22e0493f823c41734776fb2e6790"
] | [
"gym/envs/dart/flatworm_swim_straight_reduced.py"
] | [
"import numpy as np\nfrom gym import utils\nfrom gym.envs.dart import dart_env\nfrom .simple_water_world import BaseFluidSimulator\n\n\nclass DartFlatwormSwimStraightReducedEnv(dart_env.DartEnv, utils.EzPickle):\n def __init__(self):\n control_bounds = np.array([[1.0] * 12, [-1.0] * 12])\n self.action_scale = np.array([5*np.pi,5*np.pi,3*np.pi,3*np.pi,np.pi,np.pi]*2)\n self.frame_skip = 5\n dart_env.DartEnv.__init__(self, 'flatworm_reduced.skel', self.frame_skip, 53, control_bounds, dt=0.002,\n disableViewer=not True,\n custom_world=BaseFluidSimulator)\n utils.EzPickle.__init__(self)\n\n self.bodynodes_dict = self.construct_skel_dict()\n\n self.init_state = self._get_obs()\n self.original_com = self.robot_skeleton.C\n self.original_q = self.robot_skeleton.q\n\n num_of_dofs = len(self.robot_skeleton.dofs) - len(self.robot_skeleton.joints[0].dofs)\n\n self.simulation_dt = self.dt * 1.0 / self.frame_skip\n self.Kp = np.diagflat([0.0] * len(self.robot_skeleton.joints[0].dofs) + [4000.0] * num_of_dofs)\n # self.Kd = 150 * self.simulation_dt * self.Kp\n self.Kd = self.simulation_dt * self.Kp\n\n self.invM = np.linalg.inv(self.robot_skeleton.M + self.Kd * self.simulation_dt)\n # self.symm_rate = -1 * np.array([1, 1, 0.01, 0.01])\n\n def _step(self, a):\n old_com = self.robot_skeleton.C\n old_q = self.robot_skeleton.q\n old_dq = self.robot_skeleton.dq\n\n\n\n target_pos = self.build_target_pos(a)\n ##SPD Controller\n # for i in range(self.frame_skip):\n # invM = self.invM\n # p = -self.Kp.dot(self.robot_skeleton.q + self.robot_skeleton.dq * self.simulation_dt - target_pos)\n # d = -self.Kd.dot(self.robot_skeleton.dq)\n # qddot = invM.dot(-self.robot_skeleton.c + p + d + self.robot_skeleton.constraint_forces())\n # tau = p + d - self.Kd.dot(qddot) * self.simulation_dt\n # # tau *= 0.0005\n # tau[0:len(self.robot_skeleton.joints[0].dofs)] = 0\n # self.do_simulation(tau, 1)\n\n invM = self.invM\n p = -self.Kp.dot(self.robot_skeleton.q + self.robot_skeleton.dq * self.simulation_dt - target_pos)\n d = -self.Kd.dot(self.robot_skeleton.dq)\n qddot = invM.dot(-self.robot_skeleton.c + p + d + self.robot_skeleton.constraint_forces())\n tau = p + d - self.Kd.dot(qddot) * self.simulation_dt\n tau *= 0.001\n tau[0:len(self.robot_skeleton.joints[0].dofs)] = 0\n self.do_simulation(tau, self.frame_skip)\n cur_com = self.robot_skeleton.C\n cur_q = self.robot_skeleton.q\n cur_dq = self.robot_skeleton.dq\n ob = self._get_obs()\n\n angs = np.abs(self.robot_skeleton.q[6::])\n\n horizontal_pos_rwd = (cur_com[0] - old_com[0]) * 500\n horizontal_vel_rwd = 0 # 3*cur_dq[3]\n orth_pen = 0.5 * (np.abs(cur_com[1] - self.original_com[1]) + np.abs(cur_com[2] - self.original_com[2]))\n rotate_pen = np.sum(np.abs(cur_q[:3] - self.original_q[:3]))\n\n energy_consumed_pen = 0.1 * np.sum(tau[6::] * old_dq[6::] * self.frame_skip)\n # mirror_enforce\n reward = 1 + horizontal_pos_rwd + horizontal_vel_rwd - rotate_pen - orth_pen - energy_consumed_pen\n\n notdone = np.isfinite(ob[5::]).all() and (np.abs(angs) < np.pi / 2.0).all()\n done = not notdone\n\n return ob, reward, done, {'rwd': reward, 'horizontal_pos_rwd': horizontal_pos_rwd,\n 'horizontal_vel_rwd': horizontal_vel_rwd,\n 'rotate_pen': -rotate_pen, 'orth_pen': -orth_pen, 'energy_consumed_pen':energy_consumed_pen,'tau':tau[6::]}\n\n def _get_obs(self):\n\n return np.concatenate([self.robot_skeleton.q[4:6], self.robot_skeleton.dq[3:6], self.robot_skeleton.q[6::],\n self.robot_skeleton.dq[6::]]).ravel()\n\n def reset_model(self):\n self.dart_world.reset()\n qpos = 
self.robot_skeleton.q + self.np_random.uniform(low=-.01, high=.01, size=self.robot_skeleton.ndofs)\n qvel = self.robot_skeleton.dq + self.np_random.uniform(low=-.01, high=.01, size=self.robot_skeleton.ndofs)\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def viewer_setup(self):\n self._get_viewer().scene.tb.trans[2] = -3.5\n self._get_viewer().scene.tb._set_theta(-60)\n self.track_skeleton_id = 0\n\n def do_simulation(self, tau, n_frames):\n for _ in range(n_frames):\n comb = []\n import itertools\n for i in itertools.product(['l', 'r'], [1, 2, 3]):\n comb.append(i)\n for segIdx in range(1):\n for side, idx in comb:\n offset1_dir = np.array([-1, 0, 0])\n offset2_dir = np.array([1, 0, 0])\n curr_key = 'wing_' + str(side) + '_' + str(segIdx) + str(idx)\n next_key = 'wing_' + str(side) + '_' + str(segIdx + 1) + str(idx)\n curr_body = self.bodynodes_dict[curr_key]\n next_body = self.bodynodes_dict[next_key]\n\n constraint_force, offset1, offset2 = self.calc_constraint_force(curr_body, offset1_dir, next_body,\n offset2_dir, strength=6)\n\n curr_body.add_ext_force(constraint_force, _offset=offset1)\n next_body.add_ext_force(-constraint_force, _offset=offset2)\n\n super(DartFlatwormSwimStraightReducedEnv,self).do_simulation(tau,1)\n\n\n\n\n def calc_constraint_force(self, bodynode1, offset1_dir, bodynode2, offset2_dir, strength=1.0):\n shape1 = bodynode1.shapenodes[0]\n body1_geometry = shape1.shape.size()\n shape2 = bodynode2.shapenodes[0]\n body2_geometry = shape2.shape.size()\n\n offset1 = offset1_dir * body1_geometry / 2\n offset2 = offset2_dir * body2_geometry / 2\n\n body1_link_pos_to_world = bodynode1.to_world(offset1)\n body2_link_pos_to_world = bodynode2.to_world(offset2)\n constraint_force_dir = body2_link_pos_to_world - body1_link_pos_to_world\n constraint_force = constraint_force_dir * strength\n return constraint_force, offset1, offset2\n\n\n def construct_skel_dict(self):\n node_dict = {}\n bodynodes = self.robot_skeleton.bodynodes\n for i in range(len(bodynodes)):\n node_dict[bodynodes[i].name] = bodynodes[i]\n return node_dict\n\n def build_target_pos(self,a):\n target_pos = np.zeros(24)\n a = a*self.action_scale\n target_pos[0:6] = a[0:6]\n target_pos[6:12]= a[6:12]\n\n return np.concatenate(([0.0] * 6, target_pos))\n"
] | [
[
"numpy.abs",
"numpy.isfinite",
"numpy.linalg.inv",
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cysmnl/geometric_cognition | [
"473c0cf585aaf49904bfb87c35ea706e12f67f8a",
"473c0cf585aaf49904bfb87c35ea706e12f67f8a"
] | [
"torch_geometric/read/planetoid.py",
"torch_geometric/transforms/nn_graph.py"
] | [
"import sys\nimport os.path as osp\nfrom itertools import repeat\n\nimport torch\nfrom torch_sparse import coalesce\nfrom torch_geometric.data import Data\nfrom torch_geometric.read import read_txt_array\nfrom torch_geometric.utils import remove_self_loops\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\n\ndef read_planetoid_data(folder, prefix):\n \"\"\"Reads the planetoid data format.\n ind.{}.x\n \"\"\"\n names = ['x', 'tx', 'allx', 'y', 'ty', 'ally', 'graph', 'test.index']\n items = [read_file(folder, prefix, name) for name in names]\n x, tx, allx, y, ty, ally, graph, test_index = items\n train_index = torch.arange(y.size(0), dtype=torch.long)\n val_index = torch.arange(y.size(0), y.size(0) + 500, dtype=torch.long)\n sorted_test_index = test_index.sort()[0]\n\n if prefix.lower() == 'citeseer':\n # There are some isolated nodes in the Citeseer graph, resulting in\n # none consecutive test indices. We need to identify them and add them\n # as zero vectors to `tx` and `ty`.\n len_test_indices = (test_index.max() - test_index.min()).item() + 1\n\n tx_ext = torch.zeros(len_test_indices, tx.size(1))\n tx_ext[sorted_test_index - test_index.min(), :] = tx\n ty_ext = torch.zeros(len_test_indices, ty.size(1))\n ty_ext[sorted_test_index - test_index.min(), :] = ty\n\n tx, ty = tx_ext, ty_ext\n\n x = torch.cat([allx, tx], dim=0)\n y = torch.cat([ally, ty], dim=0).max(dim=1)[1]\n\n x[test_index] = x[sorted_test_index]\n y[test_index] = y[sorted_test_index]\n\n train_mask = sample_mask(train_index, num_nodes=y.size(0))\n val_mask = sample_mask(val_index, num_nodes=y.size(0))\n test_mask = sample_mask(test_index, num_nodes=y.size(0))\n\n edge_index = edge_index_from_dict(graph, num_nodes=y.size(0))\n\n data = Data(x=x, edge_index=edge_index, y=y)\n data.train_mask = train_mask\n data.val_mask = val_mask\n data.test_mask = test_mask\n\n return data\n\n\ndef read_file(folder, prefix, name):\n path = osp.join(folder, 'ind.{}.{}'.format(prefix.lower(), name))\n\n if name == 'test.index':\n return read_txt_array(path, dtype=torch.long)\n\n with open(path, 'rb') as f:\n if sys.version_info > (3, 0):\n out = pickle.load(f, encoding='latin1')\n else:\n out = pickle.load(f)\n\n if name == 'graph':\n return out\n\n out = out.todense() if hasattr(out, 'todense') else out\n out = torch.Tensor(out)\n return out\n\n\ndef edge_index_from_dict(graph_dict, num_nodes=None):\n row, col = [], []\n for key, value in graph_dict.items():\n row += repeat(key, len(value))\n col += value\n edge_index = torch.stack([torch.tensor(row), torch.tensor(col)], dim=0)\n # NOTE: There are duplicated edges and self loops in the datasets. Other\n # implementations do not remove them!\n edge_index, _ = remove_self_loops(edge_index)\n edge_index, _ = coalesce(edge_index, None, num_nodes, num_nodes)\n return edge_index\n\n\ndef sample_mask(index, num_nodes):\n mask = torch.zeros((num_nodes, ), dtype=torch.uint8)\n mask[index] = 1\n return mask\n",
"import torch\nimport scipy.spatial\nfrom torch_geometric.utils import to_undirected\n\n\nclass NNGraph(object):\n def __init__(self, k=6):\n self.k = k\n\n def __call__(self, data):\n pos = data.pos\n assert not pos.is_cuda\n\n row = torch.arange(pos.size(0), dtype=torch.long)\n row = row.view(-1, 1).repeat(1, self.k).view(-1)\n\n _, col = scipy.spatial.cKDTree(pos).query(pos, self.k + 1)\n col = torch.tensor(col)[:, 1:].contiguous().view(-1)\n mask = col < pos.size(0)\n edge_index = torch.stack([row[mask], col[mask]], dim=0)\n edge_index = to_undirected(edge_index, num_nodes=pos.size(0))\n\n data.edge_index = edge_index\n return data\n\n def __repr__(self):\n return '{}(k={})'.format(self.__class__.__name__, self.k)\n"
] | [
[
"torch.tensor",
"torch.zeros",
"torch.Tensor",
"torch.cat"
],
[
"torch.stack",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lichnost/latent-pose-reenactment | [
"ee2719355f1db3d0b927f9b10b0d42d1fd07d4c9",
"ee2719355f1db3d0b927f9b10b0d42d1fd07d4c9"
] | [
"embedders/FSTH.py",
"utils/radam.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.nn.utils import spectral_norm\nfrom generators.common import blocks\n\nclass Wrapper:\n @staticmethod\n def get_args(parser):\n parser.add('--embed_padding', type=str, default='zero', help='zero|reflection')\n parser.add('--embed_num_blocks', type=int, default=6)\n parser.add('--average_function', type=str, default='sum', help='sum|max')\n\n @staticmethod\n def get_net(args):\n net = Embedder(\n args.embed_padding, args.in_channels, args.out_channels,\n args.num_channels, args.max_num_channels, args.embed_channels,\n args.embed_num_blocks, args.average_function)\n return net.to(args.device)\n\nclass Embedder(nn.Module):\n def __init__(self, padding, in_channels, out_channels, num_channels, max_num_channels, embed_channels,\n embed_num_blocks, average_function):\n super().__init__()\n\n def get_down_block(in_channels, out_channels, padding):\n return blocks.ResBlock(in_channels, out_channels, padding, upsample=False, downsample=True,\n norm_layer='none')\n\n if padding == 'zero':\n padding = nn.ZeroPad2d\n elif padding == 'reflection':\n padding = nn.ReflectionPad2d\n\n self.out_channels = embed_channels\n\n self.down_block = nn.Sequential(\n padding(1),\n spectral_norm(\n nn.Conv2d(in_channels + out_channels, num_channels, 3, 1, 0),\n eps=1e-4),\n nn.ReLU(),\n padding(1),\n spectral_norm(\n nn.Conv2d(num_channels, num_channels, 3, 1, 0),\n eps=1e-4),\n nn.AvgPool2d(2))\n self.skip = nn.Sequential(\n spectral_norm(\n nn.Conv2d(in_channels + out_channels, num_channels, 1),\n eps=1e-4),\n nn.AvgPool2d(2))\n\n layers = []\n in_channels = num_channels\n for i in range(1, embed_num_blocks - 1):\n out_channels = min(in_channels * 2, max_num_channels)\n layers.append(get_down_block(in_channels, out_channels, padding))\n in_channels = out_channels\n layers.append(get_down_block(out_channels, embed_channels, padding))\n self.down_blocks = nn.Sequential(*layers)\n\n self.average_function = average_function\n\n self.finetuning = False\n\n def enable_finetuning(self, data_dict=None):\n self.finetuning = True\n\n def get_identity_embedding(self, data_dict):\n enc_stickmen = data_dict['enc_stickmen']\n enc_rgbs = data_dict['enc_rgbs']\n\n inputs = torch.cat([enc_stickmen, enc_rgbs], 2)\n\n b, n, c, h, w = inputs.shape\n inputs = inputs.view(-1, c, h, w)\n out = self.down_block(inputs)\n out = out + self.skip(inputs)\n out = self.down_blocks(out)\n out = torch.relu(out)\n embeds_elemwise = out.view(b, n, self.out_channels, -1).sum(3)\n\n if self.average_function == 'sum':\n embeds = embeds_elemwise.mean(1)\n elif self.average_function == 'max':\n embeds = embeds_elemwise.max(1)[0]\n else:\n raise Exception('Incorrect `average_function` argument, expected `sum` or `max`')\n\n data_dict['embeds'] = embeds\n data_dict['embeds_elemwise'] = embeds_elemwise\n\n def get_pose_embedding(self, data_dict):\n pass\n\n def forward(self, data_dict):\n if not self.finetuning:\n self.get_identity_embedding(data_dict)\n self.get_pose_embedding(data_dict)\n",
"# Source: https://github.com/LiyuanLucasLiu/RAdam\nimport math\nimport torch\nfrom torch.optim.optimizer import Optimizer, required\n\nclass RAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n \n self.degenerated_to_sgd = degenerated_to_sgd\n if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):\n for param in params:\n if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):\n param['buffer'] = [[None, None, None] for _ in range(10)]\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)])\n super(RAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n buffered = group['buffer'][int(state['step'] % 10)]\n if state['step'] == buffered[0]:\n N_sma, step_size = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])\n elif self.degenerated_to_sgd:\n step_size = 1.0 / (1 - beta1 ** state['step'])\n else:\n step_size = -1\n buffered[2] = step_size\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)\n p.data.copy_(p_data_fp32)\n elif step_size > 0:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n p_data_fp32.add_(-step_size * group['lr'], exp_avg)\n p.data.copy_(p_data_fp32)\n\n return loss\n\nclass PlainRAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 
0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n \n self.degenerated_to_sgd = degenerated_to_sgd\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n\n super(PlainRAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(PlainRAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n p.data.copy_(p_data_fp32)\n elif self.degenerated_to_sgd:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n step_size = group['lr'] / (1 - beta1 ** state['step'])\n p_data_fp32.add_(-step_size, exp_avg)\n p.data.copy_(p_data_fp32)\n\n return loss\n\n\nclass AdamW(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup = 0):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n \n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, warmup = warmup)\n super(AdamW, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(AdamW, self).__setstate__(state)\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n 
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n \n if group['warmup'] > state['step']:\n scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']\n else:\n scheduled_lr = group['lr']\n\n step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1\n \n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32)\n\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n"
] | [
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.Conv2d",
"torch.relu",
"torch.nn.AvgPool2d",
"torch.nn.ReLU"
],
[
"torch.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aimldl/coding | [
"70ddbfaa454ab92fd072ee8dc614ecc330b34a70",
"70ddbfaa454ab92fd072ee8dc614ecc330b34a70",
"70ddbfaa454ab92fd072ee8dc614ecc330b34a70",
"70ddbfaa454ab92fd072ee8dc614ecc330b34a70"
] | [
"python/en/_matplotlib/gallery/text_labels_and_annotations/auto-wrapping_text.py",
"python/en/archive/topics/temp/audio/digital_filters/test-scipy-butterworth_lowpass_filter.py",
"python/en/_numpy/1.Quickstart_tutorial-1.The_Basics-1.An_eample.py",
"python/en/_pandas/Pandas for Everyone/p4e-2_3_the_series.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\ntext_labels_and_annotations/auto-wrapping_text.py\nMatplotlib > Gallery > Text, labels and annotations> Auto-wrapping text\nhttps://matplotlib.org/3.1.1/gallery/text_labels_and_annotations/autowrap.html#sphx-glr-gallery-text-labels-and-annotations-autowrap-py\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nfig = plt.figure()\nplt.axis([0, 10, 0, 10])\nt = (\"This is a really long string that I'd rather have wrapped so that it \"\n \"doesn't go outside of the figure, but if it's long enough it will go \"\n \"off the top or bottom!\")\nplt.text(4, 1, t, ha='left', rotation=15, wrap=True)\nplt.text(6, 5, t, ha='left', rotation=15, wrap=True)\nplt.text(5, 5, t, ha='right', rotation=-15, wrap=True)\nplt.text(5, 10, t, fontsize=18, style='oblique', ha='center',\n va='top', wrap=True)\nplt.text(3, 4, t, family='serif', style='italic', ha='right', wrap=True)\nplt.text(-1, 0, t, ha='left', rotation=-15, wrap=True)\n\nplt.show()\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\ntest-scipy-butterworth_lowpass_filter.py\n\nCreating lowpass filter in SciPy - understanding methods and units\nhttps://stackoverflow.com/questions/25191620/creating-lowpass-filter-in-scipy-understanding-methods-and-units\n\nButterworth Bandpass\nhttps://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import butter, lfilter, freqz\n\ndef butter_lowpass(cutoff, fs, order=5):\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\n return b, a\n\ndef butter_lowpass_filter(data, cutoff, fs, order=5):\n b, a = butter_lowpass(cutoff, fs, order=order)\n y = lfilter(b, a, data)\n return y\n\ndef run():\n # Filter requirements.\n order = 6\n fs = 30.0 # sample rate, Hz\n cutoff = 3.667 # desired cutoff frequency of the filter, Hz\n\n # Plot the frequency response for a few different orders.\n plt.figure(1)\n plt.clf()\n for order in [3, 6, 9]:\n b, a = butter_lowpass(cutoff, fs, order)\n w, h = freqz(b, a, worN=8000)\n plt.plot((fs * 0.5 / np.pi) * w, abs(h), label=\"order = %d\" % order)\n\n plt.plot(0.5*fs*w/np.pi, np.abs(h), 'b')\n plt.plot(cutoff, 0.5*np.sqrt(2), 'ko')\n plt.axvline(cutoff, color='k')\n plt.xlim(0, 0.5*fs)\n plt.title(\"Lowpass Filter Frequency Response\")\n plt.xlabel('Frequency [Hz]')\n plt.ylabel('Gain')\n plt.grid()\n plt.legend(loc='best')\n plt.plot([0, 0.5 * fs], [np.sqrt(0.5), np.sqrt(0.5)], '--', label='sqrt(0.5)')\n\n # Filter a noisy signal.\n T = 10.00\n nsamples = T * fs\n t = np.linspace(0, T, nsamples, endpoint=False)\n a = 0.02\n f0 = 600.0\n x = 0.1 * np.sin(2 * np.pi * 1.2 * np.sqrt(t))\n x += 0.01 * np.cos(2 * np.pi * 312 * t + 0.1)\n x += a * np.cos(2 * np.pi * f0 * t + .11)\n x += 0.03 * np.cos(2 * np.pi * 2000 * t)\n plt.figure(2)\n plt.clf()\n plt.plot(t, x, label='Noisy signal')\n \n y = butter_lowpass_filter(x, cutoff, fs, order=6)\n plt.plot(t, y, label='Filtered signal (%g Hz)' % f0)\n plt.xlabel('time (seconds)')\n plt.hlines([-a, a], 0, T, linestyles='--')\n plt.grid()\n plt.axis('tight')\n plt.legend(loc='upper left')\n\n plt.show()\n\nrun()",
"# 1.Quickstart_tutorial-1.The_Basics-1.An_eample.py\n#\n# https://docs.scipy.org/doc/numpy/user/quickstart.html\n# The Basics - An example\n\nimport numpy as np\n\na = np.arange(15).reshape(3,5)\n#>>> a\n#array([[ 0, 1, 2, 3, 4],\n# [ 5, 6, 7, 8, 9],\n# [10, 11, 12, 13, 14]])\n\nprint( a.shape )\n#(3, 5)\n\nprint( a.ndim )\n#2\n\nprint( a.dtype.name )\n#int64\n\nprint( a.itemsize )\n#8\n\nprint( a.size )\n#15\n\nprint( type(a) )\n#<class 'numpy.ndarray'>\n\nb = np.array( [6,7,8] )\n#array([6, 7, 8])\n\nprint( type(b) )\n#<class 'numpy.ndarray'>\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\npandas_for_everyone-2_3_the_series.py\n2.3.The Series\n\"\"\"\n\nimport pandas as pd\n\n##################\n# 2.3.The Series #\n##################\n\n# Create our example dataframe with a row index label\nscientists = pd.DataFrame(\n data = {'Occupation': ['Chemist','Statistician'],\n 'Born': ['1920-07-25','1876-06-13'],\n 'Died': ['1958-04-16','1937-10-16'],\n 'Age': [37,61]\n },\n index=['Rosaline Franklin','William Gosset'],\n columns=['Occupation','Born','Died','Age']\n )\nprint( scientists )\n# Occupation Born Died Age\n#Rosaline Franklin Chemist 1920-07-25 1958-04-16 37\n#William Gosset Statistician 1876-06-13 1937-10-16 61\n\nfirst_row = scientists.loc['William Gosset']\nprint( type( first_row) )\n#<class 'pandas.core.series.Series'>\n\nprint( first_row )\n#Occupation Statistician\n#Born 1876-06-13\n#Died 1937-10-16\n#Age 61\n#Name: William Gosset, dtype: object\n\nprint( first_row.index )\n#Index(['Occupation', 'Born', 'Died', 'Age'], dtype='object')\n\nprint( first_row.keys() )\n#Index(['Occupation', 'Born', 'Died', 'Age'], dtype='object')\n\nprint( first_row.values )\n#['Statistician' '1876-06-13' '1937-10-16' 61]\n\n# TODO: Figure this out.\n# Q: I don't know what this is... because it's not mentioned in the book.\nprint( first_row.keys )\n#<bound method Series.keys of\n#Occupation Statistician\n#Born 1876-06-13\n#Died 1937-10-16\n#Age 61\n#Name: William Gosset, dtype: object>\n\n# Index is an attribute, so no ().\nprint( first_row.index[0] )\n#Occupation\n\n# keys is a method, so () is necessary.\nprint( first_row.keys()[0] )\n#Occupation\n# Notice the results are identical!\n\n#####################################\n# 2.3.1. The Series IS ndarray-like #\n#####################################\n\nages = scientists['Age']\nprint( ages )\n#Rosaline Franklin 37\n#William Gosset 61\n#Name: Age, dtype: int64\n\n# A Series and numpy.ndarray share a lot of similarities. \n# (Numpy is a scientific computing library that typically deals with numeric vectors.)\n# A Series can be thought of as an extension to the numpy.ndarray.\n# So there is an overlap of attributes and methods.\n\nprint( ages.mean() )\n#49.0\nprint( ages.min() )\n#37\nprint( ages.max() )\n#61\nprint( ages.std() )\n#16.97056274847714\n\nprint( ages.hist() )\n# Wow, the historygram is shown in the command line.\n\n#####################################\n# 2.3.2. Boolean Subsetting: Series #\n#####################################\nscientists = pd.read_csv('../data/scientists.csv')\n\nages = scientists['Age']\nprint( ages )\n#0 37\n#1 61\n#2 90\n#3 66\n#4 56\n#5 45\n#6 41\n#7 77\n#Name: Age, dtype: int64\n\nprint( ages.describe() )\n#count 8.000000\n#mean 59.125000\n#std 18.325918\n#min 37.000000\n#25% 44.000000\n#50% 58.500000\n#75% 68.750000\n#max 90.000000\n#Name: Age, dtype: float64\n\nprint( ages.mean() )\n#59.125\n\nprint( ages[ ages > ages.mean() ] )\n#1 61\n#2 90\n#3 66\n#7 77\n#Name: Age, dtype: int64\n\nprint( ages > ages.mean() )\n#0 False\n#1 True\n#2 True\n#3 True\n#4 False\n#5 False\n#6 False\n#7 True\n#Name: Age, dtype: bool\n\nprint( type( ages > ages.mean()) )\n#<class 'pandas.core.series.Series'>\n\nmanual_bool_values = [True, True, False, False, True, True, False, True]\nprint( ages[ manual_bool_values ])\n#0 37\n#1 61\n#4 56\n#5 45\n#7 77\n#Name: Age, dtype: int64\n\n##############################################################\n# 2.3.3. 
Operations Are Automatically Aligned and Vectorized #\n# (Broadcasting) #\n##############################################################\n\n#######################################\n# 2.3.3.1. Vectors of the Same Length #\n#######################################\nprint( ages )\n#0 37\n#1 61\n#2 90\n#3 66\n#4 56\n#5 45\n#6 41\n#7 77\n#Name: Age, dtype: int64\n\nprint( ages+ ages )\n#0 74\n#1 122\n#2 180\n#3 132\n#4 112\n#5 90\n#6 82\n#7 154\n#Name: Age, dtype: int64\n\nprint( ages * ages )\n#0 1369\n#1 3721\n#2 8100\n#3 4356\n#4 3136\n#5 2025\n#6 1681\n#7 5929\n#Name: Age, dtype: int64\n\n############################################\n# 2.3.3.2. Vectors With Integers (Scalars) #\n############################################\n# The scalar value is repeatly used across all the elements of the vector 'ages'.\nprint( ages + 100 )\n#0 137\n#1 161\n#2 190\n#3 166\n#4 156\n#5 145\n#6 141\n#7 177\n#Name: Age, dtype: int64\n\nprint( ages * 2 )\n#0 74\n#1 122\n#2 180\n#3 132\n#4 112\n#5 90\n#6 82\n#7 154\n#Name: Age, dtype: int64\n\n###########################################\n# 2.3.3.3. Vectors With Different Lengths #\n###########################################\n# The behavior depends on the type of the vectors (after ages).\n\n#################\n# With a Series #\n#################\n# The operation matches the index.\n# The rest of the resulting vector is filled with NaN, not a number.\nprint( ages + pd.Series([1,100]) )\n#0 38.0\n#1 161.0\n#2 NaN\n#3 NaN\n#4 NaN\n#5 NaN\n#6 NaN\n#7 NaN\n#dtype: float64\n\n####################\n# With other types #\n####################\n# The shapes must match.\n# For example, ValueError is returned with np.array.\n\n# This will cause an error\n#import numpy as np\n#print( ages + np.array( [1,100] ) )\n#ValueError: operands could not be broadcast together with shapes (8,) (2,) \n\n###################################################################\n# 2.3.3.4. Vectors With Common Index Labels (Automatic Alignment) #\n###################################################################\nprint( ages )\n#0 37\n#1 61\n#2 90\n#3 66\n#4 56\n#5 45\n#6 41\n#7 77\n#Name: Age, dtype: int64\n\nrev_ages = ages.sort_index( ascending=False )\nprint( rev_ages )\n#7 77\n#6 41\n#5 45\n#4 56\n#3 66\n#2 90\n#1 61\n#0 37\n#Name: Age, dtype: int64\n# Notice the index is reversed as well as the value.\n\nprint( ages*2 )\n#0 74\n#1 122\n#2 180\n#3 132\n#4 112\n#5 90\n#6 82\n#7 154\n#Name: Age, dtype: int64\n\nprint( ages + rev_ages )\n#0 74\n#1 122\n#2 180\n#3 132\n#4 112\n#5 90\n#6 82\n#7 154\n#Name: Age, dtype: int64\n\n# Note we get the same values to ages*2\n# even though the vector is reversed!\n# Why? The vectors are aligned FIRST before the operation is carried out.\n"
] | [
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.linspace",
"scipy.signal.freqz",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hlines",
"scipy.signal.butter",
"matplotlib.pyplot.axis",
"scipy.signal.lfilter",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"numpy.abs",
"numpy.cos",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel"
],
[
"numpy.arange",
"numpy.array"
],
[
"pandas.read_csv",
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
geetakumri/Moview_Review_Sentiment_Analysis | [
"41f4c17d9115633b000f52268ac768cfe013e808"
] | [
"model_build.py"
] | [
"from sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer, CountVectorizer\nfrom sklearn import svm\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import classification_report, accuracy_score, confusion_matrix, ConfusionMatrixDisplay\nfrom sklearn.linear_model import SGDClassifier, LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nimport joblib\n\n\ndef model_building(data):\n def model_prediction(tfidf,name, model):\n \n # Training the classifier with Naive Bayes\n cassifier = Pipeline([tfidf,\n (name,model),\n ])\n\n cassifier.fit(X_train, Y_train)\n test_predict = cassifier.predict(X_test)\n #print(\"test_predict\", set(test_predict))\n\n train_accuracy = round(cassifier.score(X_train, Y_train)*100)\n test_accuracy = round(accuracy_score(test_predict, Y_test)*100)\n\n print(f\" {name} Train Accuracy Score : {train_accuracy}% \")\n print(f\" {name} Test Accuracy Score : {test_accuracy}% \")\n print()\n joblib.dump(cassifier, open(name, \"wb\"))\n\n\n X_train, X_test, y_train, y_test = train_test_split(data.index.values, data.Sentiment.values, test_size=0.1, random_state=42, stratify=data.Sentiment)\n X_train, X_val, y_train, y_val = train_test_split(X_train,y_train,test_size=.15, random_state=42, stratify=y_train)\n \n data['data_type'] = ['not_set']*data.shape[0]\n data.loc[X_train, 'data_type'] = 'train'\n data.loc[X_val, 'data_type'] = 'val'\n data.loc[X_test,'data_type'] = 'test'\n\n data = data.dropna()\n train_set = data[data['data_type'] == 'train'].drop_duplicates(ignore_index=True)\n val_set = data[data['data_type'] == 'val'].drop_duplicates(ignore_index=True)\n test_set = data[data['data_type'] == 'test'].drop_duplicates(ignore_index=True)\n\n data = pd.concat([train_set, val_set, test_set], ignore_index=True)\n data = data.sample(frac=1, random_state=1).reset_index(drop=True)\n\n X_train = train_set.Phrase.values\n Y_train = train_set.Sentiment.values\n X_test = test_set.Phrase.values\n Y_test = test_set.Sentiment.values\n\n #vect = CountVectorizer(stop_words='english', ngram_range=(1,1), )\n\n models = []\n models.append(('nb_clf', MultinomialNB()))\n models.append(('rf_clf', DecisionTreeClassifier()))\n models.append(('sgd_clf', SGDClassifier()))\n\n for name, model in models:\n model_prediction(('tfidf', TfidfVectorizer()),name, model)\n \n\n\n\n\n \n\n\n"
] | [
[
"pandas.concat",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.pipeline.Pipeline",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.linear_model.SGDClassifier",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
wfondrie/diadem | [
"cf42449ccd305b7fd040f9b03129256f60f13949",
"cf42449ccd305b7fd040f9b03129256f60f13949"
] | [
"diadem/align.py",
"diadem/dataset.py"
] | [
"\"\"\"\nThis module contains the my implementation of the FastDTW algorithm.\n\nThe algorithm is described in http://cs.fit.edu/~pkc/papers/tdm04.pdf.\nThis implementation is losely based on the python package from this\nGitHub repository: https://github.com/slaypni/fastdtw.\n\nMy code deviates from this repository is a few ways to make it more\nuser friendly and amenable to aligning mass spectrometry runs:\n 1. Cython is not needed for good speed, because of numba.\n 2. The input numpy arrays (x and y) can be of any dimensionality, so\n long as the distance function can handle it.\n\nWritten by William E Fondrie, 2019\n\"\"\"\nfrom typing import Tuple, Callable\n\nimport numpy as np\nimport numba as nb\n\n# Distance Functions ----------------------------------------------------------\[email protected]\ndef cosine_distance(x, y, tiny=np.finfo(float).tiny):\n \"\"\"Compute 1 minus the cosine similarity between x and y\"\"\"\n denom = (np.linalg.norm(x) * np.linalg.norm(y) + tiny)\n return 1 - np.dot(x, y) / denom\n\n\n# DTW Functions ---------------------------------------------------------------\ndef fastdtw(x: np.ndarray, y: np.ndarray, radius: int = 1,\n dist: Callable[[np.ndarray, np.ndarray], float]\n = cosine_distance) -> Tuple[float, Tuple[Tuple[int, int]]]:\n \"\"\"\n Find the approximate minimum warping path between x and y.\n\n Parameters\n ----------\n x, y : numpy.ndarray\n Numpy arrays of the series to align. The first dimension is\n always assumed to be the time domain. For example, if aligning\n two mass spectrometry runs by their precursor mass spectra,\n x and y would be of shape [retention time, m/z] where m/z is\n each spectrum vectorized along the m/z axis.\n\n radius : int\n The radius to use for the FastDTW neighborhood.\n\n dist: Callable\n A distance function (not in the strict sense of the word), which\n accepts single time slices of x and y as input and returns their\n distance as a float.\n\n Returns\n -------\n Tuple[float, Tuple[Tuple[int, int]]]\n A tuple containing two elements. The first is the estimated DTW\n distance between x and y. The second is a Tuple of Tuples\n indicating the minimal warping path between x and y. The\n innermost tuple contains the mapping of (x, y) pairs in the\n path.\n \"\"\"\n min_time_size = radius + 2\n\n # The base case\n if x.shape[0] < min_time_size or y.shape[0] < min_time_size:\n return dtw(x, y, dist)\n\n # Recursive state\n shrunk_x = _reduce_by_half(x)\n shrunk_y = _reduce_by_half(y)\n _, path = fastdtw(shrunk_x, shrunk_y, radius=radius)\n window = _expand_window(path, x.shape[0], y.shape[0], radius=radius)\n\n return dtw(x, y, dist, window)\n\n\ndef dtw(x: np.ndarray, y: np.ndarray,\n dist: Callable[[np.ndarray, np.ndarray], float] = cosine_distance,\n _window = None) -> Tuple[float, Tuple[Tuple[int, int]]]:\n \"\"\"\n Find the minimum warping path between x and y.\n\n Parameters\n ----------\n x, y : numpy.ndarray\n Numpy arrays of the series to align. The first dimension is\n always assumed to be the time domain. For example, if aligning\n two mass spectrometry runs by their precursor mass spectra,\n x and y would be of shape [retention time, m/z] where m/z is\n each spectrum vectorized along the m/z axis.\n\n dist: Callable\n A distance function (not in the strict sense of the word), which\n accepts single time slices of x and y as input and returns their\n distance as a float.\n\n Returns\n -------\n Tuple[float, Tuple[Tuple[int, int]]]\n A tuple containing two elements. 
The first is the estimated DTW\n distance between x and y. The second is a Tuple of Tuples\n indicating the minimal warping path between x and y. The\n innermost tuple contains the mapping of (x, y) pairs in the\n path.\n \"\"\"\n if _window is None:\n _window = [(i, j) for i in range(x.shape[0]) for j in range(y.shape[0])]\n\n _window = list(_window)\n return _dtw_main(x, y, dist, _window)\n\n# Utility functions -----------------------------------------------------------\n# This is the implementation of the Dynamic Time Warping algorithm.\n# For some reason the jitted version is wayyyyy slower :(\ndef _dtw_main(x, y, dist, window):\n \"\"\"The DTW algorithm\"\"\"\n res = {}\n res[0, 0] = (float(0), 0, 0)\n\n for i, j in window:\n dt = dist(x[i, ...], y[j, ...])\n moves = ((i, j+1), (i+1, j), (i, j))\n\n val = np.Inf\n for move in moves:\n if move in res:\n if res[move][0] < val:\n val = res[move][0]\n res[i+1, j+1] = (val + dt, *move)\n\n\n path = []\n i, j = x.shape[0], y.shape[0]\n while i or j:\n path.append((i-1, j-1))\n i, j = res[i, j][1], res[i, j][2]\n\n path.reverse()\n return (res[x.shape[0], y.shape[0]][0], tuple(path))\n\n\ndef _reduce_by_half(x):\n \"\"\"Reduce x by half by taking the average.\"\"\"\n max_idx = x.shape[0] - (x.shape[0] % 2)\n return np.array([(x[i, ...] + x[i+1, ...]) / 2 for i in range(0, max_idx, 2)])\n\n\ndef _expand_window(path, len_x, len_y, radius):\n \"\"\"Expands the window around path and returns a new window\"\"\"\n path_ = set(path)\n path_range = range(-radius, radius+1)\n window = set()\n\n for i, j in path:\n for a, b in ((i+a, j+b) for a in path_range for b in path_range):\n if 0 <= a < len_x and 0 <= b < len_y:\n path_.add((a, b))\n\n for i, j in path_:\n i *= 2\n j *= 2\n for a, b in ((i, j), (i, j+1), (i+1, j), (i+1, j+1)):\n if 0 <= a < len_x and 0 <= b < len_y:\n window.add((a, b))\n\n return sorted(window)\n",
"\"\"\"\nClasses for storing and manipulating DIA datasets.\n\nThe DIARun class stores data from a single mzML file from a DIA\nexperiment. It consists of a collection of DIAWindow() instances which\nstore data from the individual DIA MS/MS windows.\n\"\"\"\nimport logging\nimport multiprocessing as mp\nfrom typing import Tuple, Dict\nfrom itertools import chain\n\nimport tqdm\nimport numpy as np\nimport numba as nb\n\nimport diadem.write\nfrom diadem.align import fastdtw\n\nclass DIARun():\n \"\"\"\n Store and manipulate a DIA run.\n\n Initialization loads a data-independent acquisition mass\n spectrometry (DIA) mzML file into memory.\n\n Parameters\n ----------\n mzml_file : str\n The mzML file to read. This can be either a normal mzML file or\n gzipped (mzML.gz).\n\n\n Attributes\n ----------\n windows : \n\n \"\"\"\n def __init__(self, scans, spectrum_matrix=None):\n \"\"\"Initialize a DIARun\"\"\"\n self.spectrum_matrix = spectrum_matrix\n self.windows = {}\n for scan in scans:\n win = scan.window[0]\n curr_win = self.windows.get(win, [])\n curr_win.append(scan)\n self.windows[win] = curr_win\n\n for window, spec_list in self.windows.items():\n self.windows[window] = DIAWindow(window, spec_list)\n\n @property\n def scans(self):\n \"\"\"Get the scans, ordered by index\"\"\"\n all_scans = [s.scans for _, s in self.windows.items()]\n all_scans = list(chain.from_iterable(all_scans))\n all_scans.sort(key=_get_index)\n return all_scans\n\n def mask(self, reference_run, tol=10):\n \"\"\"Mask this run using a reference run\"\"\"\n logging.info(\"Masking run by window...\")\n num_filtered = []\n for win_name, win in tqdm.tqdm(self.windows.items()):\n ref_win = reference_run.windows[win_name]\n before, after = win.mask(ref_win, tol)\n num_filtered.append((win_name, before, after))\n\n total_before = 0\n total_after = 0\n for win in num_filtered:\n logging.info(\"%s: %i -> %i peaks (%0.2f%% remaining)\",\n win[0], win[1], win[2], win[2]/win[1]*100)\n total_before += win[1]\n total_after += win[2]\n\n logging.info(\"Total: %i -> %i peaks (%0.2f%% remaining)\",\n total_before, total_after, total_after/total_before*100)\n\n def align(self, reference_run, radius=1):\n \"\"\"Calibrate the retention time to a reference run.\"\"\"\n logging.info(\"Aligning runs by window...\")\n for win_name, win in tqdm.tqdm(self.windows.items()):\n ref_mat = reference_run.windows[win_name].vectorize()\n targ_mat = win.vectorize()\n _, path = fastdtw(ref_mat, targ_mat, radius=radius)\n indices = _path2map(path, len(win.scans))\n win.reference_index = indices\n\n def write(self, out_file):\n \"\"\"Write a DIARun to mzML\"\"\"\n diadem.write.mzML(self, out_file)\n\n\nclass DIAWindow():\n \"\"\"\n Store data for each DIA MS/MS isolation window.\n\n Parameters\n ----------\n scans : A list of DIAScan\n \"\"\"\n def __init__(self, name, scans):\n self.name = name\n self._scans = scans\n self.sort_scans()\n\n @property\n def tic(self):\n \"\"\"The tic of each scan\"\"\"\n return np.array([s.tic for s in self._scans])\n\n @property\n def original_tic(self):\n return np.array([s.original_tic for s in self._scans])\n\n @property\n def peaks(self):\n return np.array([s.peaks for s in self._scans])\n\n @property\n def original_peaks(self):\n return np.array([s.original_peaks for s in self._scans])\n\n @property\n def ret_time(self):\n \"\"\"The retention times of each scan\"\"\"\n return np.array([s.ret_time for s in self._scans])\n\n @property\n def reference_index(self):\n \"\"\"The reference scan index for each scan\"\"\"\n 
return np.array([s.reference_index for s in self._scans])\n\n @reference_index.setter\n def reference_index(self, indices):\n \"\"\"Set the calibrated retention time for all scans\"\"\"\n for idx, scan in zip(indices, self._scans):\n scan.reference_index = idx\n\n @property\n def scans(self):\n \"\"\"Return the scans in a window\"\"\"\n return self._scans\n\n def sort_scans(self):\n \"\"\"Sort the scans by raw_rt\"\"\"\n self._scans.sort(key=_get_index)\n\n def vectorize(self, **kwargs) -> np.ndarray:\n \"\"\"Vectorize the spectra and return a matrix\"\"\"\n vecs = [s.vectorize(**kwargs) for s in self.scans]\n return np.vstack(vecs)\n\n def mask(self, reference_window, tol):\n \"\"\"Mask using a reference window\"\"\"\n total = 0\n kept = 0\n for scan in self.scans:\n ref_mz = [reference_window.scans[i].mz for i in scan.reference_index]\n before, after = scan.mask(np.concatenate(ref_mz), tol)\n total += before\n kept += after\n\n return (total, kept)\n\n\nclass DIAScan():\n \"\"\"\n Store data for an individual DIA Scan.\n \"\"\"\n def __init__(self, spectrum):\n \"\"\"Initialize a DIAScan() object\"\"\"\n keys = [\"ms level\", \"scanList\", \"m/z array\",\n \"intensity array\", \"id\", \"index\"]\n\n if spectrum[\"ms level\"] == 2:\n keys += [\"precursorList\"]\n\n self._spectrum = {key: spectrum[key] for key in keys}\n self.reference_index = [None]\n self.original_peaks = len(self._spectrum[\"intensity array\"])\n self.original_tic = self._spectrum[\"intensity array\"].sum()\n\n @property\n def ms_level(self) -> int:\n \"\"\"Retrieve the MS level\"\"\"\n return self._spectrum[\"ms level\"]\n\n @property\n def ret_time(self) -> float:\n \"\"\"Get the retention time of the spectrum\"\"\"\n return self._spectrum[\"scanList\"][\"scan\"][0][\"scan start time\"]\n\n @property\n def window(self) -> Tuple[str, float, float, float]:\n \"\"\"Retrieve the DIA window.\"\"\"\n if self.ms_level == 1:\n return (\"precursor\",)\n\n mz_info = self._spectrum[\"precursorList\"][\"precursor\"][0]\n mz_info = mz_info[\"isolationWindow\"]\n mid = mz_info[\"isolation window target m/z\"]\n offset_low = mz_info[\"isolation window lower offset\"]\n offset_high = mz_info[\"isolation window upper offset\"]\n\n return (f\"m/z {mid-offset_low:0.0f}-{mid+offset_high:0.0f}\",\n mid, offset_low, offset_high)\n\n @property\n def mz(self) -> np.ndarray:\n \"\"\"Retrieve the m/z values of the spectrum\"\"\"\n return self._spectrum[\"m/z array\"]\n\n @mz.setter\n def mz(self, mz_array):\n \"\"\"Set the m/z array\"\"\"\n self._spectrum[\"m/z array\"] = mz_array\n\n @property\n def intensity(self) -> np.ndarray:\n \"\"\"Retrieve the intensity values of the spectrum\"\"\"\n return self._spectrum[\"intensity array\"]\n\n @intensity.setter\n def intensity(self, intensity_array):\n \"\"\"Set the intensity array\"\"\"\n self._spectrum[\"intensity array\"] = intensity_array\n\n @property\n def peaks(self) -> int:\n \"\"\"Get the current number of peaks in the spectrum\"\"\"\n return len(self.intensity)\n\n @property\n def activation(self) -> Dict:\n \"\"\"Retrieve the activation method for the spectrum\"\"\"\n if self.ms_level == 1:\n return None\n\n mz_info = self._spectrum[\"precursorList\"][\"precursor\"][0]\n return mz_info[\"activation\"]\n\n @property\n def scan(self) -> str:\n \"\"\"Retrieve the scan header\"\"\"\n return self._spectrum[\"id\"]\n\n @property\n def index(self) -> int:\n \"\"\"Retrieve the scan index number\"\"\"\n return self._spectrum[\"index\"]\n\n @property\n def tic(self) -> float:\n \"\"\"Retrieve the 
total ion current of the spectrum\"\"\"\n return self.intensity.sum()\n\n def vectorize(self, bin_width: float = 1.0005,\n min_mz: float = 0.4, max_mz: float = 2000.0) \\\n -> np.ndarray:\n \"\"\"\n Vectorize the mass spectrum\n\n Parameters\n ----------\n bin_width : float\n The bin width to use for vectorization\n min_mz : float\n The lowest m/z bin.\n max_mz : float\n The highest m/z bin.\n\n Returns\n -------\n numpy.ndarray\n A 1D numpy array of the vectorize spectrum.\n \"\"\"\n return _vectorize(self.mz, self.intensity, bin_width,\n min_mz, max_mz)\n \n\n def mask(self, mask_mz, tol=10) -> Tuple[np.ndarray]:\n \"\"\"\n Mask the mass spectrum by removing peaks within the tolerance any peaks in the list.\n \"\"\"\n # Returns the indexes of ions to keep.\n idx = _mask(self.mz, mask_mz, tol)\n before = len(self.mz)\n self.filter(idx)\n after = len(self.mz)\n\n return (before, after)\n\n\n def filter(self, index: np.ndarray) -> None:\n \"\"\"\n Filter the m/z and intensity arrays jointly, keeping those\n specified by index\n\n Parameters\n ----------\n index : numpy.ndarray\n The indices of elements to keep.\n \"\"\"\n self.mz = self.mz[index]\n self.intensity = self.intensity[index]\n\n\n def preprocess(self, min_intensity: float = None,\n max_peaks: int = None) -> None:\n \"\"\"\n Preprocess a mass spectrum.\n\n Note that this will modify the m/z and intensity arrays.\n\n Parameters\n ----------\n min_intensity : float\n Specify the minimal fraction the base peak intensity that a peak\n must have to be kept. None keeps all.\n max_peaks : float\n The maximum number of most intense peaks to keep. None keeps all.\n\n Returns\n -------\n DIAScan\n A DIAScan object that has been preprocessed.\n \"\"\"\n if min_intensity is not None:\n frac = self.intensity / self.intensity.max()\n self.filter(np.nonzero(frac >= min_intensity))\n\n if max_peaks is not None and max_peaks < len(self.intensity):\n n = -1*max_peaks\n self.filter(np.argpartition(self.intensity, n)[n:])\n\n# Utility Functions -----------------------------------------------------------\ndef _align(ref_mat, targ_mat, radius):\n \"\"\"Align a window\"\"\"\n _, path = fastdtw(ref_mat, targ_mat, radius)\n return _path2map(path, len(target_window.scans))\n\ndef _get_index(scan):\n \"\"\"Return the index of a scan\"\"\"\n return scan.index\n\ndef _path2map(path, targ_length):\n \"\"\"\n Turn a path into a list of lists mapping target scans to one or more\n reference scans.\n \"\"\"\n idx_map = [[] for x in range(targ_length)]\n for step in path:\n idx_map[step[1]].append(step[0])\n\n return idx_map\n\[email protected]\ndef _mask(targ_mz, mask_mz, tol):\n \"\"\"Filter the targ_mz array for peaks that do not have a match in mask_mz\"\"\"\n tol = tol * 1e-6\n ret_indices = []\n for idx, targ in enumerate(targ_mz):\n for mask in mask_mz:\n diff = (targ - mask) / targ\n within_tol = np.abs(diff) <= tol\n if within_tol:\n break\n\n if not within_tol:\n ret_indices.append(idx)\n\n return ret_indices\n\[email protected]\ndef _vectorize(mz_array, intensity_array, bin_width,\n min_mz, max_mz):\n \"\"\"Quickly vectorize a spectrum\"\"\"\n bins = np.arange(min_mz, max_mz, bin_width)\n bin_idx = np.digitize(mz_array, bins)\n unique_idx = np.unique(bin_idx)\n bin_int = [np.max(intensity_array[bin_idx == x]) for x in unique_idx]\n vec = np.zeros(len(bins)+1)\n vec[unique_idx] = np.array(bin_int)\n\n return vec[1:-1] # trim bins outside of (min_mz, max_mz)\n"
] | [
[
"numpy.dot",
"numpy.linalg.norm",
"numpy.finfo"
],
[
"numpy.abs",
"numpy.nonzero",
"numpy.unique",
"numpy.arange",
"numpy.concatenate",
"numpy.max",
"numpy.argpartition",
"numpy.digitize",
"numpy.array",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tclarkin/shread_dash | [
"a45e2f2946c74526e69c087587676aaa4cb15fba"
] | [
"plot_lib/snow_plot.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 27, 2022\n\nSHREAD Dash Snow Plot\n\nScript for running the snow plot in the dashboard (shread_dash.py)\n\n@author: buriona, tclarkin (2020-2022)\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plot_lib.utils import import_snotel,import_csas_live\n\nfrom database import snotel_sites\nfrom database import csas_gages\nfrom plot_lib.utils import screen_spatial,ba_stats_all,ba_stats_std,screen_csas,screen_snotel\nfrom plot_lib.utils import ba_min_plot, ba_max_plot, ba_mean_plot, ba_median_plot\nfrom plot_lib.utils import shade_forecast\n\ndef get_basin_stats(snodas_df,stype=\"swe\"):\n dates = snodas_df[\"Date\"].unique()\n last_date = dates.max()\n snodas_unique = snodas_df[snodas_df[\"Date\"]==last_date]\n mean_el = round(snodas_unique[\"elev_ft\"].mean(),0)\n points = len(snodas_unique)\n area = round(points * 0.386102, 0)\n\n if stype==\"swe\":\n mean_ft = snodas_unique[\"mean\"].mean()/12\n vol_af = round(mean_ft*area*640,0)\n stats = (\n f'Volume: ~{vol_af:,.0f} acre-feet | '\n f'Mean Elevation: {mean_el:,.0f} feet & Area: {area:,.0f} sq.mi. | '\n f'(approximated by {points} points)'\n )\n else:\n stats = (\n f'Mean Elevation: {mean_el:,.0f} feet & Area: {area:,.0f} sq.mi. |'\n f'(approximated by {points} points)'\n )\n\n return stats\n\ndef get_snow_plot(basin, stype, elrange, aspects, slopes, start_date,\n end_date, dtype,snotel_sel,csas_sel,forecast_sel,plot_albedo,\n offline=True):\n \"\"\"\n :description: this function updates the snowplot\n :param basin: the selected basins (checklist)\n :param stype: the snow type (swe/snowdepth)\n :param elrange: the range of elevations ([min,max])\n :param aspects: the range of aspects ([min,max])\n :param slopes: the range of slopes ([min,max])\n :param start_date: start date (from date selector)\n :param end_date: end date (from date selector)\n :param snotel_sel: list of selected snotel sites ([])\n :param albedo: boolean\n :return: update figure\n \"\"\"\n # Set dtype:\n dtype = \"dv\"\n\n # Create date axis\n dates = pd.date_range(start_date, end_date, freq=\"D\", tz='UTC')\n\n # Set snow type based on user selection\n if stype == \"swe\":\n ylabel = \"Mean SWE (in)\"\n dlabel = \"SWE\"\n slabel = \"WTEQ\"\n if stype == \"sd\":\n ylabel = \"Mean Snow Depth (in)\"\n dlabel = \"snow depth\"\n slabel = \"SNWD\"\n\n ## Process SHREAD data\n # Filter data\n if basin == None:\n snodas_plot = False\n snodas_max = np.nan\n basin_stats_str = ''\n else:\n snodas_plot = True\n snodas_df = screen_spatial(\n stype, start_date, end_date, basin, aspects, elrange, slopes\n )\n if snodas_df.empty:\n snodas_plot = False\n snodas_max = np.nan\n basin_stats_str = 'No valid SHREAD data for given parameters'\n else:\n # Calculate basin average values\n ba_snodas = ba_stats_all(snodas_df)\n snodas_max = ba_snodas['95%'].max()\n basin_stats_str = get_basin_stats(snodas_df,stype)\n \n ## Process SNOTEL data (if selected)\n\n # Add data for selected SNOTEL sites\n snotel_s_df = pd.DataFrame(index=dates)\n name_df = pd.DataFrame(index=snotel_sel)\n for s in snotel_sel:\n name_df.loc[s, \"name\"] = str(snotel_sites.loc[s, \"site_no\"]) + \" \" + snotel_sites.loc[s, \"name\"] + \" (\" + str(\n round(snotel_sites.loc[s, \"elev_ft\"], 0)) + \" ft)\"\n if offline:\n snotel_in = screen_snotel(f\"snotel_{s}\", start_date, end_date)\n else:\n snotel_in = import_snotel(s, start_date, end_date, vars=[slabel])\n snotel_in = snotel_s_df.merge(snotel_in[slabel], left_index=True, 
right_index=True, how=\"left\")\n snotel_s_df.loc[:, s] = snotel_in[slabel]\n\n if len(snotel_sel) == 0:\n snotel_max = np.nan\n else:\n snotel_max = snotel_s_df.max().max()\n\n ## Process CSAS data (if selected)\n csas_a_df = pd.DataFrame()\n for site in csas_sel:\n if offline:\n csas_df = screen_csas(site, start_date, end_date,dtype)\n else:\n csas_df = import_csas_live(site,start_date,end_date,dtype)\n\n if (plot_albedo) and (site != \"SBSG\") and (site != \"PTSP\"):\n csas_a_df[site] = csas_df[\"albedo\"]\n\n # Process NDFD, if selected\n\n # Filter data\n rhm = sky = snow = False\n\n if (basin != None) or (len(forecast_sel)>0):\n\n # remove rfc\n if \"flow\" in forecast_sel:\n forecast_sel.remove(\"flow\")\n\n # check if there are still items\n if len(forecast_sel) > 0:\n\n if dtype==\"iv\":\n step=\"D\"\n elif dtype==\"dv\":\n step=\"D\"\n\n ndfd_max = 0\n rhm = sky = snow = False\n for sensor in forecast_sel:\n\n if sensor in [\"qpf\",\"maxt\",\"mint\",\"pop12\"]:\n continue\n\n df = screen_spatial(sensor,start_date,end_date,basin,aspects,elrange,slopes,\"Date\")\n if df.empty:\n continue\n else:\n # Calculate basin average values\n ba_ndfd = ba_stats_std(df, \"Date\")\n ba_ndfd = ba_ndfd.tz_localize(tz=\"utc\")\n\n if sensor!=\"qpf\":\n ba_ndfd = ba_ndfd['mean'].resample(step).mean()\n else:\n ba_ndfd = ba_ndfd['mean'].resample(step).sum()\n\n ndfd = pd.DataFrame(index=dates)\n\n if sensor == \"sky\":\n sky = ndfd.merge(ba_ndfd,left_index=True,right_index=True,how=\"left\")\n\n if sensor == \"snow\":\n snow = ndfd.merge(ba_ndfd-1,left_index=True,right_index=True,how=\"left\")\n\n if sensor == \"rhm\":\n rhm = ndfd.merge(ba_ndfd, left_index=True, right_index=True, how=\"left\")\n\n ### Plot the data\n ymax = np.nanmax([snodas_max,snotel_max,20]) * 1.25\n\n print(\"Updating snow plot...\")\n fig = go.Figure()\n\n if snodas_plot==True:\n fig.add_trace(ba_max_plot(ba_snodas, dlabel))\n fig.add_trace(ba_min_plot(ba_snodas, dlabel))\n fig.add_trace(ba_mean_plot(ba_snodas, dlabel))\n fig.add_trace(ba_median_plot(ba_snodas, dlabel))\n\n for s in snotel_sel:\n fig.add_trace(go.Scatter(\n x=snotel_s_df.index,\n y=snotel_s_df[s],\n text=ylabel,\n mode='lines',\n line=dict(color=snotel_sites.loc[s, \"color\"]),\n name=name_df.loc[s, \"name\"]))\n\n if (plot_albedo) and (offline):\n for c in csas_a_df.columns:\n fig.add_trace(go.Scatter(\n x=csas_a_df.index,\n y=(1-csas_a_df[c])*100,\n text=\"100% - Albedo\",\n mode='lines',\n line=dict(color=csas_gages.loc[c, \"color\"], dash=\"dash\"),\n name=c + \" 100% - Albedo\",\n yaxis=\"y2\"))\n\n if snow is not False:\n fig.add_trace(go.Scatter(\n x=snow.index,\n y=[ymax - 2] * len(snow),\n mode=\"text\",\n textfont=dict(\n color=\"black\"\n ),\n marker=dict(color=\"black\"),\n text=snow.round(2),\n name=\"Snow (in, SWE)\",\n showlegend=False,\n yaxis=\"y1\"\n ))\n\n if sky is not False:\n fig.add_trace(go.Scatter(\n x=sky.index,\n y=[ymax-4]*len(sky),\n mode=\"text\",\n textfont=dict(\n color=\"green\"\n ),\n marker=dict(color=\"green\"),\n text=sky.round(0),\n name=\"Sky Coverage (%)\",\n showlegend=False,\n yaxis=\"y1\"\n ))\n\n if rhm is not False:\n fig.add_trace(go.Scatter(\n x=rhm.index,\n y=[ymax - 6] * len(rhm),\n mode=\"text\",\n textfont=dict(\n color=\"brown\"\n ),\n marker=dict(color=\"brown\"),\n text=rhm.round(0),\n name=\"Relative Humidity\",\n showlegend=False,\n yaxis=\"y1\"\n ))\n\n fig.add_trace(shade_forecast(ymax))\n fig.update_layout(\n xaxis=dict(\n range=[start_date, end_date],\n showline=True,\n linecolor=\"black\",\n 
mirror=True\n ),\n yaxis=dict(\n title = ylabel,\n type = 'linear',\n range = [0, ymax],\n showline = True,\n linecolor = \"black\",\n mirror = True\n ),\n margin={'l': 40, 'b': 40, 't': 10, 'r': 45},\n height=400,\n legend={'x': 0, 'y': 1, 'bgcolor': 'rgba(255,255,255,0.8)'},\n hovermode='closest',\n plot_bgcolor='white',\n )\n if (plot_albedo) and (offline):\n fig.update_layout(\n yaxis2=dict(\n title=\"100% - Albedo\",\n side=\"right\",\n overlaying='y',\n range=[0, 100]),\n margin={'l': 40, 'b': 40, 't': 0, 'r': 40},\n )\n\n if snodas_plot:\n return fig, basin_stats_str\n \n return fig, basin_stats_str\n"
] | [
[
"numpy.nanmax",
"pandas.DataFrame",
"pandas.date_range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
tblut/NNFS | [
"75320c546043bc74f368a7a6edcd8bb70aa90dc4"
] | [
"nnfs/model.py"
] | [
"import numpy as np\nfrom nnfs.layers import Linear\nfrom nnfs.optimizers import SGD\n\n\nclass Model:\n def __init__(self, layers, loss, optimizer=SGD(lr=0.01)):\n self.layers = layers\n self.loss = loss\n self.optimizer = optimizer\n\n def save_weights(self, filename):\n weights = []\n for layer in self.layers:\n for param in layer.get_parameters():\n weights.append(param.value)\n np.savez(filename, *weights)\n\n def load_weights(self, filename):\n weights = np.load(filename)\n param_index = 0\n for layer in self.layers:\n for param in layer.get_parameters():\n param.value = weights[f'arr_{param_index}']\n param_index += 1\n\n def predict(self, inputs):\n outputs = inputs\n for layer in self.layers:\n outputs = layer.forward(outputs)\n return outputs\n\n def train(self, X, y, epochs=20, batch_size=32, validation_data=None, metrics=None, verbose=1):\n history = {'train_loss': [0.0] * epochs}\n if validation_data:\n history['valid_loss'] = [0.0] * epochs\n if metrics:\n for name, _ in metrics.items():\n history[f'train_{name}'] = [0.0] * epochs\n if validation_data:\n history[f'valid_{name}'] = [0.0] * epochs\n\n n_batches = (len(X) + batch_size - 1) // batch_size\n for epoch in range(epochs):\n train_loss = 0.0\n for batch_index in range(n_batches):\n batch_start = batch_index * batch_size\n batch_end = min((batch_index + 1) * batch_size, X.shape[0])\n X_batch = X[batch_start:batch_end, ...]\n y_batch = y[batch_start:batch_end, ...]\n\n y_pred = self.predict(X_batch)\n batch_loss = self.loss(y_pred, y_batch)\n batch_loss += np.sum([layer.get_loss() for layer in self.layers])\n train_loss += batch_loss / n_batches\n\n parameters = []\n grad_in = self.loss.get_grad_in(y_pred, y_batch)\n for layer in reversed(self.layers):\n grad_in = layer.backward(grad_in)\n for param in layer.get_parameters():\n parameters.append(param)\n\n self.optimizer.apply_gradients(parameters)\n\n if metrics:\n for name, metric in metrics.items():\n history[f'train_{name}'][epoch] += metric(y_pred, y_batch) / n_batches\n\n history['train_loss'][epoch] = train_loss\n\n if validation_data:\n valid_loss = 0.0\n n_valid_batches = (len(validation_data[0]) + batch_size - 1) // batch_size\n for batch_index in range(n_valid_batches):\n batch_start = batch_index * batch_size\n batch_end = min((batch_index + 1) * batch_size, validation_data[0].shape[0])\n X_batch = validation_data[0][batch_start:batch_end, ...]\n y_batch = validation_data[1][batch_start:batch_end, ...]\n y_pred = self.predict(X_batch)\n batch_loss = self.loss(y_pred, y_batch)\n batch_loss += np.sum([layer.get_loss() for layer in self.layers])\n valid_loss += batch_loss / n_valid_batches\n if metrics:\n for name, metric in metrics.items():\n history[f'valid_{name}'][epoch] += metric(y_pred, y_batch) / n_valid_batches\n history['valid_loss'][epoch] = valid_loss\n\n if not verbose:\n continue\n log_str = f\"epoch: {epoch+1}/{epochs} - train_loss: {train_loss:.8f}\"\n if metrics:\n for name, metric in metrics.items():\n value = history[f'train_{name}'][epoch]\n log_str += f\" - train_{name}: {value:.8f}\"\n if validation_data:\n log_str += f\" - valid_loss: {valid_loss:.8f}\"\n if metrics:\n for name, metric in metrics.items():\n value = history[f'valid_{name}'][epoch]\n log_str += f\" - valid_{name}: {value:.8f}\"\n print(log_str)\n return history\n"
] | [
[
"numpy.load",
"numpy.savez"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mozhumz/machine_learning_py | [
"880f6778ac16b0a16a80b31972a35304caa91dc1"
] | [
"demoDay25_CNNAndWord2Vec/boston_multi.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n#加载数据集\nboston_housing = tf.keras.datasets.boston_housing\n(train_x,train_y),(test_x,test_y) = boston_housing.load_data()\n\nnum_train=len(train_x) #训练集和测试机中样本的数量\nnum_test=len(test_x)\n\n#对训练样本和测试样本进行标准化(归一化),这里有用到张量的广播运算机制\nx_train=(train_x-train_x.min(axis=0))/(train_x.max(axis=0)-train_x.min(axis=0))\ny_train = train_y\n\nx_test=(test_x-test_x.min(axis=0))/(test_x.max(axis=0)-test_x.min(axis=0))\ny_test = test_y\n\n#生成多元回归需要的二维形式\nx0_train = np.ones(num_train).reshape(-1,1)\nx0_test = np.ones(num_test).reshape(-1,1)\n\n#对张量数据类型转换和进行堆叠\nX_train = tf.cast(tf.concat([x0_train,x_train],axis=1), tf.float32)\nX_test = tf.cast(tf.concat([x0_test, x_test], axis=1), tf.float32)\n\n#将房价转换为列向量\nY_train = tf.constant(y_train.reshape(-1,1), tf.float32)\nY_test = tf.constant(y_test.reshape(-1,1), tf.float32)\n\n#设置超参数\nlearn_rate = 0.01\niter = 2000\ndisplay_step=200\n\n#设置模型变量初始值\nnp.random.seed(612)\nW = tf.Variable(np.random.randn(14,1), dtype = tf.float32)\n\n#训练模型\nmse_train=[]\nmse_test=[]\n\nfor i in range(iter+1):\n with tf.GradientTape() as tape:\n PRED_train = tf.matmul(X_train,W)\n Loss_train = 0.5*tf.reduce_mean(tf.square(Y_train-PRED_train))\n\n PRED_test = tf.matmul(X_test,W)\n Loss_test = 0.5*tf.reduce_mean(tf.square(Y_test-PRED_test))\n\n mse_train.append(Loss_train)\n mse_test.append(Loss_test)\n\n dL_dW = tape.gradient(Loss_train, W)\n W.assign_sub(learn_rate*dL_dW)\n\n if i % display_step == 0:\n print('i: %i, Train_loss:%f, Test_loss: %f' % (i,Loss_train,Loss_test))\n\n\n#可视化输出\nplt.figure(figsize=(20,10))\n\nplt.subplot(221)\nplt.ylabel('MSE')\nplt.plot(mse_train,color = 'blue',linewidth=3)\nplt.plot(mse_test,color = 'red',linewidth=3)\nplt.title('训练误差和测试误差',fontsize = 20)\n\nplt.subplot(222)\nplt.ylabel('Price')\nplt.plot(y_train,color='blue', marker='o', label='true_price')\nplt.plot(PRED_train, color ='red', marker='.', label='predict')\nplt.legend()\nplt.title('训练数据集房价和训练数据集预测房价',fontsize = 20)\n\nplt.subplot(223)\nplt.ylabel('Price')\nplt.plot(y_test, color='blue', marker='o', label='true_price')\nplt.plot(PRED_test, color='red', marker='.', label='predict')\nplt.legend()\nplt.title('测试数据集房价和测试数据集预测房价',fontsize = 20)\n\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"tensorflow.matmul",
"tensorflow.concat",
"numpy.random.seed",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.ones",
"matplotlib.pyplot.plot",
"tensorflow.GradientTape",
"matplotlib.pyplot.subplot",
"numpy.random.randn",
"tensorflow.square",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
tak-sakumoto/formatomato | [
"4713338135b2ac3960cc2f9a6f017199853cdc52"
] | [
"for_imagenet/make_df_imagenet.py"
] | [
"import pandas as pd\nfrom PIL import Image\nfrom pathlib import Path\n\ndef make_df_imagenet(dataset):\n \"\"\"\n Making Pandas Dataframes of the extracted data\n \"\"\"\n # Making lists of class columns\n classes = list(Path(dataset).iterdir())\n classes = [p.stem for p in classes if p.is_dir()]\n class_ids = [i for i in range(len(classes))]\n\n class_df_dict = {\n 'CLASS_ID': class_ids,\n 'CLASS': classes\n }\n\n # Making a Pandas Dataframe\n class_df = pd.DataFrame(class_df_dict)\n\n # Set IMAGE_ID as index\n class_df = class_df.set_index('CLASS_ID')\n\n image_ids = []\n image_names = []\n widths = []\n heights = []\n img_classes = []\n \n # Making lists of image information columns\n for _class in classes:\n img_path_list = list((Path(dataset) / _class).glob('*.JPEG'))\n\n for img_path in img_path_list:\n img = Image.open(img_path)\n image_names.append(img_path.name)\n widths.append(img.width)\n heights.append(img.height)\n img_classes.append(_class)\n \n image_ids = [i for i in range(len(image_names))]\n\n image_df_dict = {\n 'IMAGE_ID': image_ids,\n 'IMAGE_NAME': image_names,\n 'WIDTH': widths,\n 'HEIGHT': heights\n }\n \n # Making a Pandas Dataframe\n image_df = pd.DataFrame(image_df_dict)\n # Set IMAGE_ID as index\n image_df = image_df.set_index('IMAGE_ID')\n\n df_dict = {\n 'IMAGE_ID': image_ids,\n 'IMAGE_NAME': image_names,\n 'CLASS': img_classes\n }\n\n # Making a Pandas Dataframe\n df = pd.DataFrame(df_dict)\n\n # Set IMAGE_ID as index\n df = df.set_index('IMAGE_ID')\n\n return df, image_df, class_df \n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
cyber-meow/Robotic_state_repr_learning | [
"d74fe372bea0b1cf42107450a8c3344a99279e91"
] | [
"utility.py"
] | [
"\n\"\"\"\nUtility functions\n\"\"\"\n\nimport numpy as np\n\n\ndef set_all_args(obj, argdict):\n for k in argdict.keys():\n if hasattr(obj, k):\n setattr(obj, k, argdict[k])\n else:\n print(\"Warning: parameter name {} not found!\".format(k))\n\ndef div0(a,b):\n with np.errstate(divide='ignore', invalid='ignore'):\n c = np.true_divide(a, b)\n c = np.nan_to_num(c)\n return c\n\n"
] | [
[
"numpy.errstate",
"numpy.true_divide",
"numpy.nan_to_num"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yashbonde/GAN-textures | [
"7e9bfa61c474f17812bad2430e63a2383ac85067"
] | [
"mgan.py"
] | [
"\nimport os\nimport time\nimport random\nimport argparse\nimport numpy as np\nfrom tqdm import trange\nfrom types import SimpleNamespace\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import save_image\nfrom torch.utils import tensorboard as tb\n\nfrom maze import Maze\n\nos.makedirs(\"images\", exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n_epochs\", type=int, default=200, help=\"number of epochs of training\")\nparser.add_argument(\"--batch_size\", type=int, default=256, help=\"size of the batches\")\nparser.add_argument(\"--lr\", type=float, default=0.0002, help=\"adam: learning rate\")\nparser.add_argument(\"--b1\", type=float, default=0.5, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\nparser.add_argument(\"--latent_dim\", type=int, default=128, help=\"dimensionality of the latent space\")\nparser.add_argument(\"--size\", type=int, default=32, help=\"size of each image dimension\")\nparser.add_argument(\"--channels\", type=int, default=1, help=\"number of image channels\")\nparser.add_argument(\"--sample_interval\", type=int, default=50, help=\"interval between image sampling\")\nparser.add_argument(\"--model_folder\", type = str, default = \"models\", help = \"folder to save model to\")\nparser.add_argument(\"--save_every\", type = int, default = 50, help = \"interval to save the models\")\nparser.add_argument(\"--seed\", type = int, default = 4, help = \"seed value\")\nopt = parser.parse_args()\nopt = SimpleNamespace(**vars(opt), img_size = opt.size + int(opt.size % 2 == 0))\n\ncuda = True if torch.cuda.is_available() else False\nos.makedirs(opt.model_folder, exist_ok=True)\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\nset_seed(opt.seed)\n\nclass Mazze():\n def __init__(self, w, h):\n self.width = int(w/2)\n self.height = int(h/2)\n\n def __len__(self):\n # just any random number\n return 10000\n\n def __getitem__(self, *args, **kwargs):\n m = Maze().generate(width=self.width, height=self.height)\n m = m._to_str_matrix(_np = True)\n m = torch.from_numpy(m)\n return m\n\n def __iter__(self):\n m = Maze().generate(width=self.width, height=self.height)\n m = m._to_str_matrix(_np = True)\n m = torch.from_numpy(m)\n yield m\n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n\n self.init_size = opt.img_size // 4\n self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))\n\n self.conv_blocks = nn.Sequential(\n nn.BatchNorm2d(128),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 128, 3, stride=1, padding=1),\n nn.BatchNorm2d(128, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 64, 3, stride=1, padding=1),\n nn.BatchNorm2d(64, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(64, opt.channels, 3, stride=1, 
padding=1),\n nn.Tanh(),\n )\n\n def forward(self, z, _pad = False):\n out = self.l1(z)\n out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n img = self.conv_blocks(out)\n if _pad:\n new_size = img.size(2) + 1\n img_padded = torch.ones((img.size(0), 1, new_size, new_size))\n img_padded[:, :, 1:, 1:] = img\n return img_padded\n else:\n return img\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.model = nn.Sequential(\n *discriminator_block(opt.channels, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = opt.img_size // 2 ** 4\n self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())\n\n def forward(self, img):\n out = self.model(img)\n out = out.view(out.shape[0], -1)\n validity = self.adv_layer(out)\n return validity\n\n\n# Loss function\nadversarial_loss = torch.nn.BCELoss()\n\n# Initialize generator and discriminator\ngenerator = Generator()\ndiscriminator = Discriminator()\n\nif cuda:\n generator.cuda()\n discriminator.cuda()\n adversarial_loss.cuda()\n\n# Initialize weights\ngenerator.apply(weights_init_normal)\ndiscriminator.apply(weights_init_normal)\n\n# print(f\"Generator: {generator}\")\n# print(f\"Discriminator: {discriminator}\")\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n# z = Variable(Tensor(np.random.normal(0, 1, (opt.batch_size, opt.latent_dim))))\n# print(f\"Latent: {z.size()}\")\n# gen_imgs = generator(z)\n# print(f\"gen_imgs: {gen_imgs.size()}\")\n# gen_dis = discriminator(gen_imgs.detach())\n# print(gen_dis.size())\n\n# Configure data loader\nm = Mazze(opt.size, opt.size)\n# dataloader = DataLoader(m, batch_size = opt.batch_size)\n# for i,m in enumerate(dataloader):\n# if i: break\n# print(m.shape)\n\n# Optimizers\noptimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\n# ----------\n# Training\n# ----------\n\nwith tb.SummaryWriter(log_dir = opt.model_folder, flush_secs = 20) as sw:\n try:\n global_step = 0\n for epoch in range(opt.n_epochs):\n size = (len(m) // opt.batch_size) + int(len(m) % opt.batch_size != 1)\n pbar = trange(size)\n dataloader = DataLoader(m, batch_size = opt.batch_size)\n for i, imgs in zip(pbar, dataloader):\n b, x, y = imgs.shape\n imgs = imgs.view(b, 1, x, y)\n\n # Adversarial ground truths\n valid = Variable(Tensor(imgs.shape[0], 1).fill_(1.0), requires_grad=False)\n fake = Variable(Tensor(imgs.shape[0], 1).fill_(0.0), requires_grad=False)\n\n # Configure input\n real_imgs = Variable(imgs.type(Tensor))[:,:,1:,1:]\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise as generator input\n z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))\n\n # Generate a batch of images\n gen_imgs = generator(z)\n\n # Loss measures generator's ability to fool the discriminator\n g_loss = adversarial_loss(discriminator(gen_imgs), valid)\n\n g_loss.backward()\n optimizer_G.step()\n\n # 
---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # run the discriminator on real and generated values\n gen_dis = discriminator(gen_imgs.detach())\n real_dis = discriminator(real_imgs)\n\n # Measure discriminator's ability to classify real from generated samples\n real_loss = adversarial_loss(real_dis, valid)\n fake_loss = adversarial_loss(gen_dis, fake)\n d_loss = (real_loss + fake_loss) / 2\n\n d_loss.backward()\n optimizer_D.step()\n\n pbar.set_description(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\"\n % (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())\n )\n\n batches_done = epoch * len(dataloader) + i\n if batches_done % opt.sample_interval == 0:\n print(f\"saving images at: images/{batches_done}.png\")\n images_to_save = gen_imgs.data[:25]\n images_to_save[images_to_save >= 0.5] = 1\n images_to_save[images_to_save < 0.5] = 0\n save_image(images_to_save, f\"images/{batches_done}.png\", nrow=5, normalize=True)\n\n if batches_done % opt.save_every == 0:\n print(f\"Saving model in folder: {opt.model_folder}\")\n torch.save(generator.state_dict(), f\"{opt.model_folder}/generator.pt\")\n torch.save(discriminator.state_dict(), f\"{opt.model_folder}/discriminator.pt\")\n\n sw.add_scalar(\"Dis-Loss/Real\", real_loss.item(), global_step = global_step, walltime = time.time())\n sw.add_scalar(\"Dis-Loss/Fake\", fake_loss.item(), global_step = global_step, walltime = time.time())\n sw.add_scalar(\"Dis-Loss/Total\", d_loss.item(), global_step = global_step, walltime = time.time())\n sw.add_scalar(\"Gen-Loss/Loss\", g_loss.item(), global_step = global_step, walltime = time.time())\n\n gen_img_sharpened = gen_imgs[0].clone()\n gen_img_sharpened[gen_img_sharpened >= 0.5] = 1\n gen_img_sharpened[gen_img_sharpened < 0.5] = 0\n sw.add_image(\"Generated\", gen_img_sharpened, global_step = global_step, walltime = time.time())\n sw.add_image(\"Real\", real_imgs[0], global_step = global_step, walltime = time.time())\n\n global_step += 1\n except KeyboardInterrupt:\n pass\n\nprint(f\"Saving model in folder: {opt.model_folder}\")\ntorch.save(generator.state_dict(), f\"{opt.model_folder}/generator.pt\")\ntorch.save(discriminator.state_dict(), f\"{opt.model_folder}/discriminator.pt\")\n"
] | [
[
"torch.nn.Dropout2d",
"torch.utils.data.DataLoader",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.from_numpy",
"torch.nn.Sigmoid",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.BCELoss",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"numpy.random.seed",
"torch.manual_seed",
"torch.nn.Tanh",
"numpy.random.normal",
"torch.nn.Upsample"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Gerzer/coremltools | [
"47e2010a68668bd1960dca040f5f87c0e66a0cbd",
"47e2010a68668bd1960dca040f5f87c0e66a0cbd"
] | [
"examples/neural_network_inference/tensorflow_converter/Tensorflow_1/linear_mnist_train.py",
"coremltools/converters/nnssa/frontend/tensorflow/graph_pass/constant_propagation.py"
] | [
"from __future__ import print_function\nimport os\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data # Import MINST data\n\ndef linear_model(x):\n # x is the image input\n # mnist data image of shape 28*28=784\n\n # Set model weights\n W = tf.Variable(tf.zeros([784, 10]))\n b = tf.Variable(tf.zeros([10]))\n\n # Construct model\n pred = tf.nn.softmax(tf.matmul(x, W) + b)\n\n # Return the last op\n return pred\n\n\ndef train():\n mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n # instantiate the model in the default graph\n x = tf.placeholder(tf.float32, [None, 784])\n\n print('image_input: ', x) \n #print 'image_input: ', x\n pred = linear_model(x)\n #print 'pred output:', pred\n\n print('pred output:', pred)\n\n # Add training components to it\n # 0-9 digits recognition => 10 classes\n y = tf.placeholder(tf.float32, [None, 10])\n\n # Define training hyper-parameters\n learning_rate = 0.01\n training_epochs = 25\n batch_size = 100\n display_step = 1\n\n # Define Cross Entropy loss\n cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))\n # Use Gradient Descent\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n # Initialize the variables (i.e. assign their default value)\n init = tf.global_variables_initializer()\n\n # Use a saver to save checkpoints\n saver = tf.train.Saver()\n # Training starts here\n with tf.Session() as sess:\n sess.run(init)\n # Training cycle\n for epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = int(mnist.train.num_examples/batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n # Fit training using batch data\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,\n y: batch_ys})\n # Compute average loss\n avg_cost += c / total_batch\n # Display logs per epoch step\n if (epoch+1) % display_step == 0:\n print((\"Epoch: {:04d} , cost= {:.9f}\").format(epoch+1,avg_cost))\n #print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost)\n print('Training Done. Now save the checkpoint...')\n #print 'Training Done. Now save the checkpoint...'\n save_dir = './checkpoints'\n save_path = os.path.join(save_dir, 'model.ckpt')\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n save_path = saver.save(sess, save_path)\n tf.train.write_graph(sess.graph, './', 'model.pbtxt')\n\n\nif __name__ == '__main__':\n\n # Read the data\n train()\n\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function as _\nfrom __future__ import division as _\nfrom __future__ import absolute_import as _\nimport tensorflow as tf\n\nfrom ...graph_pass.delete_constant import delete_unnecessary_constant_nodes\nfrom ....commons import builtins\nfrom ....commons.parse import numpy_val_to_builtin_val\nfrom ....commons.basic_graph_ops import const_determined_nodes\n\n\ndef constant_propagation(nnssa):\n # we are going to rely on the tensorflow graph to perform constant\n # propagation. We construct a new graph comprising of only the\n # constant nodes.\n\n from tensorflow.core.framework import graph_pb2\n from tensorflow.core.framework import node_def_pb2\n new_graph = graph_pb2.GraphDef()\n constant_nodes = set()\n constant_node_num_outputs = {}\n for f in nnssa.functions.values():\n generated_nodes = [k for k, v in f.graph.items() if v.original_node is None]\n const_nodes_in_this_graph = const_determined_nodes(f.graph, set(generated_nodes))\n # we can only run TF on nodes with outputs since we must evaluate\n # tensors and not ops\n const_nodes_in_this_graph = [\n i for i in const_nodes_in_this_graph if f.graph[i].op != \"NoOp\"\n ]\n constant_nodes = constant_nodes.union(set(const_nodes_in_this_graph))\n\n # topological sort const nodes\n topsort = []\n topsort_set = set()\n while len(const_nodes_in_this_graph) > 0:\n for n in const_nodes_in_this_graph:\n if len(set(f.graph[n].inputs).difference(topsort_set)) == 0:\n topsort.append(n)\n topsort_set.add(n)\n\n const_nodes_in_this_graph = set(const_nodes_in_this_graph).difference(topsort_set)\n\n for node in topsort:\n new_node = node_def_pb2.NodeDef()\n new_node.CopyFrom(f.graph[node].original_node)\n if '_class' in new_node.attr:\n del new_node.attr['_class']\n del new_node.input[:]\n new_node.input.extend(f.graph[node].inputs)\n if '_output_shapes' in f.graph[node].attr:\n constant_node_num_outputs[node] = len(f.graph[node].attr['_output_shapes'])\n else:\n constant_node_num_outputs[node] = 1\n new_graph.node.extend([new_node])\n result = {}\n constant_nodes = list(constant_nodes)\n try:\n if len(constant_nodes) > 0:\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(new_graph, name=\"\")\n with tf.compat.v1.Session(graph=graph) as sess:\n query_list = []\n for c in constant_nodes:\n for j in range(constant_node_num_outputs[c]):\n query_list.append(c + ':' + str(j))\n result_list = sess.run(query_list)\n result = {query_list[i]: result_list[i] for i in range(len(query_list))}\n print(query_list)\n for f in nnssa.functions.values():\n for k, v in f.graph.items():\n if k in constant_node_num_outputs:\n if constant_node_num_outputs[k] == 1:\n result_entry = k + ':0'\n try:\n v.value, v.datatype = numpy_val_to_builtin_val(result[result_entry])\n except:\n print(result_entry)\n print(result[result_entry])\n else:\n values = [\n result[k + ':' + str(i)]\n for i in range(constant_node_num_outputs[k])\n ]\n try:\n npval = [numpy_val_to_builtin_val(i) for i in values]\n v.value = [val[0] for val in npval]\n v.datatype = builtins.tuple(tuple([val[1] for val in npval]))\n except:\n print(values)\n except:\n print(\"Constant Propagation pass failed\")\n\n delete_unnecessary_constant_nodes(nnssa)\n"
] | [
[
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.log",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.train.write_graph"
],
[
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.compat.v1.Session",
"tensorflow.core.framework.node_def_pb2.NodeDef",
"tensorflow.core.framework.graph_pb2.GraphDef"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mlopstemplates/Ignitedemo | [
"9a8329d8aaa4c82b0f322b6e677df5b1769050ea"
] | [
"code/train/train.py"
] | [
"import os\nimport argparse\nimport itertools\nimport numpy as np\nimport joblib\nimport matplotlib.pyplot as plt\n\nfrom sklearn import datasets\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score\nfrom sklearn.model_selection import train_test_split\n\nfrom azureml.core import Dataset, Run\nrun = Run.get_context()\n\n\ndef log_confusion_matrix_image(cm, labels, normalize=False, log_name='confusion_matrix', title='Confusion matrix', cmap=plt.cm.Blues):\n '''\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n '''\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(labels))\n plt.xticks(tick_marks, labels, rotation=45)\n plt.yticks(tick_marks, labels)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment=\"center\", color='white' if cm[i, j] > thresh else 'black')\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n run.log_image(log_name, plot=plt)\n plt.savefig(os.path.join('outputs', '{0}.png'.format(log_name)))\n\n\ndef log_confusion_matrix(cm, labels):\n # log confusion matrix as object\n cm_json = {\n 'schema_type': 'confusion_matrix',\n 'schema_version': 'v1',\n 'data': {\n 'class_labels': labels,\n 'matrix': cm.tolist()\n }\n }\n run.log_confusion_matrix('confusion_matrix', cm_json)\n\n # log confusion matrix as image\n log_confusion_matrix_image(cm, labels, normalize=False, log_name='confusion_matrix_unnormalized', title='Confusion matrix')\n\n # log normalized confusion matrix as image\n log_confusion_matrix_image(cm, labels, normalize=True, log_name='confusion_matrix_normalized', title='Normalized confusion matrix')\n\n\ndef main(args):\n # create the outputs folder\n os.makedirs('outputs', exist_ok=True)\n\n # Log arguments\n run.log('Kernel type', np.str(args.kernel))\n run.log('Penalty', np.float(args.penalty))\n\n # Load iris dataset\n X, y = datasets.load_iris(return_X_y=True)\n\n # dividing X,y into train and test data\n x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=223)\n data = {'train': {'X': x_train, 'y': y_train},\n 'test': {'X': x_test, 'y': y_test}}\n\n # train a SVM classifier\n svm_model = SVC(kernel=args.kernel, C=args.penalty, gamma='scale').fit(data['train']['X'], data['train']['y'])\n svm_predictions = svm_model.predict(data['test']['X'])\n\n # accuracy for X_test\n accuracy = svm_model.score(data['test']['X'], data['test']['y'])\n print('Accuracy of SVM classifier on test set: {:.2f}'.format(accuracy))\n run.log('Accuracy', np.float(accuracy))\n\n # precision for X_test\n precision = precision_score(svm_predictions, data[\"test\"][\"y\"], average='weighted')\n print('Precision of SVM classifier on test set: {:.2f}'.format(precision))\n run.log('precision', precision)\n\n # recall for X_test\n recall = recall_score(svm_predictions, data[\"test\"][\"y\"], average='weighted')\n print('Recall of SVM classifier on test set: {:.2f}'.format(recall))\n run.log('recall', recall)\n\n # f1-score for X_test\n f1 = f1_score(svm_predictions, data[\"test\"][\"y\"], 
average='weighted')\n print('F1-Score of SVM classifier on test set: {:.2f}'.format(f1))\n run.log('f1-score', f1)\n\n # create a confusion matrix\n labels = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']\n labels_numbers = [0, 1, 2]\n cm = confusion_matrix(y_test, svm_predictions, labels_numbers)\n log_confusion_matrix(cm, labels)\n\n # files saved in the \"outputs\" folder are automatically uploaded into run history\n model_file_name = \"model.pkl\"\n joblib.dump(svm_model, os.path.join('outputs', model_file_name))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--kernel', type=str, default='rbf', help='Kernel type to be used in the algorithm')\n parser.add_argument('--penalty', type=float, default=1.0, help='Penalty parameter of the error term')\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args=args)\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"numpy.str",
"sklearn.metrics.precision_score",
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"sklearn.svm.SVC",
"sklearn.metrics.f1_score",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"sklearn.metrics.recall_score",
"numpy.float",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ktian08/6784-drugs | [
"7c3ae9f65ce60b031008b0026bb9b954575315fa",
"7c3ae9f65ce60b031008b0026bb9b954575315fa",
"7c3ae9f65ce60b031008b0026bb9b954575315fa"
] | [
"datasetIO.py",
"collect_crossvalidation_results.py",
"get_candidate_features.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nAndrew D. Rouillard\nComputational Biologist\nTarget Sciences\nGSK\[email protected]\n\"\"\"\n\nimport os\nimport gzip\nimport pickle\nimport numpy as np\nimport dataclasses as dc\n\ndef load_datasetinfo(datasetspath):\n dataset_info = []\n with open(datasetspath, mode='rt', encoding=\"utf-8\", errors=\"surrogateescape\") as fr:\n fields = [x.strip() for x in fr.readline().split('\\t')]\n for line in fr:\n entries = [x.strip() for x in line.split('\\t')]\n dataset_info.append({field:entry for field,entry in zip(fields,entries)})\n return dataset_info\n\ndef save_datasetinfo(datasetspath, dataset_infos):\n fields = sorted(dataset_infos[0].keys())\n with open(datasetspath, mode='wt', encoding='utf-8', errors='surrogateescape') as fw:\n fw.write('\\t'.join(fields) + '\\n')\n for dataset_info in dataset_infos:\n entries = [dataset_info[field] for field in fields]\n fw.write('\\t'.join([entry if type(entry)==str else '{0:1.6g}'.format(entry) for entry in entries]) + '\\n')\n\ndef append_datasetinfo(datasetspath, dataset_info):\n fields = sorted(dataset_info.keys())\n entries = [dataset_info[field] for field in fields]\n if not os.path.exists(datasetspath):\n with open(datasetspath, mode='wt', encoding='utf-8', errors='surrogateescape') as fw:\n fw.write('\\t'.join(fields) + '\\n')\n with open(datasetspath, mode='at', encoding='utf-8', errors='surrogateescape') as fw:\n fw.write('\\t'.join([entry if type(entry)==str else '{0:1.6g}'.format(entry) for entry in entries]) + '\\n')\n\ndef load_examples(examplespath):\n examples = set()\n with open(examplespath, mode='rt', encoding='utf-8', errors='surrogateescape') as fr:\n fr.readline()\n for line in fr:\n examples.add(line.split('\\t', maxsplit=1)[0].strip())\n return examples\n\ndef load_clusterassignments(clusterassignmentspath):\n if '.pickle' in clusterassignmentspath:\n with open(clusterassignmentspath, 'rb') as fr:\n return pickle.load(fr)\n else:\n item_cluster = {}\n with open(clusterassignmentspath, mode='rt', encoding='utf-8', errors='surrogateescape') as fr:\n fr.readline()\n for line in fr:\n item, cluster = [x.strip() for x in line.split('\\t')]\n item_cluster[item] = int(cluster)\n return item_cluster\n\ndef save_clusterassignments(clusterassignmentspath, item_cluster, itemname):\n if '.pickle' in clusterassignmentspath:\n with open(clusterassignmentspath, 'wb') as fw:\n pickle.dump(item_cluster, fw)\n else:\n with open(clusterassignmentspath, mode='wt', encoding='utf-8', errors='surrogateescape') as fw:\n fw.write('\\t'.join([itemname, 'cluster']) + '\\n')\n for item, cluster in item_cluster.items():\n fw.write('\\t'.join([item, str(cluster)]) + '\\n')\n\ndef load_datamatrix(datasetpath, delimiter='\\t', dtype='float64', getmetadata=True, getmatrix=True):\n if '.pickle' in datasetpath:\n with open(datasetpath, 'rb') as fr:\n return pickle.load(fr)\n else:\n if '.gz' in datasetpath:\n openfunc = gzip.open\n else:\n openfunc = open\n with openfunc(datasetpath, mode='rt', encoding=\"utf-8\", errors=\"surrogateescape\") as fr:\n rowmeta = {}\n columnmeta = {}\n rowlabels = []\n entries = [x.strip() for x in fr.readline().split(delimiter)]\n skipcolumns = sum([entry=='#' for entry in entries]) + 1\n columnname = entries[skipcolumns-1]\n columnlabels = np.array(entries[skipcolumns:], dtype='object')\n firstentry = entries[0]\n skiprows = 1\n if getmetadata:\n while firstentry == '#':\n entries = [x.strip() for x in fr.readline().split(delimiter)]\n columnmetaname = entries[skipcolumns-1].split('/')[-1]\n 
if columnmetaname.lower() != 'na':\n columnmeta[columnmetaname] = np.array(entries[skipcolumns:], dtype='object')\n firstentry = entries[0]\n skiprows += 1\n rowname = firstentry\n rowmetanames = entries[1:skipcolumns]\n if len(rowmetanames) > 0:\n rowmetanames[-1] = rowmetanames[-1].split('/')[0]\n rowmetaname_idx = {}\n for i, rowmetaname in enumerate(rowmetanames):\n if rowmetaname.lower() != 'na':\n rowmeta[rowmetaname] = []\n rowmetaname_idx[rowmetaname] = i\n for line in fr:\n entries = [x.strip() for x in line.split(delimiter, maxsplit=skipcolumns)[:skipcolumns]]\n rowlabels.append(entries.pop(0))\n for rowmetaname, idx in rowmetaname_idx.items():\n rowmeta[rowmetaname].append(entries[idx])\n rowlabels = np.array(rowlabels, dtype='object')\n for rowmetaname, rowmetavalues in rowmeta.items():\n rowmeta[rowmetaname] = np.array(rowmetavalues, dtype='object')\n else:\n while firstentry == '#':\n entries = [x.strip() for x in fr.readline().split(delimiter)]\n firstentry = entries[0]\n skiprows += 1\n rowname = firstentry\n for line in fr:\n rowlabels.append(line.split(delimiter, maxsplit=1)[0].strip())\n rowlabels = np.array(rowlabels, dtype='object')\n if getmatrix:\n matrix = np.loadtxt(datasetpath, dtype=dtype, delimiter=delimiter, skiprows=skiprows,\n usecols=range(skipcolumns,len(columnlabels)+skipcolumns), ndmin=2)\n else:\n matrix = np.zeros((0,0), dtype=dtype)\n matrixname = rowname + '_' + columnname + '_associations_from_' + datasetpath\n return dc.datamatrix(rowname, rowlabels, columnname, columnlabels, matrixname, matrix, rowmeta, columnmeta)\n\ndef save_datamatrix(datasetpath, dm):\n if '.pickle' in datasetpath:\n with open(datasetpath, 'wb') as fw:\n pickle.dump(dm, fw)\n else:\n if '.gz' in datasetpath:\n openfunc = gzip.open\n else:\n openfunc = open\n np.savetxt(datasetpath.replace('.txt', '.temp.txt'), dm.matrix, fmt='%1.6g', delimiter='\\t', newline='\\n')\n with openfunc(datasetpath, mode='wt', encoding=\"utf-8\", errors=\"surrogateescape\") as fw, openfunc(datasetpath.replace('.txt', '.temp.txt'), 'rt') as fr:\n rowmeta_names_and_dtypes = [(k,v.dtype) for k,v in dm.rowmeta.items()]\n spacers = ['#' for x in range(len(rowmeta_names_and_dtypes)+1)]\n fw.write('\\t'.join(spacers + [dm.columnname] + dm.columnlabels.tolist()) + '\\n')\n for columnmetaname, columnmetadata in dm.columnmeta.items():\n if columnmetadata.dtype == 'object':\n fw.write('\\t'.join(spacers + [columnmetaname] + columnmetadata.tolist()) + '\\n')\n else:\n fw.write('\\t'.join(spacers + [columnmetaname] + ['{0:1.6g}'.format(x) for x in columnmetadata]) + '\\n')\n fw.write('\\t'.join([dm.rowname] + [k for k,t in rowmeta_names_and_dtypes] + ['na/na'] + ['na' for i in range(dm.shape[1])]) + '\\n')\n for i, line in enumerate(fr):\n rowmetadata = [dm.rowmeta[k][i] if t=='object' else '{0:1.6g}'.format(dm.rowmeta[k][i]) for k,t in rowmeta_names_and_dtypes]\n fw.write('\\t'.join([dm.rowlabels[i]] + rowmetadata + ['na']) + '\\t' + line)\n os.remove(datasetpath.replace('.txt', '.temp.txt'))\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nAndrew D. Rouillard\nComputational Biologist\nTarget Sciences\nGSK\[email protected]\n\"\"\"\n\nimport os\nimport copy\nimport numpy as np\nimport datasetIO\nimport dataclasses\nimport modelevaluation\n\nvalidation_reps = 200\nvalidation_folds = 5\n\nclassifier_cutoff = 'mcc_cutoff'\nclassifier_stats = np.array(['p', 'n', 'ap', 'an', 'pp', 'pn', 'tp', 'fp', 'tn', 'fn', 'tpr', 'fpr', 'auroc', 'fnr', 'tnr',\n 'mcr', 'acc', 'fdr', 'ppv', 'auprc', 'fomr', 'npv', 'plr', 'nlr', 'dor', 'drr', 'darr',\n 'mrr', 'marr', 'f1s', 'mcc', 'fnlp'], dtype='object')\n\n# classifier stats for each of 200 repetitions of cross-validation\nstat_rep = dataclasses.datamatrix(rowname='classifier_performance_stat',\n rowlabels=classifier_stats.copy(),\n rowmeta={},\n columnname='validation_rep',\n columnlabels=np.array(['Rep'+str(x) for x in range(validation_reps)], dtype='object'),\n columnmeta={'validation_folds':np.zeros(validation_reps, dtype='int64')},\n matrixname='crossvalidation_classifier_performance_stats_across_validation_reps',\n matrix=np.zeros((classifier_stats.size, validation_reps), dtype='float64'))\n\n# classifier stats for each of 200reps*5folds=1000 train-test cycles\nstat_fold = dataclasses.datamatrix(rowname='classifier_performance_stat',\n rowlabels=classifier_stats.copy(),\n rowmeta={},\n columnname='validation_rep_and_fold',\n columnlabels=np.full(validation_reps*validation_folds, '', dtype='object'),\n columnmeta={'validation_rep':np.zeros(validation_reps*validation_folds, dtype='int64'),\n 'validation_fold':np.zeros(validation_reps*validation_folds, dtype='int64'),\n 'num_features':np.zeros(validation_reps*validation_folds, dtype='int64'),\n 'features':np.full(validation_reps*validation_folds, '', dtype='object'),\n 'model_type':np.full(validation_reps*validation_folds, '', dtype='object')},\n matrixname='crossvalidation_classifier_performance_stats_across_validation_reps_and_folds',\n matrix=np.zeros((classifier_stats.size, validation_reps*validation_folds), dtype='float64'))\n\n# iterate over cross-validation reps and folds\nprint('iterating over cross-validation reps and folds...', flush=True)\nR = 0\nfor validation_rep in range(validation_reps):\n Y = np.zeros(0, dtype='bool')\n P = np.zeros(0, dtype='float64')\n F = 0\n for validation_fold in range(validation_folds):\n gene_model_path = 'datasets/useful_features/rep{0!s}_fold{1!s}/gene_model_selected.txt.gz'.format(validation_rep, validation_fold) \n stat_model_path = 'datasets/useful_features/rep{0!s}_fold{1!s}/stat_model_selected.txt.gz'.format(validation_rep, validation_fold) \n if os.path.exists(gene_model_path):\n \n # load predictions for validation and unlabelled examples\n print('loading predictions for validation and unlabelled examples...', flush=True)\n gene_model = datasetIO.load_datamatrix(gene_model_path)\n stat_model = datasetIO.load_datamatrix(stat_model_path)\n isunknown = gene_model.rowmeta['class'] == 'unknown'\n Yf = gene_model.rowmeta['class'][~isunknown] == 'positive'\n Pf = gene_model.matrix[~isunknown,:].reshape(-1)\n \n # evaluate performance of predictions on individual fold\n print('evaluating performance of predictions on individual fold...', flush=True)\n stat_cut = modelevaluation.get_classifier_performance_stats(Y=Yf, P=Pf, classifier_stats=classifier_stats, plot_curves=False, get_priority_cutoffs=True)\n stat_fold.matrix[:,validation_rep*validation_folds+validation_fold] = stat_cut.matrix[:,stat_cut.columnmeta[classifier_cutoff]].reshape(-1)\n 
stat_fold.columnmeta['validation_rep'][validation_rep*validation_folds+validation_fold] = validation_rep\n stat_fold.columnmeta['validation_fold'][validation_rep*validation_folds+validation_fold] = validation_fold\n stat_fold.columnmeta['num_features'][validation_rep*validation_folds+validation_fold] = stat_model.columnmeta['num_features'][0]\n stat_fold.columnmeta['features'][validation_rep*validation_folds+validation_fold] = stat_model.columnmeta['features'][0]\n stat_fold.columnmeta['model_type'][validation_rep*validation_folds+validation_fold] = stat_model.columnmeta['model_type'][0]\n stat_fold.columnlabels[validation_rep*validation_folds+validation_fold] = 'Rep{0!s}Fold{1!s}'.format(validation_rep,validation_fold)\n print(' rep {0:1.3g} fold {1:1.3g} auroc {2:1.3g} auprc {3:1.3g}'.format(validation_rep, validation_fold, stat_fold.select('auroc',[])[validation_rep*validation_folds+validation_fold], stat_fold.select('auprc',[])[validation_rep*validation_folds+validation_fold]), flush=True)\n print(' model_type:{0} num_features:{1} features:{2}'.format(stat_model.columnmeta['model_type'][0], stat_model.columnmeta['num_features'][0], stat_model.columnmeta['features'][0]), flush=True) \n print(' inner_loop auroc {0:1.3g} auprc {1:1.3g}'.format(stat_model.select('auroc_mean',[]), stat_model.select('auprc_mean',[])), flush=True)\n \n # collect fold predictions\n print('collecting fold predictions...', flush=True) \n Y = np.append(Y, Yf)\n P = np.append(P, Pf)\n if F == 0:\n gene_fold = copy.deepcopy(gene_model)\n else:\n all_genes = np.union1d(gene_fold.rowlabels, gene_model.rowlabels)\n gene_fold = gene_fold.tolabels(rowlabels=all_genes, fillvalue=np.nan)\n gene_model = gene_model.tolabels(rowlabels=all_genes, fillvalue=np.nan)\n gene_fold.append(gene_model, 1)\n F += 1\n \n if F > 0:\n # evaluate performance of predictions on all folds\n print('evaluating performance of predictions on all folds...', flush=True)\n stat_cut = modelevaluation.get_classifier_performance_stats(Y=Y, P=P, classifier_stats=classifier_stats, plot_curves=False, get_priority_cutoffs=True)\n stat_rep.matrix[:,validation_rep] = stat_cut.matrix[:,stat_cut.columnmeta[classifier_cutoff]].reshape(-1)\n stat_rep.columnmeta['validation_folds'][validation_rep] = F\n gene_fold.matrix = np.nanmean(gene_fold.matrix, 1, keepdims=True)\n gene_fold.columnlabels = np.array(['Rep'+str(validation_rep)], dtype='object')\n gene_fold.columnmeta = {'validation_folds':np.array([F], dtype='int64')}\n gene_fold.columnname = 'validation_rep'\n gene_fold.updatesizeattribute()\n gene_fold.updateshapeattribute()\n if R == 0:\n gene_rep = copy.deepcopy(gene_fold)\n else:\n all_genes = np.union1d(gene_fold.rowlabels, gene_rep.rowlabels)\n gene_fold = gene_fold.tolabels(rowlabels=all_genes, fillvalue=np.nan)\n gene_rep = gene_rep.tolabels(rowlabels=all_genes, fillvalue=np.nan)\n gene_rep.append(gene_fold, 1)\n R += 1\n print(' rep {0:1.3g} folds {1:1.3g} auroc {2:1.3g} auprc {3:1.3g}'.format(validation_rep, F, stat_rep.select('auroc',[])[validation_rep], stat_rep.select('auprc',[])[validation_rep]), flush=True)\n\nstat_fold.discard((stat_fold.matrix==0).all(0), 1)\nstat_rep.discard((stat_rep.matrix==0).all(0), 1)\n\n# save cross-validation performance stats for folds and reps\nprint('saving cross-validation performance stats for folds and reps...', flush=True)\ndatasetIO.save_datamatrix('datasets/useful_features/stat_fold_crossvalidation.pickle', stat_fold)\ndatasetIO.save_datamatrix('datasets/useful_features/stat_fold_crossvalidation.txt.gz', 
stat_fold)\ndatasetIO.save_datamatrix('datasets/useful_features/stat_rep_crossvalidation.pickle', stat_rep)\ndatasetIO.save_datamatrix('datasets/useful_features/stat_rep_crossvalidation.txt.gz', stat_rep)\ndatasetIO.save_datamatrix('datasets/useful_features/gene_rep_crossvalidation.pickle', gene_rep)\ndatasetIO.save_datamatrix('datasets/useful_features/gene_rep_crossvalidation.txt.gz', gene_rep)\n\nprint('done.', flush=True)\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nAndrew D. Rouillard\nComputational Biologist\nTarget Sciences\nGSK\[email protected]\n\"\"\"\n\nimport os\nimport numpy as np\nimport copy\nimport datasetIO\nimport dataclasses\n\ndef main():\n \n # load class examples\n print('loading class examples...', flush=True)\n class_examples_folder = 'targets/pharmaprojects'\n class_examples = {'positive':datasetIO.load_examples('{0}/positive.txt'.format(class_examples_folder)),\n 'negative':datasetIO.load_examples('{0}/negative.txt'.format(class_examples_folder)),\n 'unknown':datasetIO.load_examples('{0}/unknown.txt'.format(class_examples_folder))}\n \n # load dataset info\n print('loading dataset info...', flush=True)\n dataset_info_path = 'datasets/harmonizome/dataset_info.txt'\n dataset_infos = datasetIO.load_datasetinfo(dataset_info_path)\n \n # specify results folder\n print('specifying results folder...', flush=True)\n results_folder = 'datasets/candidate_features'\n if not os.path.exists(results_folder):\n os.mkdir(results_folder)\n \n # iterate over datasets\n print('iterating over datasets...', flush=True)\n for dataset_info in dataset_infos:\n \n# # just work with hpatissuesmrna for testing/debugging the pipeline\n# if dataset_info['abbreviation'] != 'hpatissuesmrna_cleaned':\n# print('skipping {0}. not in testing set...'.format(dataset_info['abbreviation']), flush=True)\n# continue\n \n # check if another python instance is already working on this dataset\n if os.path.exists('{0}/{1}_in_progress.txt'.format(results_folder, dataset_info['abbreviation'])):\n print('skipping {0}. already in progress...'.format(dataset_info['abbreviation']), flush=True)\n continue\n \n # log start of processing\n with open('{0}/{1}_in_progress.txt'.format(results_folder, dataset_info['abbreviation']), mode='wt', encoding='utf-8', errors='surrogateescape') as fw:\n print('working on {0}...'.format(dataset_info['abbreviation']), flush=True)\n fw.write('working on {0}...'.format(dataset_info['abbreviation']))\n \n # load dataset\n print('loading dataset...', flush=True)\n gene_atb = datasetIO.load_datamatrix(datasetpath=dataset_info['path'])\n dataset_info['original_genes'] = gene_atb.shape[0]\n dataset_info['original_features'] = gene_atb.shape[1]\n \n # decide feature normalization\n print('deciding feature normalization...', flush=True)\n if ('standardized' in dataset_info['abbreviation'] or 'cleaned' in dataset_info['abbreviation']) and (gene_atb.matrix == 0).sum()/gene_atb.size <= 0.5:\n # dataset is many-valued and filled-in\n print(' dataset is many-valued and filled-in...', flush=True)\n print(' z-scoring features...', flush=True)\n dataset_info['feature_normalization'] = 'z-score'\n mnv = np.nanmean(gene_atb.matrix, axis=0, keepdims=True)\n sdv = np.nanstd(gene_atb.matrix, axis=0, keepdims=True)\n gene_atb.matrix = (gene_atb.matrix - mnv)/sdv\n gene_atb.columnmeta['mean'] = mnv.reshape(-1)\n gene_atb.columnmeta['stdv'] = sdv.reshape(-1)\n else:\n # dataset is binary or tertiary or sparse\n print(' dataset is binary, tertiary, or sparse...', flush=True)\n print(' no feature normalization...', flush=True)\n dataset_info['feature_normalization'] = 'none'\n \n # assign class labels to genes\n print('assigning class labels to genes...', flush=True)\n gene_atb.rowmeta['class'] = np.full(gene_atb.shape[0], 'unknown', dtype='object')\n gene_atb.rowmeta['class'][np.in1d(gene_atb.rowlabels, list(class_examples['positive']))] = 'positive'\n gene_atb.rowmeta['class'][np.in1d(gene_atb.rowlabels, list(class_examples['negative']))] 
= 'negative'\n \n # add dataset mean and stdv as features\n print('adding dataset mean and stdv as features...', flush=True)\n gene_stat = dataclasses.datamatrix(rowname=gene_atb.rowname,\n rowlabels=gene_atb.rowlabels.copy(),\n rowmeta=copy.deepcopy(gene_atb.rowmeta),\n columnname=gene_atb.columnname,\n columnlabels=np.array(['mean', 'stdv'], dtype='object'),\n columnmeta={},\n matrixname=gene_atb.matrixname,\n matrix=np.append(gene_atb.matrix.mean(1, keepdims=True), gene_atb.matrix.std(1, keepdims=True), 1))\n gene_atb.append(gene_stat, 1)\n gene_atb.columnmeta['isrowstat'] = np.in1d(gene_atb.columnlabels, gene_stat.columnlabels)\n del gene_stat\n \n # identify features with little information about labelled examples\n print('identifying features with little information about labelled examples...', flush=True)\n isunknown = gene_atb.rowmeta['class'] == 'unknown'\n tobediscarded = np.logical_or.reduce(((gene_atb.matrix[~isunknown,:] != 0).sum(axis=0) < 3, (gene_atb.matrix[~isunknown,:] != 1).sum(axis=0) < 3, np.isnan(gene_atb.matrix[~isunknown,:]).any(axis=0)))\n if tobediscarded.any():\n # discard features\n print(' discarding {0!s} features. {1!s} features remaining...'.format(tobediscarded.sum(), (~tobediscarded).sum()), flush=True)\n gene_atb.discard(tobediscarded, axis=1)\n else:\n # keep all features\n print(' no features to discard. {0!s} features remaining...'.format(gene_atb.shape[1]), flush=True)\n \n # save if dataset has content\n print('saving if dataset has content...', flush=True)\n if gene_atb.shape[0] == 0 or gene_atb.shape[1] == 0:\n # no content\n print(' nothing to save...', flush=True)\n else:\n # save candidate features\n print(' saving {0!s} candidate features...'.format(gene_atb.shape[1]), flush=True)\n dataset_info['path'] = '{0}/{1}.txt.gz'.format(results_folder, dataset_info['abbreviation'])\n dataset_info['candidate_genes'] = gene_atb.shape[0]\n dataset_info['candidate_features'] = gene_atb.shape[1]\n dataset_info['positive_examples'] = (gene_atb.rowmeta['class'] == 'positive').sum()\n dataset_info['negative_examples'] = (gene_atb.rowmeta['class'] == 'negative').sum()\n dataset_info['unknown_examples'] = (gene_atb.rowmeta['class'] == 'unknown').sum()\n datasetIO.save_datamatrix(dataset_info['path'], gene_atb)\n datasetIO.append_datasetinfo('{0}/dataset_info.txt'.format(results_folder), dataset_info)\n \n print('done.', flush=True)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"numpy.zeros"
],
[
"numpy.union1d",
"numpy.full",
"numpy.append",
"numpy.nanmean",
"numpy.array",
"numpy.zeros"
],
[
"numpy.isnan",
"numpy.in1d",
"numpy.full",
"numpy.nanmean",
"numpy.nanstd",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
0xLiso/DeepLearningFromScratch | [
"997e94953b9e5e1ffd8c38af9277e7925e0b4ea7"
] | [
"solutions/python/Lesson02-03/Operation.py"
] | [
"import numpy as np\n\nfrom Tensor import Tensor\n\n\nclass Operation:\n\tresult = None\n\tdef forward(self):\n\t\traise NotImplementedError\n\tdef backward(self, gradOutput: Tensor):\n\t\traise NotImplementedError\n\n\nclass Negative(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\t\n\n\tdef forward(self):\n\t\tself.result = -self.A \n\n\nclass Add(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\n\tdef forward(self):\n\t\tself.result = self.A + self.B\n\n\nclass Substract(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\n\tdef forward(self):\n\t\tself.result = self.A - self.B\n\nclass Divide(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\tdef forward(self):\n\t\tself.result = self.A/self.B\n\n\nclass Multiply(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\tdef forward(self):\n\t\tself.result = self.A * self.B\n\n\nclass Sum(Operation):\n\tdef __init__(self, A: Tensor,axis:int = -1):\n\t\tself.A = A\n\t\tself.axis = axis\n\n\tdef forward(self):\n\t\tself.result = np.sum(self.A,self.axis)\n\nclass MatMul(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\n\tdef forward(self):\n\t\tself.result = np.matmul( self.A , self.B )\n\n\n\nclass MatMulNaive(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\n\tdef forward(self):\n\t\tcshape=(self.A.shape[0],self.B.shape[1])\n\t\tC=Tensor([x for x in range(np.prod(cshape))]).reshape(cshape)\n\t\tfor i in range(0, self.A.shape[0]):\n\t\t\tfor j in range(0, self.B.shape[1]):\n\t\t\t\tC[i,j]=0\n\t\t\t\tfor k in range(0,self.A.shape[1]):\n\t\t\t\t\tC[i,j]+=self.A[i,k]*self.B[k,j]\n\t\tself.result = C\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"numpy.matmul",
"numpy.sum",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OliverSchmitz/lue | [
"da097e8c1de30724bfe7667cc04344b6535b40cd"
] | [
"source/data_model/python/test/lue_test/test_case.py"
] | [
"import os\nimport shlex\nimport subprocess\nimport unittest\nimport numpy\nimport lue\nimport lue_test\n\n\nclass TestCase(unittest.TestCase):\n\n @classmethod\n def dataset_name(self,\n module_name,\n filename):\n return \"{}.lue\".format(\n os.path.join(os.path.dirname(module_name), filename))\n\n\n def assertArraysEqual(self,\n lhs,\n rhs):\n self.assertEqual(lhs.dtype, rhs.dtype)\n try:\n numpy.testing.assert_equal(lhs, rhs)\n except AssertionError as exception:\n self.fail(str(exception))\n\n\n @classmethod\n def add_method(cls,\n method):\n \"\"\"\n Binds the `method` passed in to the class.\n This is a convenience function to use when adding test methods to\n test cases programmatically at runtime.\n \"\"\"\n setattr(cls, method.__name__, method)\n\n\n @classmethod\n def create_dataset(cls,\n name):\n \"\"\"\n Create dataset, removing an existing dataset first\n \"\"\"\n lue_test.remove_file_if_existant(name)\n\n return lue.create_dataset(name)\n\n\n @classmethod\n def relative_pathname(cls,\n directory_pathname,\n filename):\n \"\"\"\n Return a relative pathname to *filename*, given that the test module\n is located in *directory_pathname*.\n \"\"\"\n return os.path.join(\n lue_test.relative_pathname(__file__, directory_pathname),\n filename)\n\n\n def assertDatasetIsValid(self,\n dataset):\n \"\"\"\n Validate *dataset*\n \"\"\"\n\n if isinstance(dataset, str):\n self.assertTrue(os.path.exists(dataset_pathname))\n dataset = lue.open_dataset(dataset_pathname)\n\n try:\n lue.assert_is_valid(dataset, fail_on_warning=True)\n except RuntimeError as exception:\n self.fail(\"dataset {} is not valid\\n{}\".format(\n dataset.pathname, exception))\n"
] | [
[
"numpy.testing.assert_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xrael/orbit-predictor | [
"9ff616122be0b33e43144bd32a055e1f676801dd"
] | [
"tests/test_numerical_predictor.py"
] | [
"import datetime as dt\nfrom unittest import TestCase\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_almost_equal\nimport pytest\n\nfrom orbit_predictor.locations import ARG\nfrom orbit_predictor.predictors.numerical import (\n J2Predictor, InvalidOrbitError, R_E_KM, is_sun_synchronous\n)\n\n\nclass J2PredictorTests(TestCase):\n def setUp(self):\n # Converted to classical orbital elements\n sma = 6780\n ecc = 0.001\n inc = 28.5\n raan = 67.0\n argp = 355.0\n ta = 250.0\n\n self.epoch = dt.datetime(2000, 1, 1, 12, 0)\n\n self.predictor = J2Predictor(sma, ecc, inc, raan, argp, ta, self.epoch)\n\n def test_propagate_eci(self):\n # Data from GMAT\n expected_position = np.array([2085.9287615146, -6009.5713894563, -2357.3802307070])\n expected_velocity = np.array([6.4787522759177, 3.2366136616580, -2.5063420188165])\n\n when_utc = self.epoch + dt.timedelta(hours=3)\n\n position_eci, velocity_eci = self.predictor.propagate_eci(when_utc)\n\n assert_allclose(position_eci, expected_position, rtol=1e-2)\n assert_allclose(velocity_eci, expected_velocity, rtol=1e-2)\n\n def test_get_next_pass(self):\n pass_ = self.predictor.get_next_pass(ARG)\n\n assert pass_.sate_id == \"<custom>\"\n\n\nclass SunSynchronousTests(TestCase):\n def test_invalid_parameters_raises_error(self):\n self.assertRaises(\n InvalidOrbitError, J2Predictor.sun_synchronous, alt_km=400, inc_deg=90)\n self.assertRaises(\n InvalidOrbitError, J2Predictor.sun_synchronous, alt_km=10000, ecc=0)\n\n def test_sun_sync_from_altitude_and_eccentricity(self):\n # Vallado 3rd edition, example 11-2\n expected_inc = 98.6\n\n pred = J2Predictor.sun_synchronous(alt_km=800, ecc=0)\n self.assertAlmostEqual(pred.get_position().osculating_elements[2], expected_inc, places=2)\n\n def test_sun_sync_from_altitude_and_inclination(self):\n # Hardcoded from our implementation\n expected_ecc = 0.14546153131334466\n\n pred = J2Predictor.sun_synchronous(alt_km=475, inc_deg=97)\n self.assertAlmostEqual(pred.get_position().osculating_elements[1], expected_ecc, places=14)\n\n def test_sun_sync_from_eccentricity_and_inclination(self):\n # Vallado 3rd edition, example 11-2\n expected_sma = 7346.846\n\n pred = J2Predictor.sun_synchronous(ecc=0.2, inc_deg=98.6)\n self.assertAlmostEqual(pred.get_position().osculating_elements[0], expected_sma, places=1)\n\n def test_sun_sync_delta_true_anomaly_has_expected_anomaly_and_epoch(self):\n date = dt.datetime.today().date()\n ltan_h = 12\n expected_ref_epoch = dt.datetime(date.year, date.month, date.day, 12)\n\n for expected_ta_deg in [-30, 0, 30]:\n pred = J2Predictor.sun_synchronous(\n alt_km=800, ecc=0, date=date, ltan_h=ltan_h, ta_deg=expected_ta_deg\n )\n\n ta_deg = pred.get_position(expected_ref_epoch).osculating_elements[5]\n self.assertAlmostEqual(ta_deg, expected_ta_deg % 360, places=12)\n\n def test_sun_sync_delta_true_anomaly_non_circular(self):\n date = dt.datetime.today().date()\n ltan_h = 12\n expected_ref_epoch = dt.datetime(date.year, date.month, date.day, 12)\n\n for expected_ta_deg in [-30, 30]:\n pred = J2Predictor.sun_synchronous(\n alt_km=475, ecc=0.1455, date=date, ltan_h=ltan_h, ta_deg=expected_ta_deg\n )\n\n ta_deg = pred.get_position(expected_ref_epoch).osculating_elements[5]\n self.assertAlmostEqual(ta_deg, expected_ta_deg % 360, places=12)\n\n\n# Test data from Wertz et al. 
\"Space Mission Engineering: The New SMAD\" (2011), table 9-13\[email protected](\"orbits,days,inc_deg,expected_h\", [\n (14, 1, 28, 817.14),\n (43, 3, 28, 701.34),\n (29, 2, 28, 645.06),\n (59, 4, 28, 562.55),\n (74, 5, 28, 546.31),\n (15, 1, 28, 482.25),\n])\ndef test_repeated_groundtrack_sma(orbits, days, inc_deg, expected_h):\n pred = J2Predictor.repeating_ground_track(orbits=orbits, days=days, ecc=0.0, inc_deg=inc_deg)\n\n assert_almost_equal(pred.get_position().osculating_elements[0] - R_E_KM, expected_h, decimal=0)\n\n\ndef test_is_sun_sync_returns_false_for_non_sun_sync_orbit():\n pred1 = J2Predictor(7000, 0, 0, 0, 0, 0, dt.datetime.now())\n\n assert not is_sun_synchronous(pred1)\n\n\ndef test_is_sun_sync_detects_almost_sun_sync_orbit():\n pred2 = J2Predictor(R_E_KM + 460, 0.001, 97.4, 0, 0, 0, dt.datetime.now())\n\n assert not is_sun_synchronous(pred2)\n assert is_sun_synchronous(pred2, rtol=1e-1)\n\n\ndef test_is_sun_sync_returns_true_for_sun_sync_orbit():\n pred1 = J2Predictor.sun_synchronous(alt_km=500, ecc=0)\n pred2 = J2Predictor.sun_synchronous(alt_km=500, inc_deg=97)\n pred3 = J2Predictor.sun_synchronous(ecc=0, inc_deg=97)\n\n assert is_sun_synchronous(pred1)\n assert is_sun_synchronous(pred2)\n assert is_sun_synchronous(pred3)\n"
] | [
[
"numpy.array",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PingjunChen/ThyroidRule | [
"1213cf0783c84da5917ca903c156e5e4280402f5"
] | [
"utils/wsi_util.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport os, sys, pdb\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.utils.data as data\nfrom torchvision import datasets, transforms\n\nimport numpy as np\nimport cv2, copy, time\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage import binary_fill_holes, binary_closing, binary_dilation\nfrom skimage import transform, morphology, filters\nfrom skimage.morphology import remove_small_objects\n\nimport loader\n\n\ndef refine_prediction(pred, thresh, min_size):\n binary = pred > thresh # Threshold\n binary = binary_dilation(binary, structure=np.ones((5,5))) # dilation to connect\n binary = binary_fill_holes(binary) # Fill holes\n # Remove outliers\n mask = remove_small_objects(binary, min_size=min_size, connectivity=8)\n\n return mask\n\n\ndef pred_patches(cls_model, patches, args):\n preds = []\n\n start_time = time.time()\n slide_dset = loader.PatchDataset(patches)\n dset_loader = data.DataLoader(slide_dset, batch_size=args.batch_size, shuffle=False, num_workers=4)\n with torch.no_grad():\n for ind, inputs in enumerate(dset_loader):\n inputs = inputs.type(torch.FloatTensor)\n inputs = Variable(inputs.cuda())\n outputs = cls_model(inputs)\n _, batch_preds = outputs.max(1)\n preds.extend(batch_preds.cpu().tolist())\n\n elapsed_time = time.time() - start_time\n print(\"{} seconds for {} patches.\".format(elapsed_time, patches.shape[0]))\n \n return preds\n\n\ndef slide_pred(cls_model, split_arr, patches, wsi_dim, args):\n # Save prediction results\n RAW_SIZE = 299\n SIZE1, SIZE2, SIZE4 = int(RAW_SIZE/4), int(RAW_SIZE/2), RAW_SIZE\n class_num = 3\n result_map = np.zeros((wsi_dim[0], wsi_dim[1], class_num), dtype=np.uint8)\n\n # Prediction\n if patches.shape[0] > 0: # exist\n preds = pred_patches(cls_model, patches, args)\n for coor, pred in zip(split_arr, preds):\n result_map[coor[0]+SIZE1:coor[0]+SIZE1+SIZE2, coor[1]+SIZE1:coor[1]+SIZE1+SIZE2, pred] = 255\n\n # Resize results\n args.img_cnt_ratio = 2**(args.cnt_level - args.img_level)\n s_height, s_width = wsi_dim[0] / args.img_cnt_ratio, wsi_dim[1] / args.img_cnt_ratio\n result_img = transform.resize(result_map, (s_height, s_width))\n\n MINIMUM_REGION_SIZE = (np.floor(SIZE2 / args.img_cnt_ratio))**2\n # refine unsure\n unsure_min_size = MINIMUM_REGION_SIZE * args.unsure_grid_num\n result_img[:,:,1] = refine_prediction(result_img[:,:,1], thresh=args.unsure_prob, min_size=unsure_min_size)\n unsure_img = (result_img[:,:,1] * 255).astype(np.uint8)\n _, unsure_cnts, _ = cv2.findContours(unsure_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n max_unsure = 0\n if len(unsure_cnts) != 0:\n max_unsure_cnt = max(unsure_cnts, key = cv2.contourArea)\n max_unsure = cv2.contourArea(max_unsure_cnt)\n unsure_num_grid = int(max_unsure / MINIMUM_REGION_SIZE)\n # refine malignant\n yes_min_size = MINIMUM_REGION_SIZE * args.malignant_num_min\n result_img[:,:,2] = refine_prediction(result_img[:,:,2], thresh=args.malignant_prob, min_size=yes_min_size)\n yes_img = (result_img[:,:,2] * 255).astype(np.uint8)\n _, yes_cnts, _ = cv2.findContours(yes_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n max_yes = 0\n if len(yes_cnts) != 0:\n max_yes_cnt = max(yes_cnts, key = cv2.contourArea)\n max_yes = cv2.contourArea(max_yes_cnt)\n yes_num_grid = int(max_yes / MINIMUM_REGION_SIZE)\n\n # Rule-based diagnosis\n diag_flag = thyroid_diagnosis_rule(unsure_num_grid, yes_num_grid, args)\n return result_img, diag_flag\n\n\ndef thyroid_diagnosis_rule(unsure_num, yes_num, args):\n diag_flag = \"Benign\"\n # 
if there are unsure regions, take it unsure\n if unsure_num != 0:\n diag_flag = \"Unsure\"\n else:\n # if malignant regions large than 16, take it as malignant\n if yes_num >= args.malignant_num_max:\n diag_flag = \"Malignant\"\n # if malignant regions num between 2-16, take is as Unsure\n elif yes_num >= args.malignant_num_min and yes_num < args.malignant_num_max:\n diag_flag = \"Unsure\"\n else:\n diag_flag = \"Benign\"\n return diag_flag\n\n\n\ndef pred_feas(cls_model, patches, args):\n probs, logits, vecs = [], [], []\n\n def fea_hook(module, input, output):\n t_fea2048 = input[0].cpu().tolist()\n cur_vecs = copy.deepcopy(t_fea2048)\n t_logit3 = output.cpu().tolist()\n cur_logits = copy.deepcopy(t_logit3)\n t_fea3 = F.softmax(output, dim=-1)\n cur_fea3 = t_fea3.cpu().tolist()\n cur_probs = copy.deepcopy(cur_fea3)\n\n vecs.extend(cur_vecs)\n logits.extend(cur_logits)\n probs.extend(cur_probs)\n\n cls_model.fc.register_forward_hook(fea_hook)\n slide_dset = loader.PatchDataset(patches)\n dset_loader = data.DataLoader(slide_dset, batch_size=args.batch_size, shuffle=False, num_workers=4)\n with torch.no_grad():\n for ind, inputs in enumerate(dset_loader):\n inputs = inputs.type(torch.FloatTensor)\n inputs = Variable(inputs.cuda())\n outputs = cls_model(inputs)\n\n return probs, logits, vecs\n\n\n\ndef sort_by_prob(BBoxes, ClsProbs, ClsLogits, FeaVecs):\n fea_dict = {}\n norm_prob_list = [ele[0] for ele in ClsProbs]\n sorting_indx = np.argsort(norm_prob_list)\n fea_dict[\"bbox\"] = [BBoxes[ind] for ind in sorting_indx]\n fea_dict[\"prob\"] = [ClsProbs[ind] for ind in sorting_indx]\n fea_dict[\"logit\"] = [ClsLogits[ind] for ind in sorting_indx]\n fea_dict[\"feaVec\"] = [FeaVecs[ind] for ind in sorting_indx]\n\n return fea_dict\n\n\ndef gen_slide_feas(cls_model, split_arr, patches, wsi_dim, args):\n RAW_SIZE = 299\n SIZE1, SIZE2, SIZE4 = int(RAW_SIZE/4), int(RAW_SIZE/2), RAW_SIZE\n class_num = 3\n\n FeasList = []\n BBoxes, ClsProbs, ClsLogits, FeaVecs = [], [], [], []\n # Prediction\n if patches.shape[0] > 0: # exist\n ClsProbs, ClsLogits, FeaVecs = pred_feas(cls_model, patches, args)\n for coor in split_arr:\n cur_x, cur_y = coor[1]+SIZE1, coor[0]+SIZE1\n cur_bbox = [cur_x, cur_y, SIZE2, SIZE2]\n BBoxes.append(cur_bbox)\n\n fea_dict = sort_by_prob(BBoxes, ClsProbs, ClsLogits, FeaVecs)\n return fea_dict\n"
] | [
[
"torch.nn.functional.softmax",
"torch.utils.data.DataLoader",
"numpy.ones",
"torch.no_grad",
"numpy.floor",
"numpy.argsort",
"scipy.ndimage.binary_fill_holes",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
shawnkx/NAT-with-Local-AT | [
"16b29e068ad568e3a020f1309e140aa0dbc38479",
"16b29e068ad568e3a020f1309e140aa0dbc38479"
] | [
"Mask-Predict/fairseq/data/language_pair_context_mask.py",
"Mask-Predict/fairseq/data/dictionary.py"
] | [
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport numpy as np\nimport torch\nimport random\n\nfrom fairseq import utils\n\nfrom . import data_utils, FairseqDataset\n\n\ndef collate(\n samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False,\n input_feeding=True,\n):\n if len(samples) == 0:\n return {}\n\n def merge(key, is_list=False):\n if is_list:\n res = []\n for i in range(len(samples[0][key])):\n res.append(data_utils.collate_tokens(\n [s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False,\n ))\n return res\n else:\n return data_utils.collate_tokens(\n [s[key] for s in samples], pad_idx, eos_idx, left_pad=False,\n )\n\n is_target_list = isinstance(samples[0]['dec_target'], list)\n return {\n 'id': torch.LongTensor([s['id'] for s in samples]),\n 'ntokens': sum(s['ntokens'] for s in samples),\n 'net_input': {\n 'src_tokens': merge('enc_source'),\n 'src_lengths': torch.LongTensor([\n s['enc_source'].numel() for s in samples\n ]),\n 'prev_output_tokens': merge('dec_source')\n },\n 'target': merge('dec_target', is_target_list),\n 'nsentences': samples[0]['enc_source'].size(0),\n }\n\n \"\"\"id = torch.LongTensor([s['id'] for s in samples])\n src_tokens = merge('source', left_pad=left_pad_source)\n # sort by descending source length\n src_lengths = torch.LongTensor([s['source'].numel() for s in samples])\n src_lengths, sort_order = src_lengths.sort(descending=True)\n id = id.index_select(0, sort_order)\n src_tokens = src_tokens.index_select(0, sort_order)\n prev_output_tokens = None\n target = None\n if samples[0].get('target', None) is not None:\n target = merge('target', left_pad=left_pad_target)\n target = target.index_select(0, sort_order)\n ntokens = sum(len(s['target']) for s in samples)\n if input_feeding:\n # we create a shifted version of targets for feeding the\n # previous output token(s) into the next decoder step\n prev_output_tokens = merge(\n 'target',\n left_pad=left_pad_target,\n move_eos_to_beginning=True,\n )\n prev_output_tokens = prev_output_tokens.index_select(0, sort_order)\n else:\n ntokens = sum(len(s['source']) for s in samples)\n \n batch = {\n 'id': id,\n 'ntokens': ntokens,\n 'net_input': {\n 'src_tokens': src_tokens,\n 'src_lengths': src_lengths,\n },\n 'target': target,\n 'nsentences': samples[0]['source'].size(0),\n }\n if prev_output_tokens is not None:\n batch['net_input']['prev_output_tokens'] = prev_output_tokens\n return batch\"\"\"\n\n\nclass LanguagePairContextMask(FairseqDataset):\n \"\"\"\n A pair of torch.utils.data.Datasets.\n Args:\n src (torch.utils.data.Dataset): source dataset to wrap\n src_sizes (List[int]): source sentence lengths\n src_dict (~fairseq.data.Dictionary): source vocabulary\n tgt (torch.utils.data.Dataset, optional): target dataset to wrap\n tgt_sizes (List[int], optional): target sentence lengths\n tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary\n left_pad_source (bool, optional): pad source tensors on the left side.\n Default: ``True``\n left_pad_target (bool, optional): pad target tensors on the left side.\n Default: ``False``\n max_source_positions (int, optional): max number of tokens in the source\n sentence. Default: ``1024``\n max_target_positions (int, optional): max number of tokens in the target\n sentence. 
Default: ``1024``\n shuffle (bool, optional): shuffle dataset elements before batching.\n Default: ``True``\n input_feeding (bool, optional): create a shifted version of the targets\n to be passed into the model for input feeding/teacher forcing.\n Default: ``True``\n \"\"\"\n\n def __init__(\n self, src, src_sizes, src_dict,\n tgt=None, tgt_sizes=None, tgt_dict=None,\n left_pad_source=True, left_pad_target=False,\n max_source_positions=2048, max_target_positions=2048,\n shuffle=True, input_feeding=True,\n dynamic_length=False,\n mask_range=False,\n train=True,\n seed=None,\n len_context=None\n ):\n if tgt_dict is not None:\n assert src_dict.pad() == tgt_dict.pad()\n assert src_dict.eos() == tgt_dict.eos()\n assert src_dict.unk() == tgt_dict.unk()\n self.src = src\n self.tgt = tgt\n self.src_sizes = np.array(src_sizes)\n self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n self.left_pad_source = left_pad_source\n self.left_pad_target = left_pad_target\n self.max_source_positions = max_source_positions\n self.max_target_positions = max_target_positions\n self.shuffle = shuffle\n self.input_feeding = input_feeding\n self.dynamic_length = dynamic_length\n self.mask_range = mask_range\n self.train = train\n self.seed = seed\n self.random = np.random.RandomState(seed)\n self.seed = seed\n self.len_context = len_context\n\n def __getitem__(self, index):\n enc_source, dec_source, dec_target, ntokens = self._make_source_target(self.src[index], self.tgt[index])\n return {'id': index, 'enc_source': enc_source, 'dec_source': dec_source, 'dec_target': dec_target, 'ntokens': ntokens}\n\n def __len__(self):\n return len(self.src)\n\n def _make_source_target(self, source, target):\n if self.dynamic_length:\n max_len = 3 * len(source) // 2 + 1\n target = target.new((target.tolist() + ([self.tgt_dict.eos()] * (max_len - len(target))))[:max_len])\n \n min_num_masks = 1\n \n enc_source = source\n target = target.new([self.tgt_dict.bos()] + target.tolist())\n dec_source = target.new(target.tolist())\n dec_target_cp = target.new(target.tolist())\n dec_target = target.new([self.tgt_dict.pad()] * len(dec_source))\n \n if self.train:\n if min_num_masks < len(dec_source):\n sample_size = self.random.randint(min_num_masks, len(dec_source))\n else:\n sample_size = len(dec_source)\n\n if self.mask_range:\n start = self.random.randint(len(dec_source) - sample_size + 1)\n ind = list(range(start, start + sample_size))\n else:\n ind = self.random.choice(len(dec_source) , size=sample_size, replace=False)\n \n dec_source[ind] = self.tgt_dict.mask()\n dec_target[ind] = dec_target_cp[ind]\n \n left_context = dec_target.new([self.tgt_dict.bos()] * self.len_context)\n right_context = dec_target.new([self.tgt_dict.eos()] * (self.len_context))\n len_ori_target = dec_target.size(0)\n # print (\"original tokens\", self.tgt_dict.string(dec_target, remove_eos=False))\n dec_target = torch.cat((left_context, dec_target, right_context))\n context_dec_target = torch.cat([dec_target[i:i + len_ori_target] for i in range(self.len_context * 2 + 1)], dim=0)\n context_dec_target = context_dec_target.view(self.len_context * 2 + 1, -1).transpose(0, 1).contiguous().view(-1)\n context_ind = ind * (self.len_context * 2 + 1) + 1\n context_ind = np.concatenate((context_ind, context_ind - 2, context_ind + 2))\n context_ind = context_ind[context_ind >= 0]\n context_ind = context_ind[context_ind < len(context_dec_target)]\n pad_ctx_dec_target = 
context_dec_target.new([self.tgt_dict.pad()] * len(context_dec_target))\n pad_ctx_dec_target[context_ind] = context_dec_target[context_ind]\n context_dec_target = pad_ctx_dec_target\n else:\n dec_target = dec_target_cp\n dec_source[:] = self.tgt_dict.mask()\n left_context = dec_target.new([self.tgt_dict.bos()] * self.len_context)\n right_context = dec_target.new([self.tgt_dict.eos()] * (self.len_context))\n len_ori_target = dec_target.size(0)\n # print (\"original tokens\", self.tgt_dict.string(dec_target, remove_eos=False))\n dec_target = torch.cat((left_context, dec_target, right_context))\n context_dec_target = torch.cat([dec_target[i:i + len_ori_target] for i in range(self.len_context * 2 + 1)], dim=0)\n context_dec_target = context_dec_target.view(self.len_context * 2 + 1, -1).transpose(0, 1).contiguous().view(-1)\n ntokens = dec_target.ne(self.tgt_dict.pad()).sum(-1).item()\n \n\n # print (\"masked tokens\", self.tgt_dict.string(dec_source), len(dec_source))\n # # print (\"original tokens\", self.tgt_dict.string(dec_target, remove_eos=False), len(dec_target))\n # print(\"context dec target tokens\", self.tgt_dict.string(context_dec_target, remove_eos=False))\n # # print (\"source tokens\", self.src_dict.string(enc_source))\n # exit()\n return enc_source, dec_source, context_dec_target, ntokens\n\n def collater(self, samples):\n \"\"\"Merge a list of samples to form a mini-batch.\n Args:\n samples (List[dict]): samples to collate\n Returns:\n dict: a mini-batch with the following keys:\n - `id` (LongTensor): example IDs in the original input order\n - `ntokens` (int): total number of tokens in the batch\n - `net_input` (dict): the input to the Model, containing keys:\n - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in\n the source sentence of shape `(bsz, src_len)`. Padding will\n appear on the left if *left_pad_source* is ``True``.\n - `src_lengths` (LongTensor): 1D Tensor of the unpadded\n lengths of each source sentence of shape `(bsz)`\n - `prev_output_tokens` (LongTensor): a padded 2D Tensor of\n tokens in the target sentence, shifted right by one position\n for input feeding/teacher forcing, of shape `(bsz,\n tgt_len)`. This key will not be present if *input_feeding*\n is ``False``. Padding will appear on the left if\n *left_pad_target* is ``True``.\n - `target` (LongTensor): a padded 2D Tensor of tokens in the\n target sentence of shape `(bsz, tgt_len)`. Padding will appear\n on the left if *left_pad_target* is ``True``.\n \"\"\"\n return collate(\n samples, pad_idx=self.src_dict.pad(), eos_idx=self.src_dict.eos(),\n left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,\n input_feeding=self.input_feeding,\n )\n\n def get_dummy_batch(self, num_tokens, max_positions, src_len=128, tgt_len=128):\n \"\"\"Return a dummy batch with a given number of tokens.\"\"\"\n src_len, tgt_len = utils.resolve_max_positions(\n (src_len, tgt_len),\n max_positions,\n (self.max_source_positions, self.max_target_positions),\n )\n bsz = num_tokens // max(src_len, tgt_len)\n\n enc_source, dec_source, dec_target, ntokens = self._make_source_target(self.src_dict.dummy_sentence(src_len), self.tgt_dict.dummy_sentence(tgt_len))\n\n return self.collater([\n {\n 'id': i,\n 'enc_source': enc_source,\n 'dec_source': dec_source,\n 'dec_target': dec_target,\n 'ntokens': ntokens,\n }\n for i in range(bsz)\n ])\n\n def num_tokens(self, index):\n \"\"\"Return the number of tokens in a sample. 
This value is used to\n enforce ``--max-tokens`` during batching.\"\"\"\n return max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)\n\n def size(self, index):\n \"\"\"Return an example's size as a float or tuple. This value is used when\n filtering a dataset with ``--max-positions``.\"\"\"\n return (self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)\n\n def ordered_indices(self):\n \"\"\"Return an ordered list of indices. Batches will be constructed based\n on this order.\"\"\"\n if self.shuffle and self.train and self.seed is None:\n return np.random.permutation(len(self))\n \n indices = np.arange(len(self))\n if self.tgt_sizes is not None:\n indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]\n return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]\n\n def prefetch(self, indices):\n self.src.prefetch(indices)\n self.tgt.prefetch(indices)\n\n @property\n def supports_prefetch(self):\n return (\n hasattr(self.src, 'supports_prefetch')\n and self.src.supports_prefetch\n and hasattr(self.tgt, 'supports_prefetch')\n and self.tgt.supports_prefetch\n )\n",
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nfrom collections import Counter\nfrom multiprocessing import Pool\nimport os\n\nimport torch\n\nfrom fairseq.tokenizer import tokenize_line\nfrom fairseq.binarizer import safe_readline\nfrom fairseq.data import data_utils\n\n\nclass Dictionary(object):\n \"\"\"A mapping from symbols to consecutive integers\"\"\"\n\n def __init__(\n self,\n pad='<pad>',\n eos='</s>',\n unk='<unk>',\n bos='<s>',\n mask='<mask>',\n extra_special_symbols=None,\n ):\n self.unk_word, self.pad_word, self.eos_word, self.mask_word = unk, pad, eos, mask\n self.symbols = []\n self.count = []\n self.indices = {}\n self.bos_index = self.add_symbol(bos)\n self.pad_index = self.add_symbol(pad)\n self.eos_index = self.add_symbol(eos)\n self.unk_index = self.add_symbol(unk)\n self.mask_index = self.add_symbol(mask)\n self.segment_start_index = self.add_symbol('<ss>')\n if extra_special_symbols:\n for s in extra_special_symbols:\n self.add_symbol(s)\n self.nspecial = len(self.symbols)\n\n def __eq__(self, other):\n return self.indices == other.indices\n\n def __getitem__(self, idx):\n if idx < len(self.symbols):\n return self.symbols[idx]\n return self.unk_word\n\n def __len__(self):\n \"\"\"Returns the number of symbols in the dictionary\"\"\"\n return len(self.symbols)\n\n def index(self, sym):\n \"\"\"Returns the index of the specified symbol\"\"\"\n assert isinstance(sym, str)\n if sym in self.indices:\n return self.indices[sym]\n return self.unk_index\n\n def string(self, tensor, bpe_symbol=None, escape_unk=False, remove_eos=True):\n \"\"\"Helper for converting a tensor of token indices to a string.\n\n Can optionally remove BPE symbols or escape <unk> words.\n \"\"\"\n if torch.is_tensor(tensor) and tensor.dim() == 2:\n return '\\n'.join(self.string(t, bpe_symbol, escape_unk) for t in tensor)\n\n def token_string(i):\n if i == self.unk():\n return self.unk_string(escape_unk)\n else:\n return self[i]\n if remove_eos:\n sent = ' '.join(token_string(i) for i in tensor if i != self.eos() and i != self.bos())\n else:\n sent = ' '.join(token_string(i) for i in tensor)\n return data_utils.process_bpe_symbol(sent, bpe_symbol)\n\n def unk_string(self, escape=False):\n \"\"\"Return unknown string, optionally escaped as: <<unk>>\"\"\"\n if escape:\n return '<{}>'.format(self.unk_word)\n else:\n return self.unk_word\n\n def add_symbol(self, word, n=1):\n \"\"\"Adds a word to the dictionary\"\"\"\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + n\n return idx\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(n)\n return idx\n\n def update(self, new_dict):\n \"\"\"Updates counts from new dictionary.\"\"\"\n for word in new_dict.symbols:\n idx2 = new_dict.indices[word]\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + new_dict.count[idx2]\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(new_dict.count[idx2])\n\n def finalize(self, threshold=-1, nwords=-1, padding_factor=8):\n \"\"\"Sort symbols by frequency in descending order, ignoring special ones.\n\n Args:\n - threshold defines the minimum word count\n - nwords defines the total number of 
words in the final dictionary,\n including special symbols\n - padding_factor can be used to pad the dictionary size to be a\n multiple of 8, which is important on some hardware (e.g., Nvidia\n Tensor Cores).\n \"\"\"\n if nwords <= 0:\n nwords = len(self)\n\n new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))\n new_symbols = self.symbols[:self.nspecial]\n new_count = self.count[:self.nspecial]\n\n c = Counter(dict(zip(self.symbols[self.nspecial:], self.count[self.nspecial:])))\n for symbol, count in c.most_common(nwords - self.nspecial):\n if count >= threshold:\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(count)\n else:\n break\n\n threshold_nwords = len(new_symbols)\n if padding_factor > 1:\n i = 0\n while threshold_nwords % padding_factor != 0:\n symbol = 'madeupword{:04d}'.format(i)\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(0)\n i += 1\n threshold_nwords += 1\n\n assert len(new_symbols) % padding_factor == 0\n assert len(new_symbols) == len(new_indices)\n\n self.count = list(new_count)\n self.symbols = list(new_symbols)\n self.indices = new_indices\n\n def bos(self):\n \"\"\"Helper to get index of beginning-of-sentence symbol\"\"\"\n return self.bos_index\n\n def boseg(self):\n return self.segment_start_index\n\n def pad(self):\n \"\"\"Helper to get index of pad symbol\"\"\"\n return self.pad_index\n\n def eos(self):\n \"\"\"Helper to get index of end-of-sentence symbol\"\"\"\n return self.eos_index\n\n def unk(self):\n \"\"\"Helper to get index of unk symbol\"\"\"\n return self.unk_index\n\n def mask(self):\n \"\"\"Helper to get index of mask symbol\"\"\"\n return self.mask_index\n\n @classmethod\n def load(cls, f, ignore_utf_errors=False):\n \"\"\"Loads the dictionary from a text file with the format:\n\n ```\n <symbol0> <count0>\n <symbol1> <count1>\n ...\n ```\n \"\"\"\n d = cls()\n d.add_from_file(f, ignore_utf_errors)\n return d\n\n def add_from_file(self, f, ignore_utf_errors=False):\n \"\"\"\n Loads a pre-existing dictionary from a text file and adds its symbols\n to this instance.\n \"\"\"\n if isinstance(f, str):\n try:\n if not ignore_utf_errors:\n with open(f, 'r', encoding='utf-8') as fd:\n self.add_from_file(fd)\n else:\n with open(f, 'r', encoding='utf-8', errors='ignore') as fd:\n self.add_from_file(fd)\n except FileNotFoundError as fnfe:\n raise fnfe\n except UnicodeError:\n raise Exception(\"Incorrect encoding detected in {}, please \"\n \"rebuild the dataset\".format(f))\n return\n\n lines = f.readlines()\n indices_start_line = self._load_meta(lines)\n for line in lines[indices_start_line:]:\n idx = line.rfind(' ')\n if idx == -1:\n raise ValueError(\"Incorrect dictionary format, expected '<token> <cnt>'\")\n word = line[:idx]\n count = int(line[idx + 1:])\n self.indices[word] = len(self.symbols)\n self.symbols.append(word)\n self.count.append(count)\n\n def _save(self, f, kv_iterator):\n if isinstance(f, str):\n os.makedirs(os.path.dirname(f), exist_ok=True)\n with open(f, 'w', encoding='utf-8') as fd:\n return self.save(fd)\n for k, v in kv_iterator:\n print('{} {}'.format(k, v), file=f)\n\n def _get_meta(self):\n return [], []\n\n def _load_meta(self, lines):\n return 0\n\n def save(self, f):\n \"\"\"Stores dictionary into a text file\"\"\"\n ex_keys, ex_vals = self._get_meta()\n self._save(f, zip(ex_keys + self.symbols[self.nspecial:], ex_vals + self.count[self.nspecial:]))\n\n def dummy_sentence(self, length):\n t = 
torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()\n t[-1] = self.eos()\n return t\n\n def encode_line(self, line, line_tokenizer=tokenize_line, add_if_not_exist=True,\n consumer=None, append_eos=True, reverse_order=False):\n words = line_tokenizer(line)\n if reverse_order:\n words = list(reversed(words))\n nwords = len(words)\n ids = torch.IntTensor(nwords + 1 if append_eos else nwords)\n\n for i, word in enumerate(words):\n if add_if_not_exist:\n idx = self.add_symbol(word)\n else:\n idx = self.index(word)\n if consumer is not None:\n consumer(word, idx)\n ids[i] = idx\n if append_eos:\n ids[nwords] = self.eos_index\n return ids\n\n @staticmethod\n def _add_file_to_dictionary_single_worker(filename, tokenize, eos_word, worker_id=0, num_workers=1):\n counter = Counter()\n with open(filename, 'r', encoding='utf-8') as f:\n size = os.fstat(f.fileno()).st_size\n chunk_size = size // num_workers\n offset = worker_id * chunk_size\n end = offset + chunk_size\n f.seek(offset)\n if offset > 0:\n safe_readline(f) # drop first incomplete line\n line = f.readline()\n while line:\n for word in tokenize(line):\n counter.update([word])\n counter.update([eos_word])\n if f.tell() > end:\n break\n line = f.readline()\n return counter\n\n @staticmethod\n def add_file_to_dictionary(filename, dict, tokenize, num_workers):\n def merge_result(counter):\n for w, c in counter.items():\n dict.add_symbol(w, c)\n\n if num_workers > 1:\n pool = Pool(processes=num_workers)\n results = []\n for worker_id in range(num_workers):\n results.append(pool.apply_async(\n Dictionary._add_file_to_dictionary_single_worker,\n (filename, tokenize, dict.eos_word, worker_id, num_workers)\n ))\n pool.close()\n pool.join()\n for r in results:\n merge_result(r.get())\n else:\n merge_result(Dictionary._add_file_to_dictionary_single_worker(filename, tokenize, dict.eos_word))\n\n\nclass TruncatedDictionary(object):\n\n def __init__(self, wrapped_dict, length):\n self.__class__ = type(\n wrapped_dict.__class__.__name__,\n (self.__class__, wrapped_dict.__class__),\n {}\n )\n self.__dict__ = wrapped_dict.__dict__\n self.wrapped_dict = wrapped_dict\n self.length = min(len(self.wrapped_dict), length)\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, i):\n if i < self.length:\n return self.wrapped_dict[i]\n return self.wrapped_dict.unk()\n"
] | [
[
"torch.LongTensor",
"torch.cat",
"numpy.concatenate",
"numpy.argsort",
"numpy.array",
"numpy.random.RandomState"
],
[
"torch.is_tensor",
"torch.Tensor",
"torch.IntTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
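A minimal sketch of the position-masking step in `_make_source_target` from the fairseq dataset file above: pick a random number of decoder positions to mask, either as one contiguous block (`mask_range`) or as an unordered sample. The helper name `pick_mask_positions` and the seeded `RandomState` are illustrative assumptions, not part of that file.

import numpy as np

def pick_mask_positions(length, rng, mask_range=False, min_num_masks=1):
    # decide how many positions to mask, mirroring the sampling logic above
    if min_num_masks < length:
        sample_size = rng.randint(min_num_masks, length)
    else:
        sample_size = length
    if mask_range:
        # one contiguous span of sample_size positions
        start = rng.randint(length - sample_size + 1)
        return np.arange(start, start + sample_size)
    # otherwise an unordered sample of positions without replacement
    return rng.choice(length, size=sample_size, replace=False)

rng = np.random.RandomState(0)
print(pick_mask_positions(10, rng))                   # scattered indices
print(pick_mask_positions(10, rng, mask_range=True))  # a contiguous block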
sooftware/Fairseq-Listen-Attend-Spell | [
"9c66b3e7afef8bdcd24c6e71efffc45b8db6ae04"
] | [
"fairseq_las/data/data_utils.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\n\n\ndef calc_mean_invstddev(feature):\n if len(feature.size()) != 2:\n raise ValueError(\"We expect the input feature to be 2-D tensor\")\n mean = feature.mean(0)\n var = feature.var(0)\n # avoid division by ~zero\n eps = 1e-8\n if (var < eps).any():\n return mean, 1.0 / (torch.sqrt(var) + eps)\n return mean, 1.0 / torch.sqrt(var)\n\n\ndef apply_mv_norm(features):\n # If there is less than 2 spectrograms, the variance cannot be computed (is NaN)\n # and normalization is not possible, so return the item as it is\n if features.size(0) < 2:\n return features\n mean, invstddev = calc_mean_invstddev(features)\n res = (features - mean) * invstddev\n return res\n\n\ndef lengths_to_encoder_padding_mask(lengths, batch_first=False):\n \"\"\"\n convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor\n\n Args:\n lengths: a (B, )-shaped tensor\n\n Return:\n max_length: maximum length of B sequences\n encoder_padding_mask: a (max_length, B) binary mask, where\n [t, b] = 0 for t < lengths[b] and 1 otherwise\n\n TODO:\n kernelize this function if benchmarking shows this function is slow\n \"\"\"\n max_lengths = torch.max(lengths).item()\n bsz = lengths.size(0)\n encoder_padding_mask = torch.arange(\n max_lengths\n ).to( # a (T, ) tensor with [0, ..., T-1]\n lengths.device\n ).view( # move to the right device\n 1, max_lengths\n ).expand( # reshape to (1, T)-shaped tensor\n bsz, -1\n ) >= lengths.view( # expand to (B, T)-shaped tensor\n bsz, 1\n ).expand(\n -1, max_lengths\n )\n if not batch_first:\n return encoder_padding_mask.t(), max_lengths\n else:\n return encoder_padding_mask, max_lengths\n\n\ndef encoder_padding_mask_to_lengths(encoder_padding_mask, max_lengths, batch_size, device):\n \"\"\"\n convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor\n\n Conventionally, encoder output contains a encoder_padding_mask, which is\n a 2-D mask in a shape (T, B), whose (t, b) element indicate whether\n encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we\n need to convert this mask tensor to a 1-D tensor in shape (B, ), where\n [b] denotes the valid length of b-th sequence\n\n Args:\n encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None,\n indicating all are valid\n Return:\n seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the\n number of valid elements of b-th sequence\n\n max_lengths: maximum length of all sequence, if encoder_padding_mask is\n not None, max_lengths must equal to encoder_padding_mask.size(0)\n\n batch_size: batch size; if encoder_padding_mask is\n not None, max_lengths must equal to encoder_padding_mask.size(1)\n\n device: which device to put the result on\n \"\"\"\n if encoder_padding_mask is None:\n return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device)\n\n assert encoder_padding_mask.size(0) == max_lengths, \"max_lengths does not match\"\n assert encoder_padding_mask.size(1) == batch_size, \"batch_size does not match\"\n\n return max_lengths - torch.sum(encoder_padding_mask, dim=0)\n"
] | [
[
"torch.max",
"torch.Tensor",
"torch.sqrt",
"torch.sum",
"torch.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
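A short, self-contained sketch of the broadcasting idea behind `lengths_to_encoder_padding_mask` in the data_utils file above: compare an arange of time steps against each sequence length to obtain a binary padding mask. The batch-first helper name `lengths_to_padding_mask` is an assumption for illustration only.

import torch

def lengths_to_padding_mask(lengths):
    # True (1) where a position is padding, False (0) where it is a valid token
    bsz = lengths.size(0)
    max_len = int(lengths.max().item())
    positions = torch.arange(max_len, device=lengths.device).unsqueeze(0).expand(bsz, -1)
    return positions >= lengths.unsqueeze(1)

mask = lengths_to_padding_mask(torch.tensor([3, 1, 4]))
print(mask.int())
# tensor([[0, 0, 0, 1],
#         [0, 1, 1, 1],
#         [0, 0, 0, 0]], dtype=torch.int32)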
colesbury/awkward-1.0 | [
"d036ab18eb54de8a2571d9f179d315ac8ee22119",
"d036ab18eb54de8a2571d9f179d315ac8ee22119",
"d036ab18eb54de8a2571d9f179d315ac8ee22119",
"d036ab18eb54de8a2571d9f179d315ac8ee22119"
] | [
"tests/test_0006-deep-iteration.py",
"tests/test_0074-argsort-and-sort.py",
"tests-cuda/test_0345-cuda-num.py",
"tests/v2/test_1116-project-maskedarrays.py"
] | [
"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport sys\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_iterator():\n content = ak.layout.NumpyArray(np.array([1.1, 2.2, 3.3]))\n offsets = ak.layout.Index32(np.array([0, 2, 2, 3], \"i4\"))\n array = ak.layout.ListOffsetArray32(offsets, content)\n assert list(content) == [1.1, 2.2, 3.3]\n assert [np.asarray(x).tolist() for x in array] == [[1.1, 2.2], [], [3.3]]\n\n\ndef test_refcount():\n content = ak.layout.NumpyArray(np.array([1.1, 2.2, 3.3]))\n offsets = ak.layout.Index32(np.array([0, 2, 2, 3], \"i4\"))\n array = ak.layout.ListOffsetArray32(offsets, content)\n\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n\n iter1 = iter(content)\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n x1 = next(iter1)\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n\n iter2 = iter(array)\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n x2 = next(iter2)\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n\n del iter1\n del x1\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n\n del iter2\n del x2\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n",
"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_bool_sort():\n array = ak.layout.NumpyArray(np.array([True, False, True, False, False]))\n assert ak.to_list(ak.sort(array, axis=0, ascending=True, stable=False)) == [\n False,\n False,\n False,\n True,\n True,\n ]\n\n\ndef test_keep_None_in_place_test():\n array = ak.Array([[3, 2, 1], [], None, [4, 5]])\n\n assert ak.to_list(ak.argsort(array, axis=1)) == [\n [2, 1, 0],\n [],\n None,\n [0, 1],\n ]\n\n assert ak.to_list(ak.sort(array, axis=1)) == [\n [1, 2, 3],\n [],\n None,\n [4, 5],\n ]\n\n assert ak.to_list(array[ak.argsort(array, axis=1)]) == ak.to_list(\n ak.sort(array, axis=1)\n )\n\n\ndef test_EmptyArray():\n array = ak.layout.EmptyArray()\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n assert str(ak.type(ak.sort(array))) == \"0 * float64\"\n assert str(ak.type(ak.argsort(array))) == \"0 * int64\"\n\n array2 = ak.Array([[], [], []])\n assert ak.to_list(ak.argsort(array2)) == [[], [], []]\n assert str(ak.type(ak.argsort(array2))) == \"3 * var * int64\"\n\n\ndef test_NumpyArray():\n array = ak.layout.NumpyArray(np.array([3.3, 2.2, 1.1, 5.5, 4.4]))\n assert ak.to_list(ak.argsort(array, axis=0, ascending=True, stable=False)) == [\n 2,\n 1,\n 0,\n 4,\n 3,\n ]\n assert ak.to_list(ak.argsort(array, axis=0, ascending=False, stable=False)) == [\n 3,\n 4,\n 0,\n 1,\n 2,\n ]\n\n assert ak.to_list(ak.sort(array, axis=0, ascending=True, stable=False)) == [\n 1.1,\n 2.2,\n 3.3,\n 4.4,\n 5.5,\n ]\n assert ak.to_list(ak.sort(array, axis=0, ascending=False, stable=False)) == [\n 5.5,\n 4.4,\n 3.3,\n 2.2,\n 1.1,\n ]\n\n array2 = ak.layout.NumpyArray(np.array([[3.3, 2.2, 4.4], [1.1, 5.5, 3.3]]))\n\n assert ak.to_list(\n ak.sort(array2, axis=1, ascending=True, stable=False)\n ) == ak.to_list(np.sort(array2, axis=1))\n assert ak.to_list(\n ak.sort(array2, axis=0, ascending=True, stable=False)\n ) == ak.to_list(np.sort(array2, axis=0))\n\n assert ak.to_list(\n ak.argsort(array2, axis=1, ascending=True, stable=False)\n ) == ak.to_list(np.argsort(array2, 1))\n assert ak.to_list(\n ak.argsort(array2, axis=0, ascending=True, stable=False)\n ) == ak.to_list(np.argsort(array2, 0))\n\n with pytest.raises(ValueError) as err:\n ak.sort(array2, axis=2, ascending=True, stable=False)\n assert str(err.value).startswith(\n \"axis=2 exceeds the depth of the nested list structure (which is 2)\"\n )\n\n\ndef test_IndexedOptionArray():\n array = ak.Array(\n [\n [None, None, 2.2, 1.1, 3.3],\n [None, None, None],\n [4.4, None, 5.5],\n [5.5, None, None],\n [-4.4, -5.5, -6.6],\n ]\n )\n\n assert ak.to_list(ak.sort(array, axis=0, ascending=True, stable=False)) == [\n [-4.4, -5.5, -6.6, 1.1, 3.3],\n [4.4, None, 2.2],\n [5.5, None, 5.5],\n [None, None, None],\n [None, None, None],\n ]\n\n assert ak.to_list(ak.sort(array, axis=1, ascending=True, stable=False)) == [\n [1.1, 2.2, 3.3, None, None],\n [None, None, None],\n [4.4, 5.5, None],\n [5.5, None, None],\n [-6.6, -5.5, -4.4],\n ]\n\n assert ak.to_list(ak.sort(array, axis=1, ascending=False, stable=True)) == [\n [3.3, 2.2, 1.1, None, None],\n [None, None, None],\n [5.5, 4.4, None],\n [5.5, None, None],\n [-4.4, -5.5, -6.6],\n ]\n\n assert ak.to_list(ak.sort(array, axis=1, ascending=False, stable=False)) == [\n [3.3, 2.2, 1.1, None, None],\n [None, None, None],\n [5.5, 4.4, None],\n [5.5, None, None],\n [-4.4, -5.5, -6.6],\n ]\n\n assert 
ak.to_list(ak.argsort(array, axis=0, ascending=True, stable=True)) == [\n [4, 4, 4, 0, 0],\n [2, 0, 0],\n [3, 1, 2],\n [0, 2, 1],\n [1, 3, 3],\n ]\n\n assert ak.to_list(ak.argsort(array, axis=0, ascending=True, stable=False)) == [\n [4, 4, 4, 0, 0],\n [2, 0, 0],\n [3, 1, 2],\n [0, 2, 1],\n [1, 3, 3],\n ]\n\n assert ak.to_list(ak.argsort(array, axis=0, ascending=False, stable=True)) == [\n [3, 4, 2, 0, 0],\n [2, 0, 0],\n [4, 1, 4],\n [0, 2, 1],\n [1, 3, 3],\n ]\n assert ak.to_list(ak.argsort(array, axis=0, ascending=False, stable=False)) == [\n [3, 4, 2, 0, 0],\n [2, 0, 0],\n [4, 1, 4],\n [0, 2, 1],\n [1, 3, 3],\n ]\n\n assert ak.to_list(ak.argsort(array, axis=1, ascending=True, stable=True)) == [\n [3, 2, 4, 0, 1],\n [0, 1, 2],\n [0, 2, 1],\n [0, 1, 2],\n [2, 1, 0],\n ]\n\n assert ak.to_list(ak.argsort(array, axis=1, ascending=True, stable=False)) == [\n [3, 2, 4, 0, 1],\n [0, 1, 2],\n [0, 2, 1],\n [0, 1, 2],\n [2, 1, 0],\n ]\n\n assert ak.to_list(ak.argsort(array, axis=1, ascending=False, stable=True)) == [\n [4, 2, 3, 0, 1],\n [0, 1, 2],\n [2, 0, 1],\n [0, 1, 2],\n [0, 1, 2],\n ]\n\n array2 = ak.Array([None, None, 1, -1, 30])\n assert ak.to_list(ak.argsort(array2, axis=0, ascending=True, stable=True)) == [\n 3,\n 2,\n 4,\n 0,\n 1,\n ]\n\n array3 = ak.Array(\n [[2.2, 1.1, 3.3], [], [4.4, 5.5], [5.5], [-4.4, -5.5, -6.6]]\n ).layout\n\n assert ak.to_list(ak.sort(array3, axis=1, ascending=False, stable=False)) == [\n [3.3, 2.2, 1.1],\n [],\n [5.5, 4.4],\n [5.5],\n [-4.4, -5.5, -6.6],\n ]\n\n assert ak.to_list(ak.sort(array3, axis=0, ascending=True, stable=False)) == [\n [-4.4, -5.5, -6.6],\n [],\n [2.2, 1.1],\n [4.4],\n [5.5, 5.5, 3.3],\n ]\n\n # FIXME: Based on NumPy list sorting:\n #\n # array([list([2.2, 1.1, 3.3]), list([]), list([4.4, 5.5]), list([5.5]),\n # list([-4.4, -5.5, -6.6])], dtype=object)\n # np.sort(array, axis=0)\n # array([list([]), list([-4.4, -5.5, -6.6]), list([2.2, 1.1, 3.3]),\n # list([4.4, 5.5]), list([5.5])], dtype=object)\n #\n # the result should be:\n #\n # [[ -4.4, -5.5, -6.6 ],\n # [ 2.2, 1.1, 3.3 ],\n # [ 4.4, 5.5 ],\n # [ 5.5 ],\n # []]\n\n # This can be done following the steps: pad, sort,\n # and dropna to strip off the None's\n #\n array4 = array3.rpad(3, 1)\n assert ak.to_list(array4) == [\n [2.2, 1.1, 3.3],\n [None, None, None],\n [4.4, 5.5, None],\n [5.5, None, None],\n [-4.4, -5.5, -6.6],\n ]\n\n array5 = ak.sort(array4, axis=0, ascending=True, stable=False)\n assert ak.to_list(array5) == [\n [-4.4, -5.5, -6.6],\n [2.2, 1.1, 3.3],\n [4.4, 5.5, None],\n [5.5, None, None],\n [None, None, None],\n ]\n\n array4 = array3.rpad(5, 1)\n assert ak.to_list(array4) == [\n [2.2, 1.1, 3.3, None, None],\n [None, None, None, None, None],\n [4.4, 5.5, None, None, None],\n [5.5, None, None, None, None],\n [-4.4, -5.5, -6.6, None, None],\n ]\n\n array5 = ak.sort(array4, axis=0, ascending=True, stable=False)\n assert ak.to_list(array5) == [\n [-4.4, -5.5, -6.6, None, None],\n [2.2, 1.1, 3.3, None, None],\n [4.4, 5.5, None, None, None],\n [5.5, None, None, None, None],\n [None, None, None, None, None],\n ]\n\n array5 = ak.argsort(array4, axis=0, ascending=True, stable=False)\n assert ak.to_list(array5) == [\n [4, 4, 4, 0, 0],\n [0, 0, 0, 1, 1],\n [2, 2, 1, 2, 2],\n [3, 1, 2, 3, 3],\n [1, 3, 3, 4, 4],\n ]\n\n # FIXME: implement dropna to strip off the None's\n #\n # array6 = array5.dropna(0)\n # assert ak.to_list(array6) == [\n # [ -4.4, -5.5, -6.6 ],\n # [ 2.2, 1.1, 3.3 ],\n # [ 4.4, 5.5 ],\n # [ 5.5 ],\n # []]\n\n content = ak.layout.NumpyArray(np.array([1.1, 2.2, 3.3, 
4.4, 5.5]))\n index1 = ak.layout.Index32(np.array([1, 2, 3, 4], dtype=np.int32))\n indexedarray1 = ak.layout.IndexedArray32(index1, content)\n assert ak.to_list(\n ak.argsort(indexedarray1, axis=0, ascending=True, stable=False)\n ) == [0, 1, 2, 3]\n\n index2 = ak.layout.Index64(np.array([1, 2, 3], dtype=np.int64))\n indexedarray2 = ak.layout.IndexedArray64(index2, indexedarray1)\n assert ak.to_list(\n ak.sort(indexedarray2, axis=0, ascending=False, stable=False)\n ) == [5.5, 4.4, 3.3]\n\n index3 = ak.layout.Index32(np.array([1, 2], dtype=np.int32))\n indexedarray3 = ak.layout.IndexedArray32(index3, indexedarray2)\n assert ak.to_list(ak.sort(indexedarray3, axis=0, ascending=True, stable=False)) == [\n 4.4,\n 5.5,\n ]\n\n\ndef test_3d():\n array = ak.layout.NumpyArray(\n np.array(\n [\n # axis 2: 0 1 2 3 4 # axis 1:\n [\n [1.1, 2.2, 3.3, 4.4, 5.5], # 0\n [6.6, 7.7, 8.8, 9.9, 10.10], # 1\n [11.11, 12.12, 13.13, 14.14, 15.15],\n ], # 2\n [\n [-1.1, -2.2, -3.3, -4.4, -5.5], # 3\n [-6.6, -7.7, -8.8, -9.9, -10.1], # 4\n [-11.11, -12.12, -13.13, -14.14, -15.15],\n ],\n ]\n )\n ) # 5\n assert ak.to_list(\n ak.argsort(array, axis=2, ascending=True, stable=False)\n ) == ak.to_list(np.argsort(array, 2))\n assert ak.to_list(\n ak.sort(array, axis=2, ascending=True, stable=False)\n ) == ak.to_list(np.sort(array, 2))\n assert ak.to_list(\n ak.argsort(array, axis=1, ascending=True, stable=False)\n ) == ak.to_list(np.argsort(array, 1))\n assert ak.to_list(\n ak.sort(array, axis=1, ascending=True, stable=False)\n ) == ak.to_list(np.sort(array, 1))\n\n assert ak.to_list(ak.sort(array, axis=1, ascending=False, stable=False)) == [\n [\n [11.11, 12.12, 13.13, 14.14, 15.15],\n [6.6, 7.7, 8.8, 9.9, 10.1],\n [1.1, 2.2, 3.3, 4.4, 5.5],\n ],\n [\n [-1.1, -2.2, -3.3, -4.4, -5.5],\n [-6.6, -7.7, -8.8, -9.9, -10.1],\n [-11.11, -12.12, -13.13, -14.14, -15.15],\n ],\n ]\n\n assert ak.to_list(\n ak.sort(array, axis=0, ascending=True, stable=False)\n ) == ak.to_list(np.sort(array, 0))\n assert ak.to_list(\n ak.argsort(array, axis=0, ascending=True, stable=False)\n ) == ak.to_list(np.argsort(array, 0))\n\n\ndef test_RecordArray():\n array = ak.Array(\n [\n {\"x\": 0.0, \"y\": []},\n {\"x\": 1.1, \"y\": [1]},\n {\"x\": 2.2, \"y\": [2, 2]},\n {\"x\": 3.3, \"y\": [3, 3, 3]},\n {\"x\": 4.4, \"y\": [4, 4, 4, 4]},\n {\"x\": 5.5, \"y\": [5, 5, 5]},\n {\"x\": 6.6, \"y\": [6, 6]},\n {\"x\": 7.7, \"y\": [7]},\n {\"x\": 8.8, \"y\": []},\n ]\n )\n assert ak.to_list(array) == [\n {\"x\": 0.0, \"y\": []},\n {\"x\": 1.1, \"y\": [1]},\n {\"x\": 2.2, \"y\": [2, 2]},\n {\"x\": 3.3, \"y\": [3, 3, 3]},\n {\"x\": 4.4, \"y\": [4, 4, 4, 4]},\n {\"x\": 5.5, \"y\": [5, 5, 5]},\n {\"x\": 6.6, \"y\": [6, 6]},\n {\"x\": 7.7, \"y\": [7]},\n {\"x\": 8.8, \"y\": []},\n ]\n\n assert ak.to_list(array.layout.sort(-1, True, False)) == {\n \"x\": [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8],\n \"y\": [[], [1], [2, 2], [3, 3, 3], [4, 4, 4, 4], [5, 5, 5], [6, 6], [7], []],\n }\n\n assert ak.to_list(array.layout.sort(-1, False, False)) == {\n \"x\": [8.8, 7.7, 6.6, 5.5, 4.4, 3.3, 2.2, 1.1, 0.0],\n \"y\": [[], [1], [2, 2], [3, 3, 3], [4, 4, 4, 4], [5, 5, 5], [6, 6], [7], []],\n }\n\n assert ak.to_list(array.layout.argsort(-1, True, False)) == {\n \"x\": [0, 1, 2, 3, 4, 5, 6, 7, 8],\n \"y\": [[], [0], [0, 1], [0, 1, 2], [0, 1, 2, 3], [0, 1, 2], [0, 1], [0], []],\n }\n\n assert ak.to_list(array.x.layout.argsort(0, True, False)) == [\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n ]\n assert ak.to_list(array.x.layout.argsort(0, False, False)) == [\n 8,\n 7,\n 6,\n 
5,\n 4,\n 3,\n 2,\n 1,\n 0,\n ]\n\n array_y = array.y\n assert ak.to_list(array_y) == [\n [],\n [1],\n [2, 2],\n [3, 3, 3],\n [4, 4, 4, 4],\n [5, 5, 5],\n [6, 6],\n [7],\n [],\n ]\n assert ak.to_list(array.y.layout.argsort(0, True, False)) == [\n # FIXME?\n [],\n [1],\n [2, 2],\n [3, 3, 3],\n [4, 4, 4, 4],\n [5, 5, 5],\n [6, 6],\n [7],\n []\n # [],\n # [0],\n # [1, 0],\n # [2, 1, 0],\n # [3, 2, 1, 0],\n # [4, 3, 2],\n # [5, 4],\n # [6],\n # [],\n ]\n\n assert ak.to_list(array.y.layout.argsort(1, True, True)) == [\n [],\n [0],\n [0, 1],\n [0, 1, 2],\n [0, 1, 2, 3],\n [0, 1, 2],\n [0, 1],\n [0],\n [],\n ]\n\n\ndef test_ByteMaskedArray():\n content = ak.from_iter(\n [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]], highlevel=False\n )\n mask = ak.layout.Index8(np.array([0, 0, 1, 1, 0], dtype=np.int8))\n array = ak.layout.ByteMaskedArray(mask, content, valid_when=False)\n assert ak.to_list(ak.argsort(array, axis=0, ascending=True, stable=False)) == [\n [0, 0, 0],\n [],\n [2, 2, 2, 2],\n None,\n None,\n ]\n\n assert ak.to_list(ak.sort(array, axis=0, ascending=True, stable=False)) == [\n [0.0, 1.1, 2.2],\n [],\n [6.6, 7.7, 8.8, 9.9],\n None,\n None,\n ]\n\n assert ak.to_list(ak.sort(array, axis=0, ascending=False, stable=False)) == [\n [6.6, 7.7, 8.8],\n [],\n [0.0, 1.1, 2.2, 9.9],\n None,\n None,\n ]\n\n assert ak.to_list(ak.argsort(array, axis=1, ascending=True, stable=False)) == [\n [0, 1, 2],\n [],\n None,\n None,\n [0, 1, 2, 3],\n ]\n\n assert ak.to_list(array.sort(1, False, False)) == [\n [2.2, 1.1, 0.0],\n [],\n None,\n None,\n [9.9, 8.8, 7.7, 6.6],\n ]\n\n\ndef test_UnionArray():\n content0 = ak.from_iter([[1.1, 2.2, 3.3], [], [4.4, 5.5]], highlevel=False)\n content1 = ak.from_iter(\n [[\"one\"], [\"two\"], [\"three\"], [\"four\"], [\"five\"]], highlevel=False\n )\n tags = ak.layout.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n index = ak.layout.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n array = ak.layout.UnionArray8_32(tags, index, [content0, content1])\n\n with pytest.raises(ValueError) as err:\n ak.sort(array, axis=1, ascending=True, stable=False)\n assert str(err.value).startswith(\"cannot sort UnionArray8_32\")\n\n\ndef test_sort_strings():\n content1 = ak.from_iter([\"one\", \"two\", \"three\", \"four\", \"five\"], highlevel=False)\n assert ak.to_list(content1) == [\"one\", \"two\", \"three\", \"four\", \"five\"]\n\n assert ak.to_list(ak.sort(content1, axis=0, ascending=True, stable=False)) == [\n \"five\",\n \"four\",\n \"one\",\n \"three\",\n \"two\",\n ]\n assert ak.to_list(ak.sort(content1, axis=0, ascending=False, stable=False)) == [\n \"two\",\n \"three\",\n \"one\",\n \"four\",\n \"five\",\n ]\n\n\ndef test_sort_bytestrings():\n array = ak.from_iter(\n [b\"one\", b\"two\", b\"three\", b\"two\", b\"two\", b\"one\", b\"three\"], highlevel=False\n )\n assert ak.to_list(array) == [\n b\"one\",\n b\"two\",\n b\"three\",\n b\"two\",\n b\"two\",\n b\"one\",\n b\"three\",\n ]\n\n assert ak.to_list(ak.sort(array, axis=0, ascending=True, stable=False)) == [\n b\"one\",\n b\"one\",\n b\"three\",\n b\"three\",\n b\"two\",\n b\"two\",\n b\"two\",\n ]\n\n assert ak.to_list(ak.argsort(array, axis=0, ascending=True, stable=True)) == [\n 0,\n 5,\n 2,\n 6,\n 1,\n 3,\n 4,\n ]\n\n\ndef test_sort_zero_length_arrays():\n array = ak.layout.IndexedArray64(\n ak.layout.Index64([]), ak.layout.NumpyArray([1, 2, 3])\n )\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n content0 = 
ak.from_iter([[1.1, 2.2, 3.3], [], [4.4, 5.5]], highlevel=False)\n content1 = ak.from_iter([\"one\", \"two\", \"three\", \"four\", \"five\"], highlevel=False)\n tags = ak.layout.Index8([])\n index = ak.layout.Index32([])\n array = ak.layout.UnionArray8_32(tags, index, [content0, content1])\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n content = ak.from_iter(\n [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]], highlevel=False\n )\n mask = ak.layout.Index8([])\n array = ak.layout.ByteMaskedArray(mask, content, valid_when=False)\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n array = ak.layout.NumpyArray([])\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n array = ak.layout.RecordArray([])\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n content = ak.layout.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n starts1 = ak.layout.Index64([])\n stops1 = ak.layout.Index64([])\n offsets1 = ak.layout.Index64(np.array([0]))\n array = ak.layout.ListArray64(starts1, stops1, content)\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n array = ak.layout.ListOffsetArray64(offsets1, content)\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n",
"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport cupy as cp # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_num_1():\n content = ak.Array(\n [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n ).layout\n bitmask = ak.layout.IndexU8(np.array([40, 34], dtype=np.uint8))\n array = ak.Array(ak.layout.BitMaskedArray(bitmask, content, False, 9, False))\n cuda_array = ak.to_kernels(array, \"cuda\")\n assert ak.num(cuda_array, 0) == ak.num(array, 0)\n assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()\n\n\ndef test_num_2():\n content = ak.Array(\n [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n ).layout\n bytemask = ak.layout.Index8(np.array([False, True, False], dtype=np.bool))\n array = ak.Array(ak.layout.ByteMaskedArray(bytemask, content, True))\n cuda_array = ak.to_kernels(array, \"cuda\")\n assert ak.num(cuda_array, 0) == ak.num(array, 0)\n assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()\n\n\ndef test_num_3():\n array = ak.Array(ak.layout.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])))\n cuda_array = ak.to_kernels(array, \"cuda\")\n assert ak.num(cuda_array, 0) == ak.num(array, 0)\n\n\ndef test_num_4():\n array = ak.Array(\n ak.layout.NumpyArray(np.array([[0.0, 1.1], [2.2, 3.3], [4.4, 5.5]]))\n )\n cuda_array = ak.to_kernels(array, \"cuda\")\n assert ak.num(cuda_array, 0) == ak.num(array, 0)\n assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()\n\n\ndef test_num_5():\n array = ak.Array(ak.layout.EmptyArray())\n cuda_array = ak.to_kernels(array, \"cuda\")\n assert ak.num(cuda_array, 0) == ak.num(array, 0)\n\n\ndef test_num_6():\n content = ak.layout.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 9]))\n array = ak.Array(ak.layout.ListOffsetArray64(offsets, content))\n cuda_array = ak.to_kernels(array, \"cuda\")\n assert ak.num(cuda_array, 0) == ak.num(array, 0)\n assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()\n\n\ndef test_num_7():\n content = ak.layout.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak.layout.IndexU32(np.array([0, 3, 3, 5, 6, 9]))\n array = ak.Array(ak.layout.ListOffsetArrayU32(offsets, content))\n cuda_array = ak.to_kernels(array, \"cuda\")\n assert ak.num(cuda_array, 0) == ak.num(array, 0)\n assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()\n\n\ndef test_num_8():\n content = ak.layout.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak.layout.ListOffsetArray64(offsets, content)\n regulararray = ak.layout.RegularArray(listoffsetarray, 2)\n starts = ak.layout.Index64(np.array([0, 1]))\n stops = ak.layout.Index64(np.array([2, 3]))\n listarray = ak.layout.ListArray64(starts, stops, regulararray)\n\n cuda_listoffsetarray = ak.to_kernels(listoffsetarray, \"cuda\")\n assert ak.num(cuda_listoffsetarray, 0) == ak.num(ak.Array(listoffsetarray), 0)\n assert (\n ak.num(cuda_listoffsetarray, 1).tolist()\n == ak.num(ak.Array(listoffsetarray), 1).tolist()\n )\n\n cuda_regulararray = ak.to_kernels(regulararray, \"cuda\")\n assert ak.num(cuda_regulararray, 0) == ak.num(ak.Array(regulararray), 0)\n assert (\n ak.num(cuda_regulararray, 1).tolist()\n == 
ak.num(ak.Array(regulararray), 1).tolist()\n )\n\n cuda_listarray = ak.to_kernels(listarray, \"cuda\")\n assert ak.num(cuda_listarray, 0) == ak.num(ak.Array(listarray), 0)\n assert ak.num(cuda_listarray, 1).tolist() == ak.num(ak.Array(listarray), 1).tolist()\n\n content1 = ak.layout.NumpyArray(np.array([1, 2, 3, 4, 5]))\n content2 = ak.layout.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak.layout.Index32(np.array([0, 3, 3, 5, 6, 9]))\n\n recordarray = ak.Array(\n ak.layout.RecordArray(\n [content1, listoffsetarray, content2, content1],\n keys=[\"one\", \"two\", \"2\", \"wonky\"],\n )\n )\n\n cuda_recordarray = ak.to_kernels(recordarray, \"cuda\")\n assert ak.num(cuda_recordarray, 0).tolist() == ak.num(recordarray, 0).tolist()\n\n content0 = ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout\n content = ak.Array(\n [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n ).layout\n tags = ak.layout.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n index = ak.layout.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n unionarray = ak.Array(ak.layout.UnionArray8_32(tags, index, [content0, content1]))\n\n cuda_unionarray = ak.to_kernels(unionarray, \"cuda\")\n assert ak.num(cuda_unionarray, 0) == ak.num(unionarray, 0)\n\n\ndef test_num_9():\n content = ak.layout.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n index = ak.layout.Index32(np.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=np.int64))\n indexedarray = ak.Array(ak.layout.IndexedArray32(index, content))\n\n cuda_indexedarray = ak.to_kernels(indexedarray, \"cuda\")\n assert ak.num(cuda_indexedarray, 0) == ak.num(indexedarray, 0)\n\n ioa = ak.Array(\n ak.layout.IndexedOptionArray32(\n ak.layout.Index32([-30, 19, 6, 7, -3, 21, 13, 22, 17, 9, -12, 16]),\n ak.layout.NumpyArray(\n np.array(\n [\n 5.2,\n 1.7,\n 6.7,\n -0.4,\n 4.0,\n 7.8,\n 3.8,\n 6.8,\n 4.2,\n 0.3,\n 4.6,\n 6.2,\n 6.9,\n -0.7,\n 3.9,\n 1.6,\n 8.7,\n -0.7,\n 3.2,\n 4.3,\n 4.0,\n 5.8,\n 4.2,\n 7.0,\n 5.6,\n 3.8,\n ]\n )\n ),\n )\n )\n cuda_ioa = ak.to_kernels(ioa, \"cuda\")\n ak.to_kernels(cuda_ioa, \"cpu\")\n assert ak.num(cuda_ioa, 0) == ak.num(ioa, 0)\n",
"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\nto_list = ak._v2.operations.convert.to_list\n\n\ndef test_bytemaskedarray():\n array = ak._v2.operations.convert.from_iter(\n [[0.0, 1.1, 2.2], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]], highlevel=False\n )\n mask2 = ak._v2.index.Index8(np.array([0, 1, 0, 0], dtype=np.int8))\n maskedarray2 = ak._v2.contents.ByteMaskedArray(mask2, array, valid_when=False)\n\n assert to_list(maskedarray2.project()) == [\n [0.0, 1.1, 2.2],\n [5.5],\n [6.6, 7.7, 8.8, 9.9],\n ]\n\n assert to_list(maskedarray2.project(mask2)) == [\n [0.0, 1.1, 2.2],\n [5.5],\n [6.6, 7.7, 8.8, 9.9],\n ]\n\n\ndef test_bitmaskedarray():\n array = ak._v2.operations.convert.from_iter(\n [[0.0, 1.1, 2.2], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]], highlevel=False\n )\n mask = ak._v2.index.IndexU8(np.array([0, 1, 0, 0], dtype=np.uint8))\n maskedarray2 = ak._v2.contents.BitMaskedArray(\n mask, array, valid_when=False, length=4, lsb_order=True\n )\n\n assert to_list(maskedarray2.project()) == [\n [0.0, 1.1, 2.2],\n [3.3, 4.4],\n [5.5],\n [6.6, 7.7, 8.8, 9.9],\n ]\n\n\ndef test_unmasked():\n array = ak._v2.operations.convert.from_iter(\n [[0.0, 1.1, 2.2], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]], highlevel=False\n )\n unmaskedarray2 = ak._v2.contents.UnmaskedArray(array)\n\n assert to_list(unmaskedarray2.project()) == [\n [0.0, 1.1, 2.2],\n [3.3, 4.4],\n [5.5],\n [6.6, 7.7, 8.8, 9.9],\n ]\n\n\ndef test_indexed():\n array2 = ak._v2.highlevel.Array([1, 2, 3, None, 4, None, None, 5]).layout\n mask2 = ak._v2.index.Index8(np.array([0, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n\n assert to_list(array2.project()) == [1, 2, 3, 4, 5]\n assert to_list(array2.project(mask2)) == [1, 3]\n"
] | [
[
"numpy.asarray",
"numpy.array"
],
[
"numpy.argsort",
"numpy.array",
"numpy.sort"
],
[
"numpy.array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
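The sort tests above repeatedly rely on the identity that indexing an array by its own argsort reproduces the sorted array. A plain NumPy illustration of that identity (NumPy only, so no awkward-specific behaviour is assumed):

import numpy as np

values = np.array([3.3, 2.2, 1.1, 5.5, 4.4])
order = np.argsort(values)                      # indices that would sort `values`
assert np.array_equal(values[order], np.sort(values))
print(order, values[order])                     # [2 1 0 4 3] [1.1 2.2 3.3 4.4 5.5]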
naoyam/lbann | [
"d30e053b6f86d1cf8cca1d61c94bbbdbfc4945c4"
] | [
"bamboo/unit_tests/test_unit_layer_squared_difference.py"
] | [
"import functools\nimport operator\nimport os\nimport os.path\nimport sys\nimport numpy as np\n\n# Bamboo utilities\ncurrent_file = os.path.realpath(__file__)\ncurrent_dir = os.path.dirname(current_file)\nsys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))\nimport tools\n\n# ==============================================\n# Objects for Python data reader\n# ==============================================\n# Note: The Python data reader imports this file as a module and calls\n# the functions below to ingest data.\n\n# Data\nnp.random.seed(2019102415)\n_samples = np.random.normal(size=(23,2,7)).astype(np.float32)\n\n# Sample access functions\ndef get_sample(index):\n return _samples[index].reshape(-1)\ndef num_samples():\n return _samples.shape[0]\ndef sample_dims():\n return (2*_samples.shape[-1],)\n\n# ==============================================\n# Setup LBANN experiment\n# ==============================================\n\ndef setup_experiment(lbann):\n \"\"\"Construct LBANN experiment.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n trainer = lbann.Trainer()\n model = construct_model(lbann)\n data_reader = construct_data_reader(lbann)\n optimizer = lbann.NoOptimizer()\n return trainer, model, data_reader, optimizer\n\ndef construct_model(lbann):\n \"\"\"Construct LBANN model.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n\n # Input data\n # Note: Sum with weights layers so that gradient checking will\n # verify that error signals are correct.\n slice_size = _samples.shape[-1]\n x0_weights = lbann.Weights(optimizer=lbann.SGD(),\n initializer=lbann.ConstantInitializer(value=0.0),\n name='input0_weights')\n x1_weights = lbann.Weights(optimizer=lbann.SGD(),\n initializer=lbann.ConstantInitializer(value=0.0),\n name='input1_weights')\n x_slice = lbann.Slice(lbann.Input(),\n slice_points=tools.str_list([0, slice_size, 2*slice_size]))\n x0 = lbann.Sum(x_slice,\n lbann.WeightsLayer(weights=x0_weights, dims=str(slice_size)))\n x1 = lbann.Sum(x_slice,\n lbann.WeightsLayer(weights=x1_weights, dims=str(slice_size)))\n x0_lbann = x0\n x1_lbann = x1\n\n # Objects for LBANN model\n obj = []\n metrics = []\n callbacks = []\n\n # ------------------------------------------\n # Data-parallel layout\n # ------------------------------------------\n\n # LBANN implementation\n x0 = x0_lbann\n x1 = x1_lbann\n y = lbann.SquaredDifference(x0, x1, data_layout='data_parallel')\n z = lbann.L2Norm2(y)\n obj.append(z)\n metrics.append(lbann.Metric(z, name='data-parallel layout'))\n\n # NumPy implementation\n vals = []\n for i in range(num_samples()):\n x = get_sample(i).astype(np.float64)\n x0 = x[:slice_size]\n x1 = x[slice_size:]\n y = (x1-x0)**2\n z = tools.numpy_l2norm2(y)\n vals.append(z)\n val = np.mean(vals)\n tol = 8 * val * np.finfo(np.float32).eps\n callbacks.append(lbann.CallbackCheckMetric(\n metric=metrics[-1].name,\n lower_bound=val-tol,\n upper_bound=val+tol,\n error_on_failure=True,\n execution_modes='test'))\n\n # ------------------------------------------\n # Model-parallel layout\n # ------------------------------------------\n\n # LBANN implementation\n x0 = x0_lbann\n x1 = x1_lbann\n y = lbann.SquaredDifference(x0, x1, data_layout='model_parallel')\n z = lbann.L2Norm2(y)\n obj.append(z)\n metrics.append(lbann.Metric(z, name='model-parallel layout, unbiased'))\n\n # NumPy implementation\n vals = []\n for i in range(num_samples()):\n x = get_sample(i).astype(np.float64)\n x0 = x[:slice_size]\n x1 = x[slice_size:]\n y 
= (x1-x0)**2\n z = tools.numpy_l2norm2(y)\n vals.append(z)\n val = np.mean(vals)\n tol = 8 * val * np.finfo(np.float32).eps\n callbacks.append(lbann.CallbackCheckMetric(\n metric=metrics[-1].name,\n lower_bound=val-tol,\n upper_bound=val+tol,\n error_on_failure=True,\n execution_modes='test'))\n\n # ------------------------------------------\n # Gradient checking\n # ------------------------------------------\n\n callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))\n\n # ------------------------------------------\n # Construct model\n # ------------------------------------------\n\n mini_batch_size = num_samples() // 2\n num_epochs = 0\n return lbann.Model(mini_batch_size,\n num_epochs,\n layers=lbann.traverse_layer_graph(x0_lbann),\n objective_function=obj,\n metrics=metrics,\n callbacks=callbacks)\n\ndef construct_data_reader(lbann):\n \"\"\"Construct Protobuf message for Python data reader.\n\n The Python data reader will import the current Python file to\n access the sample access functions.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n\n # Note: The training data reader should be removed when\n # https://github.com/LLNL/lbann/issues/1098 is resolved.\n message = lbann.reader_pb2.DataReader()\n message.reader.extend([\n tools.create_python_data_reader(\n lbann,\n current_file,\n 'get_sample',\n 'num_samples',\n 'sample_dims',\n 'train'\n )\n ])\n message.reader.extend([\n tools.create_python_data_reader(\n lbann,\n current_file,\n 'get_sample',\n 'num_samples',\n 'sample_dims',\n 'test'\n )\n ])\n return message\n\n# ==============================================\n# Setup PyTest\n# ==============================================\n\n# Create test functions that can interact with PyTest\nfor test in tools.create_tests(setup_experiment, __file__):\n globals()[test.__name__] = test\n"
] | [
[
"numpy.random.normal",
"numpy.mean",
"numpy.random.seed",
"numpy.finfo"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
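The NumPy reference path in the LBANN test above computes an element-wise squared difference and then reduces it with `tools.numpy_l2norm2`. A small sketch of that reference computation, assuming (hypothetically) that `numpy_l2norm2` is the squared L2 norm, i.e. a sum of squares:

import numpy as np

def l2norm2(y):
    # assumed equivalent of tools.numpy_l2norm2: squared L2 norm of y
    return float(np.sum(np.square(y)))

x0 = np.array([1.0, 2.0, 3.0])
x1 = np.array([1.5, 1.0, 3.0])
y = (x1 - x0) ** 2           # element-wise squared difference, as in the test
print(y, l2norm2(y))         # [0.25 1.   0.  ] 1.0625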
hizb-resume/LTD-local-track-to-detect-for-VID | [
"7147ac7c6cd4b22a956aaaabaa151e5ed5410c68",
"7147ac7c6cd4b22a956aaaabaa151e5ed5410c68"
] | [
"projects/adnet/mains/ADNet2.py",
"projects/adnet/utils/overlap_ratio.py"
] | [
"import _init_paths\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\nfrom trainers.adnet_train_sl import adnet_train_sl\nimport argparse\nfrom options.general2 import opts\nfrom models.ADNet import adnet\nfrom utils.get_train_videos import get_train_videos\nfrom trainers.adnet_train_rl import adnet_train_rl\nimport torch\ntorch.multiprocessing.set_start_method('spawn', force=True)\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport os\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\nparser = argparse.ArgumentParser(\n description='ADNet training')\n# parser.add_argument('--resume', default='weights/ADNet_SL_backup.pth', type=str, help='Resume from checkpoint')\n# parser.add_argument('--resume', default='weights/ADNet_RL_2epoch8_backup.pth', type=str, help='Resume from checkpoint')\n# parser.add_argument('--resume', default='weights/ADNet_SL_epoch27_final.pth', type=str, help='Resume from checkpoint')\nparser.add_argument('--resume', default='models/weights_mul_step3_new/ADNet_SL_.pth', type=str, help='Resume from checkpoint')\nparser.add_argument('--num_workers', default=6, type=int, help='Number of workers used in dataloading')\nparser.add_argument('--start_iter', default=2, type=int, help='Begin counting iterations starting from this value (should be used with resume)')\nparser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda to train model')\nparser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD')\nparser.add_argument('--visualize', default=True, type=str2bool, help='Use tensorboardx to for loss visualization')\nparser.add_argument('--send_images_to_visualization', type=str2bool, default=False, help='Sample a random image from each 10th batch, send it to visdom after augmentations step')\nparser.add_argument('--save_folder', default='models/weights_del', help='Location to save checkpoint models')\nparser.add_argument('--tensorlogdir', default='logs/tensorboardx_log_del', help='Location to save tensorboardx_log')\nparser.add_argument('--train_consecutive', default=False, type=str2bool, help='Whether to train consecutive frames')\nparser.add_argument('--train_mul_step', default=False, type=str2bool, help='Whether to train multiple steps')\n\nparser.add_argument('--save_file', default='ADNet_SL_', type=str, help='save file part of file name for SL')\nparser.add_argument('--save_file_RL', default='ADNet_RL_', type=str, help='save file part of file name for RL')\nparser.add_argument('--start_epoch', default=0, type=int, help='Begin counting epochs starting from this value')\n\nparser.add_argument('--run_supervised', default=True, type=str2bool, help='Whether to run supervised learning or not')\n\nparser.add_argument('--multidomain', default=False, type=str2bool, help='Separating weight for each videos (default) or not')\n\nparser.add_argument('--save_result_images', default=False, type=str2bool, help='Whether to save the results or not. 
Save folder: images/')\nparser.add_argument('--display_images', default=False, type=str2bool, help='Whether to display images or not')\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n # Supervised Learning part\n if args.run_supervised:\n #opts['minibatch_size'] = 128\n opts['minibatch_size'] = 256\n # train with supervised learning\n _, _, train_videos = adnet_train_sl(args, opts)\n args.resume = os.path.join(args.save_folder, args.save_file) + '.pth'\n\n # reinitialize the network with network from SL\n net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=True,\n multidomain=args.multidomain)\n\n args.start_epoch = 0\n args.start_iter = 0\n\n else:\n assert args.resume is not None, \\\n \"Please put result of supervised learning or reinforcement learning with --resume (filename)\"\n train_videos = get_train_videos(opts)\n if train_videos == None:\n opts['num_videos'] = 1\n else:\n opts['num_videos'] = len(train_videos['video_names'])\n\n if args.start_iter == 0: # means the weight came from the SL\n # net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=True, multidomain=args.multidomain)\n net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=False,\n multidomain=args.multidomain)\n else: # resume the adnet\n net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=False, multidomain=args.multidomain)\n\n if args.cuda:\n net = nn.DataParallel(net)\n cudnn.benchmark = True\n\n net = net.cuda()\n\n # Reinforcement Learning part\n #opts['minibatch_size'] = 32\n opts['minibatch_size'] = 128\n\n net = adnet_train_rl(net, domain_specific_nets, train_videos, opts, args)\n\n\n\n",
"# matlab source:\n# https://github.com/hellbell/ADNet/blob/3a7955587b5d395401ebc94a5ab067759340680d/utils/overlap_ratio.m\n\nimport numpy as np\n\n\n# def rectint(a, b): # returns 0 if rectangles don't intersect\n# assert (isinstance(a, (list, np.ndarray)) and isinstance(b, (list, np.ndarray))) or \\\n# (not isinstance(a, (list, np.ndarray)) and not isinstance(b, (list, np.ndarray)))\n#\n# if isinstance(a, (list, np.ndarray)) and isinstance(b, (list, np.ndarray)):\n# results = []\n# for _a, _b in zip(a, b):\n# _a_xmin = _a[0]\n# _a_ymin = _a[1]\n# _a_xmax = _a[0] + _a[2]\n# _a_ymax = _a[1] + _a[3]\n#\n# _b_xmin = _b[0]\n# _b_ymin = _b[1]\n# _b_xmax = _b[0] + _b[2]\n# _b_ymax = _b[1] + _b[3]\n#\n# dx = min(_a_xmax, _b_xmax) - max(_a_xmin, _b_xmin)\n# dy = min(_a_ymax, _b_ymax) - max(_a_ymin, _b_ymin)\n#\n# if (dx >= 0) and (dy >= 0):\n# results.append(dx * dy)\n# else:\n# results.append(0)\n#\n# return results\n#\n# else:\n# a_xmin = a[0]\n# a_ymin = a[1]\n# a_xmax = a[0] + a[2]\n# a_ymax = a[1] + a[3]\n#\n# b_xmin = b[0]\n# b_ymin = b[1]\n# b_xmax = b[0] + b[2]\n# b_ymax = b[1] + b[3]\n#\n# dx = min(a_xmax, b_xmax) - max(a_xmin, b_xmin)\n# dy = min(a_ymax, b_ymax) - max(a_ymin, b_ymin)\n#\n# if (dx >= 0) and (dy >= 0):\n# return dx*dy\n# else:\n# return 0\n#\n# # each rectangle is [x,y,width,height]\n# # x and y specifies one corner of the rectangle\n# # width and height define the size in units along the x and y axes respectively.\n# def overlap_ratio(rect1, rect2):\n# inter_area = rectint(rect1, rect2)\n# union_area = np.multiply(rect1[:, 2], rect1[:, 3]) + np.multiply(rect1[:, 2], rect1[:, 3]) - inter_area\n#\n# r = np.divide(inter_area, union_area)\n# return r\n\n\n# https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/\ndef overlap_ratio(rect1, rect2):\n assert isinstance(rect1, (list, np.ndarray)) and isinstance(rect2, (list, np.ndarray))\n\n if len(np.array(rect1).shape) == 2 and len(np.array(rect2).shape) == 2:\n\n iou = []\n\n for _rect1, _rect2 in zip(rect1, rect2):\n\n boxA = [_rect1[0], _rect1[1], _rect1[0] + _rect1[2], _rect1[1] + _rect1[3]]\n boxB = [_rect2[0], _rect2[1], _rect2[0] + _rect2[2], _rect2[1] + _rect2[3]]\n\n # determine the (x, y)-coordinates of the intersection rectangle\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n\n # compute the area of intersection rectangle\n interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)\n\n # compute the area of both the prediction and ground-truth\n # rectangles\n boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n _iou = interArea / float(boxAArea + boxBArea - interArea)\n\n if _iou < 0:\n _iou = 0\n\n iou.append(_iou)\n else:\n assert len(np.array(rect1).shape) == len(np.array(rect2).shape)\n\n boxA = [rect1[0], rect1[1], rect1[0] + rect1[2], rect1[1] + rect1[3]]\n boxB = [rect2[0], rect2[1], rect2[0] + rect2[2], rect2[1] + rect2[3]]\n\n # determine the (x, y)-coordinates of the intersection rectangle\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n\n # compute the area of intersection rectangle\n interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)\n\n # compute the area of both the prediction and ground-truth\n 
# rectangles\n boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = interArea / float(boxAArea + boxBArea - interArea)\n\n if iou < 0:\n iou = 0\n\n # return the intersection over union value\n return iou\n"
] | [
[
"torch.multiprocessing.set_start_method",
"torch.nn.DataParallel",
"tensorflow.compat.v1.logging.set_verbosity"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ruclion/Fantasy_Mix-Lingual_Tacotron_Version_7_NOVAE-Phoneme-HCSI-NOLID_DBMIX | [
"baa4c8c3f3ba80acf68cec88aed53084a97c8aa1"
] | [
"tacotron/models/tacotron.py"
] | [
"import tensorflow as tf \nfrom tacotron.utils.symbols import symbols\nfrom tacotron.utils.symbols import tone_stress_symbols_max_no\nfrom tacotron.utils.symbols import symbols_tag\nfrom infolog import log\nfrom tacotron.models.helpers import TacoTrainingHelper, TacoTestHelper\nfrom tacotron.models.modules import *\nfrom tensorflow.contrib.seq2seq import dynamic_decode\nfrom tacotron.models.Architecture_wrappers import TacotronEncoderCell, VAECell, TacotronDecoderCell\nfrom tacotron.models.custom_decoder import CustomDecoder\nfrom tacotron.models.attention import LocationSensitiveAttention\n\nimport numpy as np\n\nassert symbols_tag == 'MIX_Phoneme_Version'\n\ndef split_func(x, split_pos):\n\trst = []\n\tstart = 0\n\t# x will be a numpy array with the contents of the placeholder below\n\tfor i in range(split_pos.shape[0]):\n\t\trst.append(x[:,start:start+split_pos[i]])\n\t\tstart += split_pos[i]\n\treturn rst\n\nclass Tacotron():\n\t\"\"\"Tacotron-2 Feature prediction Model.\n\t\"\"\"\n\tdef __init__(self, hparams):\n\t\tself._hparams = hparams\n\n\tdef initialize(self, inputs, inputs_tone_stress, speaker_labels, language_labels, input_lengths, mel_targets=None, stop_token_targets=None, linear_targets=None, targets_lengths=None, gta=False,\n\t\t\tglobal_step=None, is_training=False, is_evaluating=False, split_infos=None):\n\t\t\"\"\"\n\t\tInitializes the model for inference\n\t\tsets \"mel_outputs\" and \"alignments\" fields.\n\t\tArgs:\n\t\t\t- inputs: int32 Tensor with shape [N, T_in] where N is batch size, T_in is number of\n\t\t\t steps in the input time series, and values are character IDs\n\t\t\t speaker_labels: note the speaker id\n\t\t\t language_labels:note the language id\n\t\t\t- input_lengths: int32 Tensor with shape [N] where N is batch size and values are the lengths\n\t\t\tof each sequence in inputs.\n\t\t\t- mel_targets: float32 Tensor with shape [N, T_out, M] where N is batch size, T_out is number\n\t\t\tof steps in the output time series, M is num_mels, and values are entries in the mel\n\t\t\tspectrogram. 
Only needed for training.\n\t\t\"\"\"\n\t\tif mel_targets is None and stop_token_targets is not None:\n\t\t\traise ValueError('no multi targets were provided but token_targets were given')\n\t\tif mel_targets is not None and stop_token_targets is None and not gta:\n\t\t\traise ValueError('Mel targets are provided without corresponding token_targets')\n\t\tif not gta and self._hparams.predict_linear==True and linear_targets is None and is_training:\n\t\t\traise ValueError('Model is set to use post processing to predict linear spectrograms in training but no linear targets given!')\n\t\tif gta and linear_targets is not None:\n\t\t\traise ValueError('Linear spectrogram prediction is not supported in GTA mode!')\n\t\tif is_training and self._hparams.mask_decoder and targets_lengths is None:\n\t\t\traise RuntimeError('Model set to mask paddings but no targets lengths provided for the mask!')\n\t\tif is_training and is_evaluating:\n\t\t\traise RuntimeError('Model can not be in training and evaluation modes at the same time!')\n\n\t\t# self.inputs_printout = inputs\n\t\t# self.inputs_tone_stress_printout = inputs_tone_stress\n\n\t\tsplit_device = '/cpu:0' if self._hparams.tacotron_num_gpus > 1 or self._hparams.split_on_cpu else '/gpu:{}'.format(self._hparams.tacotron_gpu_start_idx)\n\t\twith tf.device(split_device):\n\t\t\thp = self._hparams\n\t\t\tlout_int = [tf.int32]*hp.tacotron_num_gpus\n\t\t\tlout_float = [tf.float32]*hp.tacotron_num_gpus\n\n\t\t\ttower_input_lengths = tf.split(input_lengths, num_or_size_splits=hp.tacotron_num_gpus, axis=0)\n\t\t\ttower_targets_lengths = tf.split(targets_lengths, num_or_size_splits=hp.tacotron_num_gpus, axis=0) if targets_lengths is not None else targets_lengths\n\t\t\ttower_speaker_labels = tf.split(speaker_labels, num_or_size_splits=hp.tacotron_num_gpus, axis=0)\n\t\t\ttower_language_labels = tf.split(language_labels, num_or_size_splits=hp.tacotron_num_gpus, axis=0)\n\n\t\t\tp_inputs = tf.py_func(split_func, [inputs, split_infos[:, 0]], lout_int)\n\t\t\tp_inputs_tone_stress = tf.py_func(split_func, [inputs_tone_stress, split_infos[:, 0]], lout_int)\n\t\t\tp_mel_targets = tf.py_func(split_func, [mel_targets, split_infos[:,1]], lout_float) if mel_targets is not None else mel_targets\n\t\t\tp_stop_token_targets = tf.py_func(split_func, [stop_token_targets, split_infos[:,2]], lout_float) if stop_token_targets is not None else stop_token_targets\n\t\t\tp_linear_targets = tf.py_func(split_func, [linear_targets, split_infos[:, 3]], lout_float) if linear_targets is not None else linear_targets\n\n\t\t\ttower_inputs = []\n\t\t\ttower_inputs_tone_stress = []\n\t\t\ttower_mel_targets = []\n\t\t\ttower_stop_token_targets = []\n\t\t\ttower_linear_targets = []\n\n\n\t\t\tbatch_size = tf.shape(inputs)[0]\n\t\t\tmel_channels = hp.num_mels\n\t\t\tlinear_channels = hp.num_freq\n\t\t\tfor i in range (hp.tacotron_num_gpus):\n\t\t\t\ttower_inputs.append(tf.reshape(p_inputs[i], [batch_size, -1]))\n\t\t\t\ttower_inputs_tone_stress.append(tf.reshape(p_inputs_tone_stress[i], [batch_size, -1]))\n\t\t\t\tif p_mel_targets is not None:\n\t\t\t\t\ttower_mel_targets.append(tf.reshape(p_mel_targets[i], [batch_size, -1, mel_channels]))\n\t\t\t\tif p_stop_token_targets is not None:\n\t\t\t\t\ttower_stop_token_targets.append(tf.reshape(p_stop_token_targets[i], [batch_size, -1]))\n\t\t\t\tif p_linear_targets is not None:\n\t\t\t\t\ttower_linear_targets.append(tf.reshape(p_linear_targets[i], [batch_size, -1, linear_channels]))\n\n\t\tself.tower_decoder_output = []\n\t\tself.tower_alignments = 
[]\n\t\tself.tower_stop_token_prediction = []\n\t\tself.tower_mel_outputs = []\n\t\tself.tower_linear_outputs = []\n\t\tself.tower_predict_speaker_labels = []\n\n\t\t# 添加分别的phoneme embedding和 声调重读embedding 和 concat的inputs embedding\n\t\ttower_embedded_inputs_phoneme = []\n\t\ttower_embedded_inputs_tone_stress = []\n\t\ttower_embedded_inputs_concat = []\n\t\ttower_enc_conv_output_shape = []\n\t\ttower_encoder_outputs = []\n\t\ttower_residual = []\n\t\ttower_projected_residual = []\n\n\t\t# 1. Declare GPU Devices\n\t\tgpus = [\"/gpu:{}\".format(i) for i in range(hp.tacotron_gpu_start_idx, hp.tacotron_gpu_start_idx+hp.tacotron_num_gpus)]\n\t\tfor i in range(hp.tacotron_num_gpus):\n\t\t\twith tf.device(tf.train.replica_device_setter(ps_tasks=1,ps_device=\"/cpu:0\",worker_device=gpus[i])):\n\t\t\t\twith tf.variable_scope('inference') as scope:\n\t\t\t\t\tassert hp.tacotron_teacher_forcing_mode in ('constant', 'scheduled')\n\t\t\t\t\tif hp.tacotron_teacher_forcing_mode == 'scheduled' and is_training:\n\t\t\t\t\t\tassert global_step is not None\n\n\t\t\t\t\t#GTA is only used for predicting mels to train Wavenet vocoder, so we ommit post processing when doing GTA synthesis\n\t\t\t\t\tpost_condition = hp.predict_linear and not gta\n\n\t\t\t\t\t# tf.print(tower_inputs[i])\n\t\t\t\t\t# tf.print(tower_inputs[i])\n\n\t\t\t\t\t# phoneme Embeddings ==> [batch_size, sequence_length, embedding_dim], 512\n\t\t\t\t\tself.phoneme_embedding_table = tf.get_variable(\n\t\t\t\t\t\t'inputs_phoneme_embedding', [len(symbols), hp.phoneme_embedding_dim], dtype=tf.float32)\n\t\t\t\t\tembedded_inputs_phoneme = tf.nn.embedding_lookup(self.phoneme_embedding_table, tower_inputs[i])\n\n\t\t\t\t\t# tone and stress Embeddings ==> [batch_size, sequence_length, embedding_dim], 16\n\t\t\t\t\tself.tone_stress_embedding_table = tf.get_variable(\n\t\t\t\t\t\t'inputs_tone_stress_embedding', [tone_stress_symbols_max_no, hp.tone_stress_embedding_dim], dtype=tf.float32)\n\t\t\t\t\tembedded_inputs_tone_stress = tf.nn.embedding_lookup(self.tone_stress_embedding_table, tower_inputs_tone_stress[i])\n\n\t\t\t\t\t# 拼接, 512 + 16\n\t\t\t\t\tembedded_inputs_concat = tf.concat([embedded_inputs_phoneme, embedded_inputs_tone_stress], axis=-1)\n\n\n\n\t\t\t\t\tself.speaker_embedding_table = tf.get_variable(\n\t\t\t\t\t\t'speaker_embedding', [hp.speaker_num, hp.speaker_dim], dtype=tf.float32)\n\t\t\t\t\tembedded_speaker_label = tf.nn.embedding_lookup(self.speaker_embedding_table, tower_speaker_labels[i])\n\t\t\t\t\t\n\t\t\t\t\t# phoneme天然分开语言, 先不使用LID\n\t\t\t\t\t# self.language_embedding_table = tf.get_variable(\n\t\t\t\t\t# \t'language_embedding', [hp.language_num, hp.language_dim], dtype=tf.float32)\n\t\t\t\t\t# embedded_language_label = tf.nn.embedding_lookup(self.language_embedding_table, tower_language_labels[i])\n\n\t\t\t\t\t#Encoder Cell ==> [batch_size, encoder_steps, encoder_lstm_units]\n\t\t\t\t\tencoder_cell = TacotronEncoderCell(\n\t\t\t\t\t\tEncoderConvolutions(is_training, hparams=hp, scope='encoder_convolutions'),\n\t\t\t\t\t\tEncoderRNN(is_training, size=hp.encoder_lstm_units,\n\t\t\t\t\t\t\tzoneout=hp.tacotron_zoneout_rate, scope='encoder_LSTM'))\n\n\t\t\t\t\tencoder_outputs = encoder_cell(embedded_inputs_concat, tower_input_lengths[i])\n\n\t\t\t\t\t#For shape visualization purpose\n\t\t\t\t\tenc_conv_output_shape = encoder_cell.conv_output_shape\n\n\t\t\t\t\t# Adversarial Speaker-Classifiers,\tinput:encoder_output,output:predicted speaker_label\n\t\t\t\t\tspeaker_classify = Speaker_Classifier(is_training, 
layer_size=hp.softmax_hidden_layer,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t speaker_size=hp.speaker_num)\n\t\t\t\t\tpredict_speaker_labels = speaker_classify(encoder_outputs, hp.grad_rev_scale)\n\n\t\t\t\t\t# # Variational AutoEncoder\n\t\t\t\t\t# if is_training:\n\t\t\t\t\t# \tVAE_cell = VAECell(VAEConvolutions(is_training, hparams=hp, scope='VAE_convolutions'),\n\t\t\t\t\t# \t\t\t\t\t VAERNN(is_training, layers=hp.VAE_lstm_num_layers,\n\t\t\t\t\t# \t\t\t\t\t\t\t size=hp.VAE_lstm_layer_size,\n\t\t\t\t\t# \t\t\t\t\t\t\t zoneout=hp.tacotron_zoneout_rate, scope='VAE_LSTM'), hp.VAE_pool_size, hp.VAE_D_size)\n\t\t\t\t\t# \tresidual_encoding, self.kl_div, self.D_mean, self.D_var = VAE_cell(tower_mel_targets[i], hp.tacotron_batch_size)\n\n\t\t\t\t\t# elif is_evaluating:\n\t\t\t\t\t# \tresidual_encoding,self.kl_div = tf.zeros([hp.tacotron_batch_size, hp.VAE_D_size], dtype=tf.float32), 0\n\t\t\t\t\t# else:\n\t\t\t\t\t# \tresidual_encoding = tf.zeros([hp.tacotron_synthesis_batch_size, hp.VAE_D_size],\n\t\t\t\t\t# \t\t\t\t\t\t\t\t dtype=tf.float32)\n\t\t\t\t\t# self.residual_encoding=residual_encoding\n\n\t\t\t\t\t#Decoder Parts\n\t\t\t\t\t#Attention Decoder Prenet\n\t\t\t\t\tprenet = Prenet(is_training, layers_sizes=hp.prenet_layers, drop_rate=hp.tacotron_dropout_rate, scope='decoder_prenet')\n\t\t\t\t\t#Attention Mechanism\n\t\t\t\t\tattention_mechanism = LocationSensitiveAttention(hp.attention_dim, encoder_outputs, hparams=hp,\n\t\t\t\t\t\tmask_encoder=hp.mask_encoder, memory_sequence_length=tf.reshape(tower_input_lengths[i], [-1]), smoothing=hp.smoothing,\n\t\t\t\t\t\tcumulate_weights=hp.cumulative_weights)\n\t\t\t\t\t#Decoder LSTM Cells\n\t\t\t\t\tdecoder_lstm = DecoderRNN(is_training, layers=hp.decoder_layers,\n\t\t\t\t\t\tsize=hp.decoder_lstm_units, zoneout=hp.tacotron_zoneout_rate, scope='decoder_LSTM')\n\t\t\t\t\t#Frames Projection layer\n\t\t\t\t\tframe_projection = FrameProjection(hp.num_mels * hp.outputs_per_step, scope='linear_transform_projection')\n\t\t\t\t\t#<stop_token> projection layer\n\t\t\t\t\tstop_projection = StopProjection(is_training or is_evaluating, shape=hp.outputs_per_step, scope='stop_token_projection')\n\n\t\t\t\t\t#Decoder Cell ==> [batch_size, decoder_steps, num_mels * r] (after decoding)\n\t\t\t\t\tdecoder_cell = TacotronDecoderCell(\n\t\t\t\t\t\tprenet,\n\t\t\t\t\t\tattention_mechanism,\n\t\t\t\t\t\tdecoder_lstm,\n\t\t\t\t\t\tembedded_speaker_label,\n\t\t\t\t\t\t# embedded_language_label,\n\t\t\t\t\t\t# residual_encoding,\n\t\t\t\t\t\tframe_projection,\n\t\t\t\t\t\tstop_projection)\n\n\n\t\t\t\t\t#Define the helper for our decoder\n\t\t\t\t\tif is_training or is_evaluating or gta:\n\t\t\t\t\t\tself.helper = TacoTrainingHelper(batch_size, tower_mel_targets[i], hp, gta, is_evaluating, global_step)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.helper = TacoTestHelper(batch_size, hp)\n\n\n\t\t\t\t\t#initial decoder state\n\t\t\t\t\tdecoder_init_state = decoder_cell.zero_state(batch_size=batch_size, dtype=tf.float32)\n\n\t\t\t\t\t#Only use max iterations at synthesis time\n\t\t\t\t\tmax_iters = hp.max_iters if not (is_training or is_evaluating) else None\n\n\t\t\t\t\t#Decode\n\t\t\t\t\t(frames_prediction, stop_token_prediction, _), final_decoder_state, _ = dynamic_decode(\n\t\t\t\t\t\tCustomDecoder(decoder_cell, self.helper, decoder_init_state),\n\t\t\t\t\t\timpute_finished=False,\n\t\t\t\t\t\tmaximum_iterations=max_iters,\n\t\t\t\t\t\tswap_memory=hp.tacotron_swap_with_cpu)\n\n\n\t\t\t\t\t# Reshape outputs to be one output per entry \n\t\t\t\t\t#==> [batch_size, non_reduced_decoder_steps 
(decoder_steps * r), num_mels]\n\t\t\t\t\tdecoder_output = tf.reshape(frames_prediction, [batch_size, -1, hp.num_mels])\n\t\t\t\t\tstop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])\n\n\t\t\t\t\t#Postnet\n\t\t\t\t\tpostnet = Postnet(is_training, hparams=hp, scope='postnet_convolutions')\n\n\t\t\t\t\t#Compute residual using post-net ==> [batch_size, decoder_steps * r, postnet_channels]\n\t\t\t\t\tresidual = postnet(decoder_output)\n\n\t\t\t\t\t#Project residual to same dimension as mel spectrogram \n\t\t\t\t\t#==> [batch_size, decoder_steps * r, num_mels]\n\t\t\t\t\tresidual_projection = FrameProjection(hp.num_mels, scope='postnet_projection')\n\t\t\t\t\tprojected_residual = residual_projection(residual)\n\n\n\t\t\t\t\t#Compute the mel spectrogram\n\t\t\t\t\tmel_outputs = decoder_output + projected_residual\n\n\n\t\t\t\t\tif post_condition:\n\t\t\t\t\t\t# Add post-processing CBHG. This does a great job at extracting features from mels before projection to Linear specs.\n\t\t\t\t\t\tpost_cbhg = CBHG(hp.cbhg_kernels, hp.cbhg_conv_channels, hp.cbhg_pool_size, [hp.cbhg_projection, hp.num_mels],\n\t\t\t\t\t\t\thp.cbhg_projection_kernel_size, hp.cbhg_highwaynet_layers, \n\t\t\t\t\t\t\thp.cbhg_highway_units, hp.cbhg_rnn_units, is_training, name='CBHG_postnet')\n\n\t\t\t\t\t\t#[batch_size, decoder_steps(mel_frames), cbhg_channels]\n\t\t\t\t\t\tpost_outputs = post_cbhg(mel_outputs, None)\n\n\t\t\t\t\t\t#Linear projection of extracted features to make linear spectrogram\n\t\t\t\t\t\tlinear_specs_projection = FrameProjection(hp.num_freq, scope='cbhg_linear_specs_projection')\n\n\t\t\t\t\t\t#[batch_size, decoder_steps(linear_frames), num_freq]\n\t\t\t\t\t\tlinear_outputs = linear_specs_projection(post_outputs)\n\n\t\t\t\t\t#Grab alignments from the final decoder state\n\t\t\t\t\talignments = tf.transpose(final_decoder_state.alignment_history.stack(), [1, 2, 0])\n\n\t\t\t\t\tself.tower_decoder_output.append(decoder_output)\n\t\t\t\t\tself.tower_alignments.append(alignments)\n\t\t\t\t\tself.tower_stop_token_prediction.append(stop_token_prediction)\n\t\t\t\t\tself.tower_mel_outputs.append(mel_outputs)\n\t\t\t\t\tself.tower_predict_speaker_labels.append(predict_speaker_labels)\n\t\t\t\t\ttower_embedded_inputs_phoneme.append(embedded_inputs_phoneme)\n\t\t\t\t\ttower_embedded_inputs_tone_stress.append(embedded_inputs_tone_stress)\n\t\t\t\t\ttower_embedded_inputs_concat.append(embedded_inputs_concat)\n\t\t\t\t\ttower_enc_conv_output_shape.append(enc_conv_output_shape)\n\t\t\t\t\ttower_encoder_outputs.append(encoder_outputs)\n\t\t\t\t\ttower_residual.append(residual)\n\t\t\t\t\ttower_projected_residual.append(projected_residual)\n\n\n\t\t\t\t\tif post_condition:\n\t\t\t\t\t\tself.tower_linear_outputs.append(linear_outputs)\n\t\t\tlog('initialisation done {}'.format(gpus[i]))\n\n\n\t\tif is_training:\n\t\t\tself.ratio = self.helper._ratio\n\t\tself.tower_inputs = tower_inputs\n\t\tself.tower_inputs_tone_stress = tower_inputs_tone_stress\n\t\tself.tower_input_lengths = tower_input_lengths\n\t\tself.tower_mel_targets = tower_mel_targets\n\t\tself.tower_linear_targets = tower_linear_targets\n\t\tself.tower_targets_lengths = tower_targets_lengths\n\t\tself.tower_stop_token_targets = tower_stop_token_targets\n\t\tself.tower_speaker_labels = tower_speaker_labels\n\t\tself.tower_language_labels = tower_language_labels\n\t\tself.all_vars = tf.trainable_variables()\n\n\t\tlog('Initialized Tacotron model. Dimensions (? 
= dynamic shape): ')\n\t\tlog(' Train mode: {}'.format(is_training))\n\t\tlog(' Eval mode: {}'.format(is_evaluating))\n\t\tlog(' GTA mode: {}'.format(gta))\n\t\tlog(' Synthesis mode: {}'.format(not (is_training or is_evaluating)))\n\t\tlog(' Input: {}'.format(inputs.shape))\n\t\tfor i in range(hp.tacotron_num_gpus+hp.tacotron_gpu_start_idx):\n\t\t\tlog(' device: {}'.format(i))\n\t\t\tlog(' phoneme embedding: {}'.format(tower_embedded_inputs_phoneme[i].shape))\n\t\t\tlog(' tone stress embedding: {}'.format(tower_embedded_inputs_tone_stress[i].shape))\n\t\t\tlog(' concat embedding: {}'.format(tower_embedded_inputs_concat[i].shape))\n\t\t\tlog(' enc conv out: {}'.format(tower_enc_conv_output_shape[i]))\n\t\t\tlog(' encoder out: {}'.format(tower_encoder_outputs[i].shape))\n\t\t\tlog(' decoder out: {}'.format(self.tower_decoder_output[i].shape))\n\t\t\tlog(' residual out: {}'.format(tower_residual[i].shape))\n\t\t\tlog(' projected residual out: {}'.format(tower_projected_residual[i].shape))\n\t\t\tlog(' mel out: {}'.format(self.tower_mel_outputs[i].shape))\n\t\t\tif post_condition:\n\t\t\t\tlog(' linear out: {}'.format(self.tower_linear_outputs[i].shape))\n\t\t\tlog(' <stop_token> out: {}'.format(self.tower_stop_token_prediction[i].shape))\n\n\t\t\t#1_000_000 is causing syntax problems for some people?! Python please :)\n\t\t\tlog(' Tacotron Parameters {:.3f} Million.'.format(np.sum([np.prod(v.get_shape().as_list()) for v in self.all_vars]) / 1000000))\n\n\tdef add_loss(self):\n\t\t'''Adds loss to the model. Sets \"loss\" field. initialize must have been called.'''\n\t\thp = self._hparams\n\n\t\tself.tower_before_loss = []\n\t\tself.tower_after_loss= []\n\t\tself.tower_stop_token_loss = []\n\t\tself.tower_regularization_loss = []\n\t\tself.tower_linear_loss = []\n\t\tself.tower_adversarial_loss = []\n\t\tself.tower_loss = []\n\n\t\ttotal_before_loss = 0\n\t\ttotal_after_loss= 0\n\t\ttotal_stop_token_loss = 0\n\t\ttotal_regularization_loss = 0\n\t\ttotal_linear_loss = 0\n\t\ttotal_adversarial_loss = 0\n\t\ttotal_loss = 0\n\n\t\tgpus = [\"/gpu:{}\".format(i) for i in range(hp.tacotron_gpu_start_idx, hp.tacotron_gpu_start_idx+hp.tacotron_num_gpus)]\n\n\t\tfor i in range(hp.tacotron_num_gpus):\n\t\t\twith tf.device(tf.train.replica_device_setter(ps_tasks=1,ps_device=\"/cpu:0\",worker_device=gpus[i])):\n\t\t\t\twith tf.variable_scope('loss') as scope:\n\t\t\t\t\tif hp.mask_decoder:\n\t\t\t\t\t\t# Compute loss of predictions before postnet\n\t\t\t\t\t\tbefore = MaskedMSE(self.tower_mel_targets[i], self.tower_decoder_output[i], self.tower_targets_lengths[i],\n\t\t\t\t\t\t\thparams=self._hparams)\n\t\t\t\t\t\t# Compute loss after postnet\n\t\t\t\t\t\tafter = MaskedMSE(self.tower_mel_targets[i], self.tower_mel_outputs[i], self.tower_targets_lengths[i],\n\t\t\t\t\t\t\thparams=self._hparams)\n\t\t\t\t\t\t#Compute <stop_token> loss (for learning dynamic generation stop)\n\t\t\t\t\t\tstop_token_loss = MaskedSigmoidCrossEntropy(self.tower_stop_token_targets[i],\n\t\t\t\t\t\t\tself.tower_stop_token_prediction[i], self.tower_targets_lengths[i], hparams=self._hparams)\n\t\t\t\t\t\t#Compute masked linear loss\n\t\t\t\t\t\tif hp.predict_linear:\n\t\t\t\t\t\t\t#Compute Linear L1 mask loss (priority to low frequencies)\n\t\t\t\t\t\t\tlinear_loss = MaskedLinearLoss(self.tower_linear_targets[i], self.tower_linear_outputs[i],\n\t\t\t\t\t\t\t\tself.targets_lengths, hparams=self._hparams)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlinear_loss=0.\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Compute loss of predictions before 
postnet\n\t\t\t\t\t\tbefore = tf.losses.mean_squared_error(self.tower_mel_targets[i], self.tower_decoder_output[i])\n\t\t\t\t\t\t# Compute loss after postnet\n\t\t\t\t\t\tafter = tf.losses.mean_squared_error(self.tower_mel_targets[i], self.tower_mel_outputs[i])\n\t\t\t\t\t\t#Compute <stop_token> loss (for learning dynamic generation stop)\n\t\t\t\t\t\tstop_token_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n\t\t\t\t\t\t\tlabels=self.tower_stop_token_targets[i],\n\t\t\t\t\t\t\tlogits=self.tower_stop_token_prediction[i]))\n\t\t\t\t\t\tspeaker_loss=tf.losses\n\t\t\t\t\t\tif hp.predict_linear:\n\t\t\t\t\t\t\t#Compute linear loss\n\t\t\t\t\t\t\t#From https://github.com/keithito/tacotron/blob/tacotron2-work-in-progress/models/tacotron.py\n\t\t\t\t\t\t\t#Prioritize loss for frequencies under 2000 Hz.\n\t\t\t\t\t\t\tl1 = tf.abs(self.tower_linear_targets[i] - self.tower_linear_outputs[i])\n\t\t\t\t\t\t\tn_priority_freq = int(2000 / (hp.sample_rate * 0.5) * hp.num_freq)\n\t\t\t\t\t\t\tlinear_loss = 0.5 * tf.reduce_mean(l1) + 0.5 * tf.reduce_mean(l1[:,:,0:n_priority_freq])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlinear_loss = 0.\n\n\t\t\t\t\t# Compute the regularization weight\n\t\t\t\t\tif hp.tacotron_scale_regularization:\n\t\t\t\t\t\treg_weight_scaler = 1. / (2 * hp.max_abs_value) if hp.symmetric_mels else 1. / (hp.max_abs_value)\n\t\t\t\t\t\treg_weight = hp.tacotron_reg_weight * reg_weight_scaler\n\t\t\t\t\telse:\n\t\t\t\t\t\treg_weight = hp.tacotron_reg_weight\n\n\t\t\t\t\t# Regularize variables\n\t\t\t\t\t# Exclude all types of bias, RNN (Bengio et al. On the difficulty of training recurrent neural networks), embeddings and prediction projection layers.\n\t\t\t\t\t# Note that we consider attention mechanism v_a weights as a prediction projection layer and we don't regularize it. 
(This gave better stability)\n\t\t\t\t\tregularization = tf.add_n([tf.nn.l2_loss(v) for v in self.all_vars\n\t\t\t\t\t\tif not('bias' in v.name or 'Bias' in v.name or '_projection' in v.name or 'inputs_embedding' in v.name\n\t\t\t\t\t\t\tor 'RNN' in v.name or 'LSTM' in v.name)]) * reg_weight\n\t\t\t\t\t# Compute the speaker adversarial training loss\n\t\t\t\t\t# speaker_prediction: predicted speaker label for each time step of input, with shape [N, T_in, speaker_num]\n\t\t\t\t\t# speaker_targets: one-hot speaker label of current input, from shape [N, speaker_num] to [N, 1, speaker_num] to [N, T_in, speaker_num]\n\t\t\t\t\tseq_len = tf.shape(self.tower_predict_speaker_labels[i])[1]\n\t\t\t\t\tspeaker_targets = tf.one_hot(self.tower_speaker_labels[i], hp.speaker_num, dtype=tf.float32)\n\t\t\t\t\tspeaker_targets = tf.tile(tf.reshape(speaker_targets, shape=[-1, 1, hp.speaker_num]),\n\t\t\t\t\t\t\t\t\t\t\t multiples=[1, seq_len, 1])\n\t\t\t\t\tadversarial_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n\t\t\t\t\t\tlabels=speaker_targets,\n\t\t\t\t\t\tlogits=self.tower_predict_speaker_labels[i]))\n\n\t\t\t\t\t# Compute final loss term\n\t\t\t\t\tself.tower_before_loss.append(before)\n\t\t\t\t\tself.tower_after_loss.append(after)\n\t\t\t\t\tself.tower_stop_token_loss.append(stop_token_loss)\n\t\t\t\t\tself.tower_regularization_loss.append(regularization)\n\t\t\t\t\tself.tower_linear_loss.append(linear_loss)\n\t\t\t\t\tself.tower_adversarial_loss.append(adversarial_loss)\n\t\t\t\t\t# loss = before + after + stop_token_loss + regularization + linear_loss + hp.loss_weight * adversarial_loss + self.kl_div\n\t\t\t\t\tloss = before + after + stop_token_loss + regularization + linear_loss + hp.loss_weight * adversarial_loss\n\t\t\t\t\tself.tower_loss.append(loss)\n\n\t\tfor i in range(hp.tacotron_num_gpus):\n\t\t\ttotal_before_loss += self.tower_before_loss[i] \n\t\t\ttotal_after_loss += self.tower_after_loss[i]\n\t\t\ttotal_stop_token_loss += self.tower_stop_token_loss[i]\n\t\t\ttotal_regularization_loss += self.tower_regularization_loss[i]\n\t\t\ttotal_linear_loss += self.tower_linear_loss[i]\n\t\t\ttotal_adversarial_loss +=self.tower_adversarial_loss[i]\n\t\t\ttotal_loss += self.tower_loss[i]\n\n\t\tself.before_loss = total_before_loss / hp.tacotron_num_gpus\n\t\tself.after_loss = total_after_loss / hp.tacotron_num_gpus\n\t\tself.stop_token_loss = total_stop_token_loss / hp.tacotron_num_gpus\n\t\tself.regularization_loss = total_regularization_loss / hp.tacotron_num_gpus\n\t\tself.linear_loss = total_linear_loss / hp.tacotron_num_gpus\n\t\tself.adversarial_loss = total_adversarial_loss / hp.tacotron_num_gpus\n\t\tself.loss = total_loss / hp.tacotron_num_gpus\n\n\tdef add_optimizer(self, global_step):\n\t\t'''Adds optimizer. Sets \"gradients\" and \"optimize\" fields. add_loss must have been called.\n\t\tArgs:\n\t\t\tglobal_step: int32 scalar Tensor representing current global step in training\n\t\t'''\n\t\thp = self._hparams\n\t\ttower_gradients = []\n\n\t\t# 1. 
Declare GPU Devices\n\t\tgpus = [\"/gpu:{}\".format(i) for i in range(hp.tacotron_gpu_start_idx, hp.tacotron_gpu_start_idx + hp.tacotron_num_gpus)]\n\n\t\tgrad_device = '/cpu:0' if hp.tacotron_num_gpus > 1 else gpus[0]\n\n\t\twith tf.device(grad_device):\n\t\t\twith tf.variable_scope('optimizer') as scope:\n\t\t\t\tif hp.tacotron_decay_learning_rate:\n\t\t\t\t\tself.decay_steps = hp.tacotron_decay_steps\n\t\t\t\t\tself.decay_rate = hp.tacotron_decay_rate\n\t\t\t\t\tself.learning_rate = self._learning_rate_decay(hp.tacotron_initial_learning_rate, global_step)\n\t\t\t\telse:\n\t\t\t\t\tself.learning_rate = tf.convert_to_tensor(hp.tacotron_initial_learning_rate)\n\n\t\t\t\toptimizer = tf.train.AdamOptimizer(self.learning_rate, hp.tacotron_adam_beta1,\n\t\t\t\t\thp.tacotron_adam_beta2, hp.tacotron_adam_epsilon)\n\n\t\t# 2. Compute Gradient\n\t\tfor i in range(hp.tacotron_num_gpus):\n\t\t\t# Device placement\n\t\t\twith tf.device(tf.train.replica_device_setter(ps_tasks=1,ps_device=\"/cpu:0\",worker_device=gpus[i])) :\n\t\t\t\t#agg_loss += self.tower_loss[i]\n\t\t\t\twith tf.variable_scope('optimizer') as scope:\n\t\t\t\t\tgradients = optimizer.compute_gradients(self.tower_loss[i])\n\t\t\t\t\ttower_gradients.append(gradients)\n\n\t\t# 3. Average Gradient\n\t\twith tf.device(grad_device) :\n\t\t\tavg_grads = []\n\t\t\tvars = []\n\t\t\tfor grad_and_vars in zip(*tower_gradients):\n\t\t\t\t# grads_vars = [(grad1, var), (grad2, var), ...]\n\t\t\t\tgrads = []\n\t\t\t\tfor g,_ in grad_and_vars:\n\t\t\t\t\texpanded_g = tf.expand_dims(g, 0)\n\t\t\t\t\t# Append on a 'tower' dimension which we will average over below.\n\t\t\t\t\tgrads.append(expanded_g)\n\t\t\t\t\t# Average over the 'tower' dimension.\n\t\t\t\tgrad = tf.concat(axis=0, values=grads)\n\t\t\t\tgrad = tf.reduce_mean(grad, 0)\n\n\t\t\t\tv = grad_and_vars[0][1]\n\t\t\t\tavg_grads.append(grad)\n\t\t\t\tvars.append(v)\n\n\t\t\tself.gradients = avg_grads\n\t\t\t#Just for causion\n\t\t\t#https://github.com/Rayhane-mamah/Tacotron-2/issues/11\n\t\t\tif hp.tacotron_clip_gradients:\n\t\t\t\tclipped_gradients, _ = tf.clip_by_global_norm(avg_grads, 1.) # __mark 0.5 refer\n\t\t\telse:\n\t\t\t\tclipped_gradients = avg_grads\n\n\t\t\t# Add dependency on UPDATE_OPS; otherwise batchnorm won't work correctly. See:\n\t\t\t# https://github.com/tensorflow/tensorflow/issues/1122\n\t\t\twith tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n\t\t\t\tself.optimize = optimizer.apply_gradients(zip(clipped_gradients, vars),\n\t\t\t\t\tglobal_step=global_step)\n\n\tdef _learning_rate_decay(self, init_lr, global_step):\n\t\t#################################################################\n\t\t# Narrow Exponential Decay:\n\n\t\t# Phase 1: lr = 1e-3\n\t\t# We only start learning rate decay after 50k steps\n\n\t\t# Phase 2: lr in ]1e-5, 1e-3[\n\t\t# decay reach minimal value at step 310k\n\n\t\t# Phase 3: lr = 1e-5\n\t\t# clip by minimal learning rate value (step > 310k)\n\t\t#################################################################\n\t\thp = self._hparams\n\n\t\t#Compute natural exponential decay\n\t\tlr = tf.train.exponential_decay(init_lr, \n\t\t\tglobal_step - hp.tacotron_start_decay, #lr = 1e-3 at step 50k\n\t\t\tself.decay_steps, \n\t\t\tself.decay_rate, #lr = 1e-5 around step 310k\n\t\t\tname='lr_exponential_decay')\n\n\n\t\t#clip learning rate by max and min values (initial and final values)\n\t\treturn tf.minimum(tf.maximum(lr, hp.tacotron_final_learning_rate), init_lr)"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.device",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.nn.l2_loss",
"tensorflow.train.AdamOptimizer",
"tensorflow.py_func",
"tensorflow.get_collection",
"tensorflow.train.exponential_decay",
"tensorflow.trainable_variables",
"tensorflow.shape",
"tensorflow.one_hot",
"tensorflow.split",
"tensorflow.nn.embedding_lookup",
"tensorflow.losses.mean_squared_error",
"tensorflow.reduce_mean",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.train.replica_device_setter",
"tensorflow.clip_by_global_norm",
"tensorflow.variable_scope",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
blthayer/ecen-667 | [
"cf609fa230b94e5b98af7afe554250a0824c2e11"
] | [
"hw6.py"
] | [
"import numpy as np\n\n\ndef main():\n p1()\n p2()\n p6()\n\n\ndef p1():\n # Do part 1.\n print('*' * 80)\n print('Problem 8.8, Part 1')\n a1 = np.array([\n [3, 8],\n [2, 3]\n ])\n\n _get_participation(a1)\n\n # Now part 2.\n print('*' * 80)\n print('Problem 8.8, Part 2')\n a2 = np.array([\n [1, 2, 1],\n [0, 3, 1],\n [0, 5, -1]\n ])\n\n _get_participation(a2)\n\n\ndef _get_participation(a1):\n # Get right eigenvectors.\n lambda1, v1 = np.linalg.eig(a1)\n\n # Get left eigenvectors.\n lambda_left, w1 = np.linalg.eig(a1.T)\n\n # Sort so that our eigenvectors line up.\n sort_1 = np.argsort(lambda1)\n sort_2 = np.argsort(lambda_left)\n\n # Check.\n np.testing.assert_allclose(lambda1[sort_1],\n lambda_left[sort_2])\n\n print(f'Eigenvalues: {lambda1[sort_1]}')\n\n v1 = v1[:, sort_1]\n w1 = w1[:, sort_2]\n\n # Scale left eigenvectors so that w_i^t * v_i = 1.\n for idx in range(w1.shape[0]):\n w_i = w1[:, idx]\n v_i = v1[:, idx]\n p = np.matmul(w_i, v_i)\n if p == 0:\n continue\n c = 1 / np.matmul(w_i, v_i)\n w1[:, idx] = w1[:, idx] * c\n\n # Check.\n # Commenting this out since it doesn't work well with values very\n # near zero (e.g. 1e-17).\n # np.testing.assert_allclose(np.matmul(w1.T, v1), np.identity(a1.shape[0]))\n\n # The participation factors are simple elementwise multiplication.\n p_1 = v1 * w1\n print(f'Participation Factors:\\n{p_1}')\n\n\ndef p2():\n print('*' * 80)\n print('Problem 2')\n # Given parameters\n m = 0.0133\n p_m = 0.91\n p_e = 3.24\n\n # Compute delta^s\n d_s = np.arcsin(p_m / p_e)\n\n # Compute V_cr\n v_cr = -p_m * (np.pi - 2 * d_s) + 2 * p_e * np.cos(d_s)\n\n # Initialize variables.\n t = 0\n dt = 0.005\n delta = d_s\n w = 0\n\n # Function for computing w(t)\n def w_t():\n # Consider w_0 to be 0, since we're in the \"delta w\" frame.\n return p_m * t / m\n\n # Function for computing delta(t)\n def d_t():\n # Again, consider w_0 to be 0.\n return 0.5 * p_m * t**2 / m + d_s\n\n # Energy function.\n def v():\n return 0.5 * m * w**2 - p_m * (delta - d_s) - \\\n p_e * (np.cos(delta) - np.cos(d_s))\n\n # Compute initial v\n v_t = v()\n v_list = [v_t]\n i = 0\n while v_t <= v_cr and i < 1000:\n t += dt\n # Compute delta and omega.\n delta = d_t()\n w = w_t()\n\n # Compute energy.\n v_t = v()\n v_list.append(v_t)\n\n i += 1\n\n if i >= 100:\n raise UserWarning('Maxed iterations.')\n\n print(f't_cr: {t:.3f}')\n\n\ndef p6():\n print('*' * 80)\n print('Problem 6')\n # Phase angles of vstab and speed\n vstab = -30.925\n speed = -45.306\n phi_deg = vstab + 360 - speed\n print(f'phi_deg: {phi_deg:.3f}')\n # Convert to radians, subtract 180 degrees, divide by 2.\n phi = (phi_deg - 180) / 2 * np.pi / 180\n\n # Frequency of our mode\n f = 1.67\n\n # Compute alpha\n alpha = (1 - np.sin(phi)) / (1 + np.sin(phi))\n print(f'alpha: {alpha:.3f}')\n\n # Now compute t1 and t2.\n t1 = 1 / (2 * np.pi * f * np.sqrt(alpha))\n t2 = alpha * t1\n print(f't1: {t1:.3f}')\n print(f't2: {t2:.3f}')\n pass\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.sqrt",
"numpy.arcsin",
"numpy.linalg.eig",
"numpy.matmul",
"numpy.cos",
"numpy.sin",
"numpy.testing.assert_allclose",
"numpy.argsort",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nhutnamhcmus/datacamp-playground | [
"25457e813b1145e1d335562286715eeddd1c1a7b",
"25457e813b1145e1d335562286715eeddd1c1a7b",
"25457e813b1145e1d335562286715eeddd1c1a7b",
"25457e813b1145e1d335562286715eeddd1c1a7b",
"25457e813b1145e1d335562286715eeddd1c1a7b"
] | [
"introduction-to-data-visualization-in-python/4. Analyzing time series and images/script_1.py",
"intermediates-python/4. Loops/script_13.py",
"intermediates-python/5. Case Study Hacker Statistics/script_1.py",
"introduction-to-data-visualization-in-python/3. Statistical plots with Seaborn/script_2.py",
"unit-testing-for-data-science-in-python/2. Intermediate unit testing/train.py"
] | [
"# Multiple time series on common axes\r\n\r\n# Import matplotlib.pyplot as plt\r\nimport matplotlib.pyplot as plt\r\n\r\n# Plot the aapl time series in blue\r\nplt.plot(aapl, color='blue', label='AAPL')\r\n\r\n# Plot the ibm time series in green\r\nplt.plot(ibm, color='green', label='IBM')\r\n\r\n# Plot the csco time series in red\r\nplt.plot(csco, color='red', label='CSCO')\r\n\r\n# Plot the msft time series in magenta\r\nplt.plot(msft, color='magenta', label='MSFT')\r\n\r\n# Add a legend in the top left corner of the plot\r\nplt.legend(loc='upper left')\r\n\r\n# Specify the orientation of the xticks\r\nplt.xticks(rotation=60)\r\n\r\n# Display the plot\r\nplt.show()\r\n",
"# Import cars data\r\nimport pandas as pd\r\ncars = pd.read_csv('cars.csv', index_col = 0)\r\n\r\n# Use .apply(str.upper)\r\ncars['COUNTRY'] = cars['country'].apply(str.upper)",
"# Import numpy as np\r\nimport numpy as np\r\n\r\n# Set the seed\r\nnp.random.seed(123)\r\n\r\n# Generate and print random float\r\nprint(np.random.rand())",
"# Plotting residuals of a regression\r\n\r\n# Import plotting modules\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n# Generate a green residual plot of the regression between 'hp' and 'mpg'\r\nsns.residplot(x='hp', y='mpg', data=auto, color='green')\r\n\r\n# Display the plot\r\nplt.show()\r\n",
"import numpy as np\r\n\r\ndef split_into_training_and_testing_sets(data_array):\r\n dim = data_array.ndim\r\n if dim != 2:\r\n raise ValueError(\"Argument data_array must be two dimensional. Got {0} dimensional array instead!\".format(dim))\r\n num_rows = data_array.shape[0]\r\n if num_rows < 2:\r\n raise ValueError(\"Argument data_array must have at least 2 rows, it actually has just {0}\".format(num_rows))\r\n num_training = int(0.75 * data_array.shape[0])\r\n permuted_indices = np.random.permutation(data_array.shape[0])\r\n return data_array[permuted_indices[:num_training], :], data_array[permuted_indices[num_training:], :]"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
],
[
"pandas.read_csv"
],
[
"numpy.random.rand",
"numpy.random.seed"
],
[
"matplotlib.pyplot.show"
],
[
"numpy.random.permutation"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dimon58/miptlabs | [
"538f6c410210a6e3405ca5b61dc7bc41d251cdf8"
] | [
"src/miptlabs/interpolators/interpolators.py"
] | [
"from numpy import linspace\nfrom scipy.interpolate import interp1d\n\n\nclass Interpolator:\n \"\"\"\n Базовый класс интерполятора\n \"\"\"\n\n def __init__(self, points=100):\n self.points = points\n\n def interpolate(self, x, y):\n pass\n\n\nclass Quadratic(Interpolator):\n \"\"\"\n Квдратичный интерполятор\n \"\"\"\n\n def gen_x_axis(self, start, end):\n \"\"\"\n Генерирует набор точек по оси абсцисс\n :param start:\n :param end:\n :return:\n \"\"\"\n return linspace(start, end, self.points)\n\n def interpolate(self, x, y):\n \"\"\"\n Производит квадратическую интерполяцию, подробнее в `документации numpy`_\n\n .. _`документации numpy`: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html\n\n :param x: координаты по оси x\n :param y: координаты по оси y\n\n :return: набор координат по оси x и набор координат по оси y после интерполяции\n \"\"\"\n points = interp1d(x, y, kind='quadratic')\n x = linspace(min(x), max(x), self.points)\n y = points(x)\n\n return x, y\n"
] | [
[
"scipy.interpolate.interp1d",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
NeuroML/Documentation | [
"06e355a8268c848b872b4e4c44d990b77b1fcb37"
] | [
"source/Userdocs/NML2_examples/izhikevich-single-neuron.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\nSimulating a regular spiking Izhikevich neuron with NeuroML.\n\nFile: izhikevich-single-neuron.py\n\"\"\"\n\nfrom neuroml import NeuroMLDocument\nfrom neuroml import Izhikevich2007Cell\nfrom neuroml import Population\nfrom neuroml import Network\nfrom neuroml import PulseGenerator\nfrom neuroml import ExplicitInput\nimport neuroml.writers as writers\nfrom neuroml.utils import validate_neuroml2\nfrom pyneuroml import pynml\nfrom pyneuroml.lems import LEMSSimulation\nimport numpy as np\n\n\n# Create a new NeuroML model document\nnml_doc = NeuroMLDocument(id=\"IzhSingleNeuron\")\n\n# Define the Izhikevich cell and add it to the model in the document\nizh0 = Izhikevich2007Cell(\n id=\"izh2007RS0\", v0=\"-60mV\", C=\"100pF\", k=\"0.7nS_per_mV\", vr=\"-60mV\",\n vt=\"-40mV\", vpeak=\"35mV\", a=\"0.03per_ms\", b=\"-2nS\", c=\"-50.0mV\", d=\"100pA\")\nnml_doc.izhikevich2007_cells.append(izh0)\n\n# Create a network and add it to the model\nnet = Network(id=\"IzhNet\")\nnml_doc.networks.append(net)\n\n# Create a population of defined cells and add it to the model\nsize0 = 1\npop0 = Population(id=\"IzhPop0\", component=izh0.id, size=size0)\nnet.populations.append(pop0)\n\n# Define an external stimulus and add it to the model\npg = PulseGenerator(\n id=\"pulseGen_%i\" % 0, delay=\"0ms\", duration=\"1000ms\",\n amplitude=\"0.07 nA\"\n)\nnml_doc.pulse_generators.append(pg)\nexp_input = ExplicitInput(target=\"%s[%i]\" % (pop0.id, 0), input=pg.id)\nnet.explicit_inputs.append(exp_input)\n\n# Write the NeuroML model to a file\nnml_file = 'izhikevich2007_single_cell_network.nml'\nwriters.NeuroMLWriter.write(nml_doc, nml_file)\nprint(\"Written network file to: \" + nml_file)\n\n# Validate the NeuroML model against the NeuroML schema\nvalidate_neuroml2(nml_file)\n\n################################################################################\n## The NeuroML file has now been created and validated. The rest of the code\n## involves writing a LEMS simulation file to run the model\n\n# Create a simulation instance of the model\nsimulation_id = \"example-single-izhikevich2007cell-sim\"\nsimulation = LEMSSimulation(sim_id=simulation_id,\n duration=1000, dt=0.1, simulation_seed=123)\nsimulation.assign_simulation_target(net.id)\nsimulation.include_neuroml2_file(nml_file)\n\n# Define the output file to store simulation outputs\n# we record the neuron's membrane potential\nsimulation.create_output_file(\n \"output0\", \"%s.v.dat\" % simulation_id\n)\nsimulation.add_column_to_output_file(\"output0\", 'IzhPop0[0]', 'IzhPop0[0]/v')\n\n# Save the simulation to a file\nlems_simulation_file = simulation.save_to_file()\n\n# Run the simulation using the jNeuroML simulator\npynml.run_lems_with_jneuroml(\n lems_simulation_file, max_memory=\"2G\", nogui=True, plot=False\n)\n\n# Load the data from the file and plot the graph for the membrane potential\n# using the pynml generate_plot utility function.\ndata_array = np.loadtxt(\"%s.v.dat\" % simulation_id)\npynml.generate_plot(\n [data_array[:, 0]], [data_array[:, 1]],\n \"Membrane potential\", show_plot_already=False,\n save_figure_to=\"%s-v.png\" % simulation_id,\n xaxis=\"time (s)\", yaxis=\"membrane potential (V)\"\n)\n"
] | [
[
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WesleyBatista/fklearn | [
"7a606d246545de5ab68b2d9f38d0fdbeec6ca630"
] | [
"src/fklearn/metrics/pd_extractors.py"
] | [
"import collections\nfrom datetime import datetime\nfrom itertools import chain, repeat\n\nimport pandas as pd\nfrom toolz import curry\nfrom numpy import nan\n\n\n@curry\ndef evaluator_extractor(result, evaluator_name):\n metric_value = result[evaluator_name] if result else nan\n return pd.DataFrame({evaluator_name: [metric_value]})\n\n\n@curry\ndef combined_evaluator_extractor(result, base_extractors):\n return pd.concat([x(result) for x in base_extractors], axis=1)\n\n\n@curry\ndef split_evaluator_extractor_iteration(split_value, result, split_col, base_extractor):\n key = 'split_evaluator__' + split_col + '_' + str(split_value)\n return (base_extractor(result.get(key, {}))\n .assign(**{'split_evaluator__' + split_col: split_value}))\n\n\n@curry\ndef split_evaluator_extractor(result, split_col, split_values, base_extractor):\n return pd.concat(\n list(map(split_evaluator_extractor_iteration(result=result, split_col=split_col, base_extractor=base_extractor),\n split_values)))\n\n\n@curry\ndef temporal_split_evaluator_extractor(result, time_col, base_extractor, time_format=\"%Y-%m\", eval_name=None):\n if eval_name is None:\n eval_name = 'split_evaluator__' + time_col\n\n split_keys = [key for key in result.keys() if eval_name in key]\n split_values = []\n for key in split_keys:\n date = key.split(eval_name)[1][1:]\n try:\n # just check time format\n datetime.strptime(date, time_format)\n split_values.append(date)\n except ValueError:\n # this might happen if result has temporal splitters using different data formats\n pass\n\n return split_evaluator_extractor(result, time_col, split_values, base_extractor)\n\n\n@curry\ndef learning_curve_evaluator_extractor(result, base_extractor):\n return base_extractor(result).assign(lc_period_end=result['lc_period_end'])\n\n\n@curry\ndef reverse_learning_curve_evaluator_extractor(result, base_extractor):\n return base_extractor(result).assign(reverse_lc_period_start=result['reverse_lc_period_start'])\n\n\n@curry\ndef stability_curve_evaluator_extractor(result, base_extractor):\n return base_extractor(result).assign(sc_period=result['sc_period'])\n\n\n@curry\ndef repeat_split_log(split_log, results_len):\n if isinstance(split_log, collections.Iterable):\n n_repeat = results_len // len(split_log)\n # The logic below makes [1, 2, 3] into [1, 1, 1, 2, 2, 2, 3, 3, 3] for n_repeat=3\n return list(chain.from_iterable(zip(*repeat(split_log, n_repeat))))\n else:\n return split_log\n\n\n@curry\ndef extract_base_iteration(result, extractor):\n extracted_results = pd.concat(list(map(extractor, result['eval_results'])))\n repeat_fn = repeat_split_log(results_len=len(extracted_results))\n\n keys = result['split_log'].keys()\n assignments = {k: repeat_fn(result['split_log'][k]) for k in keys}\n\n return (extracted_results\n .assign(fold_num=result['fold_num'])\n .assign(**assignments))\n\n\n@curry\ndef extract(validator_results, extractor):\n return pd.concat(list(map(extract_base_iteration(extractor=extractor), validator_results)))\n\n\n@curry\ndef extract_lc(validator_results, extractor):\n return extract(validator_results, learning_curve_evaluator_extractor(base_extractor=extractor))\n\n\n@curry\ndef extract_reverse_lc(validator_results, extractor):\n return extract(validator_results, reverse_learning_curve_evaluator_extractor(base_extractor=extractor))\n\n\n@curry\ndef extract_sc(validator_results, extractor):\n return extract(validator_results, stability_curve_evaluator_extractor(base_extractor=extractor))\n\n\n@curry\ndef 
extract_param_tuning_iteration(iteration, tuning_log, base_extractor, model_learner_name):\n iter_df = base_extractor(tuning_log[iteration][\"validator_log\"])\n return iter_df.assign(**tuning_log[iteration][\"train_log\"][model_learner_name][\"parameters\"])\n\n\n@curry\ndef extract_tuning(tuning_log, base_extractor, model_learner_name):\n iter_fn = extract_param_tuning_iteration(tuning_log=tuning_log, base_extractor=base_extractor,\n model_learner_name=model_learner_name)\n return pd.concat(list(map(iter_fn, range(len(tuning_log)))))\n\n\n@curry\ndef permutation_extractor(results, base_extractor):\n df = pd.concat(base_extractor(r) for r in results['permutation_importance'].values())\n df.index = results['permutation_importance'].keys()\n if 'permutation_importance_baseline' in results: # With baseline comparison\n baseline = base_extractor(results['permutation_importance_baseline'])\n baseline.index = [\"baseline\"]\n df = pd.concat((df, baseline))\n for c in baseline.columns:\n df[c + '_delta_from_baseline'] = baseline[c].iloc[0] - df[c]\n return df\n"
] | [
[
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
JustinACoder/H22-GR3-UnrealAI | [
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"361eb9ef1147f8a2991e5f98c4118cd823184adf",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"3f1fdcb7693ff152f17623ce549526ec272698b1",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"3f1fdcb7693ff152f17623ce549526ec272698b1",
"1fa4cd6a566c8745f455fc3d2273208f21f88ced",
"3f1fdcb7693ff152f17623ce549526ec272698b1"
] | [
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/timeseries/python/timeseries/input_pipeline.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/ops/random_ops.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/training/checkpoint_management.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/python/ops/core_rnn_cell.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/keras/estimator/__init__.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Scripts/f2py.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/debug/lib/debug_graphs.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/distribute/python/step_fn.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/estimator/python/estimator/boosted_trees.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/autograph/converters/call_trees.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/util/nest.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/tensor_forest/hybrid/ops/gen_training_ops.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/estimator/python/estimator/logit_fns.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/ops/collective_ops.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/training/checkpointable/util.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/h5py/tests/old/test_h5t.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/ops/sparse_grad.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/h5py/tests/old/test_base.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/estimator/python/estimator/rnn.py",
"Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/h5py/tests/common.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Defines ways of splicing and re-arranging time series.\r\n\r\nThis file provides methods for reading, parsing, and re-arranging a time\r\nseries. The main departure from standard TensorFlow input pipelines is a focus\r\non \"chunking\" a time series, i.e. slicing it into small contiguous windows which\r\nare then batched together for training, a form of truncated\r\nbackpropagation. This typically provides a significant speedup compared to\r\nlooping over the whole series sequentially, by exploiting data parallelism and\r\nby reducing redundant contributions to gradients (due to redundant information\r\nin the series itself).\r\n\r\nA series, consisting of times (an increasing vector of integers) and values (one\r\nor more floating point values for each time) along with any exogenous features,\r\nis stored either in memory or on disk in various formats (e.g. \"one record per\r\ntimestep\" on disk, or as a dictionary of Numpy arrays in memory). The location\r\nand format is specified by configuring a `TimeSeriesReader` object\r\n(e.g. `NumpyReader`, `CSVReader`), which reads the data into the TensorFlow\r\ngraph. A `TimeSeriesInputFn` object (typically `RandomWindowInputFn`) then\r\nperforms windowing and batching.\r\n\r\nTime series are passed through this pipeline as dictionaries mapping feature\r\nnames to their values. For training and evaluation, these require at minimum\r\n`TrainEvalFeatures.TIMES` (scalar integers, one per timestep) and\r\n`TrainEvalFeatures.VALUES` (may be either univariate or multivariate). Exogenous\r\nfeatures may have any shape, but are likewise associated with a timestep. Times\r\nthemselves need not be contiguous or regular (although smaller/fewer gaps are\r\ngenerally better), but each timestep must have all `VALUES` and any exogenous\r\nfeatures (i.e. times may be missing, but given that a time is specified, every\r\nother feature must also be specified for that step; some models may support\r\nmaking exogenous updates conditional).\r\n\r\nThe expected use case of a `TimeSeriesInputFn` is that it is first configured\r\n(for example setting a batch or window size) and passed a reader (a\r\n`TimeSeriesReader` object). 
The `TimeSeriesInputFn` can then be passed as the\r\ninput_fn of an Estimator.\r\n\r\nFor example, `RandomWindowInputFn` is useful for creating batches of random\r\nchunks of a series for training:\r\n\r\n```\r\n # Read data in the default \"time,value\" CSV format with no header\r\n reader = input_pipeline.CSVReader(csv_file_name)\r\n # Set up windowing and batching for training\r\n train_input_fn = input_pipeline.RandomWindowInputFn(\r\n reader, batch_size=16, window_size=16)\r\n # Fit model parameters to data\r\n estimator.train(input_fn=train_input_fn, steps=150)\r\n```\r\n\r\n`RandomWindowInputFn` is the primary tool for training and quantitative\r\nevaluation of time series. `WholeDatasetInputFn`, which reads a whole series\r\ninto memory, is useful for qualitative evaluation and preparing to make\r\npredictions with `predict_continuation_input_fn`.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport abc\r\n\r\nimport numpy\r\n\r\nfrom tensorflow.contrib.timeseries.python.timeseries import feature_keys\r\nfrom tensorflow.contrib.timeseries.python.timeseries import model_utils\r\n\r\nfrom tensorflow.python.estimator import estimator_lib\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import io_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import nn\r\nfrom tensorflow.python.ops import parsing_ops\r\nfrom tensorflow.python.ops import random_ops\r\nfrom tensorflow.python.ops import state_ops\r\nfrom tensorflow.python.ops import tensor_array_ops\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.training import input as input_lib\r\nfrom tensorflow.python.training import training\r\nfrom tensorflow.python.util import nest\r\n\r\n\r\ndef predict_continuation_input_fn(\r\n evaluation, steps=None, times=None, exogenous_features=None):\r\n \"\"\"An Estimator input_fn for running predict() after evaluate().\r\n\r\n If the call to evaluate() we are making predictions based on had a batch_size\r\n greater than one, predictions will start after each of these windows\r\n (i.e. will have the same batch dimension).\r\n\r\n Args:\r\n evaluation: The dictionary returned by `Estimator.evaluate`, with keys\r\n FilteringResults.STATE_TUPLE and FilteringResults.TIMES.\r\n steps: The number of steps to predict (scalar), starting after the\r\n evaluation. If `times` is specified, `steps` must not be; one is required.\r\n times: A [batch_size x window_size] array of integers (not a Tensor)\r\n indicating times to make predictions for. These times must be after the\r\n corresponding evaluation. If `steps` is specified, `times` must not be;\r\n one is required. If the batch dimension is omitted, it is assumed to be 1.\r\n exogenous_features: Optional dictionary. If specified, indicates exogenous\r\n features for the model to use while making the predictions. 
Values must\r\n have shape [batch_size x window_size x ...], where `batch_size` matches\r\n the batch dimension used when creating `evaluation`, and `window_size` is\r\n either the `steps` argument or the `window_size` of the `times` argument\r\n (depending on which was specified).\r\n Returns:\r\n An `input_fn` suitable for passing to the `predict` function of a time\r\n series `Estimator`.\r\n Raises:\r\n ValueError: If `times` or `steps` are misspecified.\r\n \"\"\"\r\n if exogenous_features is None:\r\n exogenous_features = {}\r\n predict_times = model_utils.canonicalize_times_or_steps_from_output(\r\n times=times, steps=steps, previous_model_output=evaluation)\r\n features = {\r\n feature_keys.PredictionFeatures.STATE_TUPLE:\r\n evaluation[feature_keys.FilteringResults.STATE_TUPLE],\r\n feature_keys.PredictionFeatures.TIMES:\r\n predict_times\r\n }\r\n features.update(exogenous_features)\r\n def _predict_input_fn():\r\n \"\"\"An input_fn for predict().\"\"\"\r\n # Prevents infinite iteration with a constant output in an Estimator's\r\n # predict().\r\n limited_features = {}\r\n for key, values in features.items():\r\n limited_values = nest.map_structure(\r\n lambda value: training.limit_epochs(value, num_epochs=1), values)\r\n limited_features[key] = limited_values\r\n return (limited_features, None)\r\n return _predict_input_fn\r\n\r\n\r\nclass TimeSeriesReader(object):\r\n \"\"\"Reads from and parses a data source for a `TimeSeriesInputFn`.\r\n\r\n This class provides methods that read a few records (`read`) or the full data\r\n set at once (`read_full`), and returns them as dictionaries mapping feature\r\n names to feature Tensors. Please see note at the top of the file for the\r\n structure of these dictionaries. The output is generally chunked by a\r\n `TimeSeriesInputFn` before being passed to the model.\r\n \"\"\"\r\n\r\n def check_dataset_size(self, minimum_dataset_size):\r\n \"\"\"When possible, raises an error if the dataset is too small.\r\n\r\n This method allows TimeSeriesReaders to raise informative error messages if\r\n the user has selected a window size in their TimeSeriesInputFn which is\r\n larger than the dataset size. However, many TimeSeriesReaders will not have\r\n access to a dataset size, in which case they do not need to override this\r\n method.\r\n\r\n Args:\r\n minimum_dataset_size: The minimum number of records which should be\r\n contained in the dataset. Readers should attempt to raise an error when\r\n possible if an epoch of data contains fewer records.\r\n \"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def read(self):\r\n \"\"\"Parses one or more records into a feature dictionary.\r\n\r\n This method is expected to be called by a `TimeSeriesInputFn` object, and is\r\n not for use with models directly.\r\n\r\n A `TimeSeriesReader` object reads multiple records at a single time for\r\n efficiency; the size of these batches is an implementation detail internal\r\n to the input pipeline. These records should generally be sequential,\r\n although some out-of-order records due to file wraparounds are expected and\r\n must be handled by callers.\r\n\r\n Returns:\r\n A dictionary mapping feature names to `Tensor` values, each with an\r\n arbitrary batch dimension (for efficiency) as their first dimension.\r\n \"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def read_full(self):\r\n \"\"\"Return the full dataset.\r\n\r\n Largely for interactive use/plotting (or evaluation on small\r\n datasets). Generally not very efficient. 
Not recommended for training.\r\n\r\n Returns:\r\n Same return type as `read`, but with the full dataset rather than an\r\n arbitrary chunk of it. A dictionary mapping feature names to `Tensor`\r\n values, where the size of the first dimension of each `Tensor` is the\r\n number of samples in the entire dataset. These `Tensor`s should be\r\n constant across graph invocations, assuming that the underlying data\r\n remains constant. Current implementations re-read data on each graph\r\n invocation, although this may change in the future.\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass NumpyReader(TimeSeriesReader):\r\n \"\"\"A time series parser for feeding Numpy arrays to a `TimeSeriesInputFn`.\r\n\r\n Avoids embedding data in the graph as constants.\r\n \"\"\"\r\n\r\n def __init__(self, data, read_num_records_hint=4096):\r\n \"\"\"Numpy array input for a `TimeSeriesInputFn`.\r\n\r\n Args:\r\n data: A dictionary mapping feature names to Numpy arrays, with two\r\n possible shapes (requires keys `TrainEvalFeatures.TIMES` and\r\n `TrainEvalFeatures.VALUES`):\r\n Univariate; `TIMES` and `VALUES` are both vectors of shape [series\r\n length]\r\n Multivariate; `TIMES` is a vector of shape [series length], `VALUES`\r\n has shape [series length x number of features].\r\n In any case, `VALUES` and any exogenous features must have their shapes\r\n prefixed by the shape of the value corresponding to the `TIMES` key.\r\n read_num_records_hint: The maximum number of samples to read at one time,\r\n for efficiency.\r\n \"\"\"\r\n self._features = _canonicalize_numpy_data(\r\n data, require_single_batch=True)\r\n self._read_num_records_hint = read_num_records_hint\r\n\r\n def check_dataset_size(self, minimum_dataset_size):\r\n \"\"\"Raise an error if the dataset is too small.\"\"\"\r\n dataset_size = self._features[feature_keys.TrainEvalFeatures.TIMES].shape[1]\r\n if dataset_size < minimum_dataset_size:\r\n raise ValueError(\r\n (\"A TimeSeriesInputFn is configured to create windows of size {}, \"\r\n \"but only {} records were available in the dataset. Either decrease \"\r\n \"the window size or provide more records.\").format(\r\n minimum_dataset_size, dataset_size))\r\n\r\n def read(self):\r\n \"\"\"Returns a large chunk of the Numpy arrays for later re-chunking.\"\"\"\r\n # Remove the batch dimension from all features\r\n features = {key: numpy.squeeze(value, axis=0)\r\n for key, value in self._features.items()}\r\n return estimator_lib.inputs.numpy_input_fn(\r\n x=features,\r\n # The first dimensions of features are the series length, since we have\r\n # removed the batch dimension above. 
We now pull out\r\n # self._read_num_records_hint steps of this single time series to pass\r\n # to the TimeSeriesInputFn.\r\n batch_size=self._read_num_records_hint,\r\n num_epochs=None,\r\n shuffle=False)()\r\n\r\n def read_full(self):\r\n \"\"\"Returns `Tensor` versions of the full Numpy arrays.\"\"\"\r\n features = estimator_lib.inputs.numpy_input_fn(\r\n x=self._features,\r\n batch_size=1,\r\n num_epochs=None,\r\n queue_capacity=2, # Each queue element is a full copy of the dataset\r\n shuffle=False)()\r\n # TimeSeriesInputFn expect just a batch dimension\r\n return {feature_name: array_ops.squeeze(feature_value, axis=0)\r\n for feature_name, feature_value in features.items()}\r\n\r\n\r\nclass ReaderBaseTimeSeriesParser(TimeSeriesReader):\r\n \"\"\"Base for time series readers which wrap a `tf.ReaderBase`.\"\"\"\r\n\r\n def __init__(self, filenames, read_num_records_hint=4096):\r\n \"\"\"Configure the time series reader.\r\n\r\n Args:\r\n filenames: A string or list of strings indicating files to read records\r\n from.\r\n read_num_records_hint: When not reading a full dataset, indicates the\r\n number of records to transfer in a single chunk (for efficiency). The\r\n actual number transferred at one time may vary.\r\n \"\"\"\r\n self._filenames = filenames\r\n self._read_num_records_hint = read_num_records_hint\r\n\r\n @abc.abstractmethod\r\n def _get_reader(self):\r\n \"\"\"Get an instance of the tf.ReaderBase associated with this class.\"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def _process_records(self, lines):\r\n \"\"\"Given string items, return a processed dictionary of Tensors.\r\n\r\n Args:\r\n lines: A 1-dimensional string Tensor, each representing a record to parse\r\n (source dependent, e.g. a line of a file, or a serialized protocol\r\n buffer).\r\n\r\n Returns:\r\n A dictionary mapping feature names to their values. The batch dimensions\r\n should match the length of `lines`.\r\n \"\"\"\r\n pass\r\n\r\n def _get_filename_queue(self, epoch_limit):\r\n \"\"\"Constructs a filename queue with an epoch limit.\r\n\r\n `epoch_limit` is intended as an error checking fallback to prevent a reader\r\n from infinitely looping in its requests for more work items if none are\r\n available in any file. It should be set high enough that it is never reached\r\n assuming at least one record exists in some file.\r\n\r\n Args:\r\n epoch_limit: The maximum number of times to read through the complete list\r\n of files before throwing an OutOfRangeError.\r\n Returns:\r\n A tuple of (filename_queue, epoch_limiter):\r\n filename_queue: A FIFOQueue with filename work items.\r\n epoch_limiter: The local variable used for epoch limitation. This should\r\n be set to zero before a reader is passed `filename_queue` in order to\r\n reset the epoch limiter's state.\r\n \"\"\"\r\n epoch_limiter = variable_scope.variable(\r\n initial_value=constant_op.constant(0, dtype=dtypes.int64),\r\n name=\"epoch_limiter\",\r\n trainable=False,\r\n collections=[ops.GraphKeys.LOCAL_VARIABLES])\r\n filenames_tensor = array_ops.reshape(\r\n ops.convert_to_tensor(self._filenames), [-1])\r\n # We can't rely on epoch_limiter being initialized, since queue runners are\r\n # started before local variables are initialized. Instead, we ignore epoch\r\n # limits before variable initialization. This means that prior to variable\r\n # initialization, a QueueRunner may cause a reader to enter an un-checked\r\n # infinite loop. 
However, as soon as local variables are initialized, we\r\n # will start incrementing and checking epoch_limiter, which will interrupt\r\n # any in-progress loops.\r\n conditional_count_up_to = control_flow_ops.cond(\r\n state_ops.is_variable_initialized(epoch_limiter),\r\n lambda: epoch_limiter.count_up_to(epoch_limit),\r\n lambda: constant_op.constant(0, dtype=dtypes.int64))\r\n with ops.control_dependencies([conditional_count_up_to]):\r\n filenames_tensor = array_ops.identity(filenames_tensor)\r\n filename_queue = input_lib.string_input_producer(\r\n filenames_tensor, shuffle=False, capacity=1)\r\n return filename_queue, epoch_limiter\r\n\r\n def read(self):\r\n \"\"\"Reads a chunk of data from the `tf.ReaderBase` for later re-chunking.\"\"\"\r\n # Assuming there is at least one item to be read among all of the files in\r\n # self._filenames, we will not need to go through more than\r\n # self._read_num_records_hint epochs to get a batch of\r\n # self._read_num_records_hint records. Setting this limit and resetting it\r\n # before each reader.read_up_to call prevents infinite looping when there\r\n # are no records available in any of the files.\r\n filename_queue, epoch_limiter = self._get_filename_queue(\r\n epoch_limit=self._read_num_records_hint)\r\n reader = self._get_reader()\r\n epoch_reset_op = state_ops.assign(epoch_limiter, 0)\r\n with ops.control_dependencies([epoch_reset_op]):\r\n _, records = reader.read_up_to(\r\n filename_queue, self._read_num_records_hint)\r\n return self._process_records(records)\r\n\r\n def read_full(self):\r\n \"\"\"Reads a full epoch of data into memory.\"\"\"\r\n reader = self._get_reader()\r\n # Set a hard limit of 2 epochs through self._filenames. If there are any\r\n # records available, we should only end up reading the first record in the\r\n # second epoch before exiting the while loop and subsequently resetting the\r\n # epoch limit. 
If there are no records available in any of the files, this\r\n # hard limit prevents the reader.read_up_to call from looping infinitely.\r\n filename_queue, epoch_limiter = self._get_filename_queue(epoch_limit=2)\r\n epoch_reset_op = state_ops.assign(epoch_limiter, 0)\r\n with ops.control_dependencies([epoch_reset_op]):\r\n first_key, first_value = reader.read_up_to(filename_queue, 1)\r\n # Read until we get a duplicate key (one epoch)\r\n def _while_condition(\r\n current_key, current_value, current_index, collected_records):\r\n del current_value, current_index, collected_records # unused\r\n return math_ops.not_equal(array_ops.squeeze(current_key, axis=0),\r\n array_ops.squeeze(first_key, axis=0))\r\n\r\n def _while_body(\r\n current_key, current_value, current_index, collected_records):\r\n del current_key # unused\r\n new_key, new_value = reader.read_up_to(filename_queue, 1)\r\n new_key.set_shape([1])\r\n new_value.set_shape([1])\r\n return (new_key,\r\n new_value,\r\n current_index + 1,\r\n collected_records.write(current_index, current_value))\r\n _, _, _, records_ta = control_flow_ops.while_loop(\r\n _while_condition,\r\n _while_body,\r\n [constant_op.constant([\"\"]), first_value,\r\n 0, # current_index starting value\r\n tensor_array_ops.TensorArray( # collected_records\r\n dtype=dtypes.string, size=0, dynamic_size=True)])\r\n records = records_ta.concat()\r\n # Reset the reader when we're done so that subsequent requests for data get\r\n # the dataset in the proper order.\r\n with ops.control_dependencies([records]):\r\n reader_reset_op = reader.reset()\r\n with ops.control_dependencies([reader_reset_op]):\r\n records = array_ops.identity(records)\r\n return self._process_records(records)\r\n\r\n\r\nclass CSVReader(ReaderBaseTimeSeriesParser):\r\n \"\"\"Reads from a collection of CSV-formatted files.\"\"\"\r\n\r\n def __init__(self,\r\n filenames,\r\n column_names=(feature_keys.TrainEvalFeatures.TIMES,\r\n feature_keys.TrainEvalFeatures.VALUES),\r\n column_dtypes=None,\r\n skip_header_lines=None,\r\n read_num_records_hint=4096):\r\n \"\"\"CSV-parsing reader for a `TimeSeriesInputFn`.\r\n\r\n Args:\r\n filenames: A filename or list of filenames to read the time series\r\n from. Each line must have columns corresponding to `column_names`.\r\n column_names: A list indicating names for each\r\n feature. `TrainEvalFeatures.TIMES` and `TrainEvalFeatures.VALUES` are\r\n required; `VALUES` may be repeated to indicate a multivariate series.\r\n column_dtypes: If provided, must be a list with the same length as\r\n `column_names`, indicating dtypes for each column. Defaults to\r\n `tf.int64` for `TrainEvalFeatures.TIMES` and `tf.float32` for\r\n everything else.\r\n skip_header_lines: Passed on to `tf.TextLineReader`; skips this number of\r\n lines at the beginning of each file.\r\n read_num_records_hint: When not reading a full dataset, indicates the\r\n number of records to parse/transfer in a single chunk (for\r\n efficiency). 
The actual number transferred at one time may be more or\r\n less.\r\n Raises:\r\n ValueError: If required column names are not specified, or if lengths do\r\n not match.\r\n \"\"\"\r\n if feature_keys.TrainEvalFeatures.TIMES not in column_names:\r\n raise ValueError(\"'{}' is a required column.\".format(\r\n feature_keys.TrainEvalFeatures.TIMES))\r\n if feature_keys.TrainEvalFeatures.VALUES not in column_names:\r\n raise ValueError(\"'{}' is a required column.\".format(\r\n feature_keys.TrainEvalFeatures.VALUES))\r\n if column_dtypes is not None and len(column_dtypes) != len(column_names):\r\n raise ValueError(\r\n (\"If specified, the length of column_dtypes must match the length of \"\r\n \"column_names (got column_dtypes={} and column_names={}).\").format(\r\n column_dtypes, column_names))\r\n if sum(1 for column_name in column_names\r\n if column_name == feature_keys.TrainEvalFeatures.TIMES) != 1:\r\n raise ValueError(\r\n \"Got more than one times column ('{}'), but exactly \"\r\n \"one is required.\".format(feature_keys.TrainEvalFeatures.TIMES))\r\n self._column_names = column_names\r\n self._column_dtypes = column_dtypes\r\n self._skip_header_lines = skip_header_lines\r\n super(CSVReader, self).__init__(\r\n filenames=filenames, read_num_records_hint=read_num_records_hint)\r\n\r\n def _get_reader(self):\r\n return io_ops.TextLineReader(skip_header_lines=self._skip_header_lines)\r\n\r\n def _process_records(self, lines):\r\n \"\"\"Parse `lines` as CSV records.\"\"\"\r\n if self._column_dtypes is None:\r\n default_values = [(array_ops.zeros([], dtypes.int64),)\r\n if column_name == feature_keys.TrainEvalFeatures.TIMES\r\n else () for column_name in self._column_names]\r\n else:\r\n default_values = [(array_ops.zeros([], dtype),)\r\n for dtype in self._column_dtypes]\r\n columns = parsing_ops.decode_csv(lines, default_values)\r\n features_lists = {}\r\n for column_name, value in zip(self._column_names, columns):\r\n features_lists.setdefault(column_name, []).append(value)\r\n features = {}\r\n for column_name, values in features_lists.items():\r\n if column_name == feature_keys.TrainEvalFeatures.TIMES:\r\n features[column_name] = values[0]\r\n else:\r\n features[column_name] = array_ops.stack(values, axis=1)\r\n return features\r\n\r\n\r\nclass TFExampleReader(ReaderBaseTimeSeriesParser):\r\n \"\"\"Reads and parses `tf.Example`s from a TFRecords file.\"\"\"\r\n\r\n def __init__(self,\r\n filenames,\r\n features):\r\n \"\"\"Configure `tf.Example` parsing.\r\n\r\n Args:\r\n filenames: A filename or list of filenames to read the time series\r\n from. Each line must have columns corresponding to `column_names`.\r\n features: A dictionary mapping from feature keys to `tf.FixedLenFeature`\r\n objects. 
Must include `TrainEvalFeatures.TIMES` (scalar integer) and\r\n `TrainEvalFeatures.VALUES` (floating point vector) features.\r\n Raises:\r\n ValueError: If required times/values features are not present.\r\n \"\"\"\r\n if feature_keys.TrainEvalFeatures.TIMES not in features:\r\n raise ValueError(\"'{}' is a required column.\".format(\r\n feature_keys.TrainEvalFeatures.TIMES))\r\n if feature_keys.TrainEvalFeatures.VALUES not in features:\r\n raise ValueError(\"'{}' is a required column.\".format(\r\n feature_keys.TrainEvalFeatures.VALUES))\r\n self._features = features\r\n super(TFExampleReader, self).__init__(filenames=filenames)\r\n\r\n def _get_reader(self):\r\n return io_ops.TFRecordReader()\r\n\r\n def _process_records(self, examples):\r\n \"\"\"Parse `tf.Example`s into `Tensors`.\"\"\"\r\n return parsing_ops.parse_example(\r\n serialized=examples, features=self._features)\r\n\r\n\r\nclass TimeSeriesInputFn(object):\r\n \"\"\"Base for classes which create batches of windows from a time series.\"\"\"\r\n\r\n @abc.abstractmethod\r\n def create_batch(self):\r\n \"\"\"Creates chunked Tensors from times, values, and other features.\r\n\r\n Suitable for use as the input_fn argument of a tf.estimator.Estimator's\r\n fit() or evaluate() method.\r\n\r\n Returns:\r\n A tuple of (features, targets):\r\n features: A dictionary with `TrainEvalFeatures.TIMES` and\r\n `TrainEvalFeatures.VALUES` as keys, `TIMES` having an associated value\r\n with shape [batch size x window length], `VALUES` with shape [batch\r\n size x window length x number of features]. Any other features will\r\n also have shapes prefixed with [batch size x window length].\r\n targets: Not used, but must have a value for compatibility with the\r\n Estimator API. That value should be None.\r\n \"\"\"\r\n pass\r\n\r\n def __call__(self):\r\n # Allow a TimeSeriesInputFn to be used as an input function directly\r\n return self.create_batch()\r\n\r\n\r\nclass WholeDatasetInputFn(TimeSeriesInputFn):\r\n \"\"\"Supports passing a full time series to a model for evaluation/inference.\r\n\r\n Note that this `TimeSeriesInputFn` is not designed for high throughput, and\r\n should not be used for training. It allows for sequential evaluation on a full\r\n dataset (with sequential in-sample predictions), which then feeds naturally\r\n into `predict_continuation_input_fn` for making out-of-sample\r\n predictions. While this is useful for plotting and interactive use,\r\n `RandomWindowInputFn` is better suited to training and quantitative\r\n evaluation.\r\n \"\"\"\r\n # TODO(allenl): A SequentialWindowInputFn for getting model end state without\r\n # loading the whole dataset into memory (or for quantitative evaluation of\r\n # sequential models). Note that an Estimator using such a TimeSeriesInputFn\r\n # won't return in-sample predictions for the whole dataset, which means it\r\n # won't be terribly useful for interactive use/plotting (unless the user\r\n # passes in concat metrics). 
Also need to be careful about state saving for\r\n # sequential models, particularly the gaps between chunks.\r\n\r\n def __init__(self, time_series_reader):\r\n \"\"\"Initialize the `TimeSeriesInputFn`.\r\n\r\n Args:\r\n time_series_reader: A TimeSeriesReader object.\r\n \"\"\"\r\n self._reader = time_series_reader\r\n super(WholeDatasetInputFn, self).__init__()\r\n\r\n def create_batch(self):\r\n \"\"\"A suitable `input_fn` for an `Estimator`'s `evaluate()`.\r\n\r\n Returns:\r\n A dictionary mapping feature names to `Tensors`, each shape\r\n prefixed by [1, data set size] (i.e. a batch size of 1).\r\n \"\"\"\r\n features = self._reader.read_full()\r\n # Add a batch dimension of one to each feature.\r\n return ({feature_name: feature_value[None, ...]\r\n for feature_name, feature_value in features.items()},\r\n None)\r\n\r\n\r\nclass RandomWindowInputFn(TimeSeriesInputFn):\r\n \"\"\"Wraps a `TimeSeriesReader` to create random batches of windows.\r\n\r\n Tensors are first collected into sequential windows (in a windowing queue\r\n created by `tf.train.batch`, based on the order returned from\r\n `time_series_reader`), then these windows are randomly batched (in a\r\n `RandomShuffleQueue`), the Tensors returned by `create_batch` having shapes\r\n prefixed by [`batch_size`, `window_size`].\r\n\r\n This `TimeSeriesInputFn` is useful for both training and quantitative\r\n evaluation (but be sure to run several epochs for sequential models such as\r\n `StructuralEnsembleRegressor` to completely flush stale state left over from\r\n training). For qualitative evaluation or when preparing for predictions, use\r\n `WholeDatasetInputFn`.\r\n \"\"\"\r\n\r\n def __init__(\r\n self, time_series_reader, window_size, batch_size,\r\n queue_capacity_multiplier=1000, shuffle_min_after_dequeue_multiplier=2,\r\n discard_out_of_order=True, discard_consecutive_batches_limit=1000,\r\n jitter=True, num_threads=2, shuffle_seed=None):\r\n \"\"\"Configure the RandomWindowInputFn.\r\n\r\n Args:\r\n time_series_reader: A TimeSeriesReader object.\r\n window_size: The number of examples to keep together sequentially. This\r\n controls the length of truncated backpropagation: smaller values mean\r\n less sequential computation, which can lead to faster training, but\r\n create a coarser approximation to the gradient (which would ideally be\r\n computed by a forward pass over the entire sequence in order).\r\n batch_size: The number of windows to place together in a batch. Larger\r\n values will lead to more stable gradients during training.\r\n queue_capacity_multiplier: The capacity for the queues used to create\r\n batches, specified as a multiple of `batch_size` (for\r\n RandomShuffleQueue) and `batch_size * window_size` (for the\r\n FIFOQueue). Controls the maximum number of windows stored. Should be\r\n greater than `shuffle_min_after_dequeue_multiplier`.\r\n shuffle_min_after_dequeue_multiplier: The minimum number of windows in the\r\n RandomShuffleQueue after a dequeue, which controls the amount of entropy\r\n introduced during batching. Specified as a multiple of `batch_size`.\r\n discard_out_of_order: If True, windows of data which have times which\r\n decrease (a higher time followed by a lower time) are discarded. If\r\n False, the window and associated features are instead sorted so that\r\n times are non-decreasing. Discarding is typically faster, as models do\r\n not have to deal with artificial gaps in the data. 
However, discarding\r\n does create a bias where the beginnings and endings of files are\r\n under-sampled.\r\n discard_consecutive_batches_limit: Raise an OutOfRangeError if more than\r\n this number of batches are discarded without a single non-discarded\r\n window (prevents infinite looping when the dataset is too small).\r\n jitter: If True, randomly discards examples between some windows in order\r\n to avoid deterministic chunking patterns. This is important for models\r\n like AR which may otherwise overfit a fixed chunking.\r\n num_threads: Use this number of threads for queues. Setting a value of 1\r\n removes one source of non-determinism (and in combination with\r\n shuffle_seed should provide deterministic windowing).\r\n shuffle_seed: A seed for window shuffling. The default value of None\r\n provides random behavior. With `shuffle_seed` set and\r\n `num_threads=1`, provides deterministic behavior.\r\n \"\"\"\r\n self._reader = time_series_reader\r\n self._window_size = window_size\r\n self._reader.check_dataset_size(minimum_dataset_size=self._window_size)\r\n self._batch_size = batch_size\r\n self._queue_capacity_multiplier = queue_capacity_multiplier\r\n self._shuffle_min_after_dequeue_multiplier = (\r\n shuffle_min_after_dequeue_multiplier)\r\n self._discard_out_of_order = discard_out_of_order\r\n self._discard_limit = discard_consecutive_batches_limit\r\n self._jitter = jitter\r\n if num_threads is None:\r\n self._num_threads = self._batch_size\r\n else:\r\n self._num_threads = num_threads\r\n self._shuffle_seed = shuffle_seed\r\n super(RandomWindowInputFn, self).__init__()\r\n\r\n def create_batch(self):\r\n \"\"\"Create queues to window and batch time series data.\r\n\r\n Returns:\r\n A dictionary of Tensors corresponding to the output of `self._reader`\r\n (from the `time_series_reader` constructor argument), each with shapes\r\n prefixed by [`batch_size`, `window_size`].\r\n \"\"\"\r\n features = self._reader.read()\r\n if self._jitter:\r\n # TODO(agarwal, allenl): Figure out if more jitter is needed here.\r\n jitter = random_ops.random_uniform(shape=[], maxval=2, dtype=dtypes.int32)\r\n else:\r\n jitter = 0\r\n # To keep things efficient, we pass from the windowing batcher to the\r\n # batch-of-windows batcher in batches. 
This avoids the need for huge numbers\r\n # of threads, but does mean that jitter is only applied occasionally.\r\n # TODO(allenl): Experiment with different internal passing sizes.\r\n internal_passing_size = self._batch_size\r\n features_windowed = input_lib.batch(\r\n features,\r\n batch_size=self._window_size * internal_passing_size + jitter,\r\n enqueue_many=True,\r\n capacity=(self._queue_capacity_multiplier\r\n * internal_passing_size * self._window_size),\r\n num_threads=self._num_threads)\r\n raw_features_windowed = features_windowed\r\n if self._jitter:\r\n features_windowed = {\r\n key: value[jitter:]\r\n for key, value in features_windowed.items()}\r\n features_windowed = {\r\n key: array_ops.reshape(\r\n value,\r\n array_ops.concat(\r\n [[internal_passing_size, self._window_size],\r\n array_ops.shape(value)[1:]],\r\n axis=0))\r\n for key, value in features_windowed.items()}\r\n batch_and_window_shape = tensor_shape.TensorShape(\r\n [internal_passing_size, self._window_size])\r\n for key in features_windowed.keys():\r\n features_windowed[key].set_shape(\r\n batch_and_window_shape.concatenate(\r\n raw_features_windowed[key].get_shape()[1:]))\r\n # When switching files, we may end up with windows where the time is not\r\n # decreasing, even if times within each file are sorted (and even if those\r\n # files are visited in order, when looping back around to the beginning of\r\n # the first file). This is hard for models to deal with, so we either\r\n # discard such examples, creating a bias where the beginning and end of the\r\n # series is under-sampled, or we sort the window, creating large gaps.\r\n times = features_windowed[feature_keys.TrainEvalFeatures.TIMES]\r\n if self._discard_out_of_order:\r\n non_decreasing = math_ops.reduce_all(\r\n times[:, 1:] >= times[:, :-1], axis=1)\r\n # Ensure that no more than self._discard_limit complete batches are\r\n # discarded contiguously (resetting the count when we find a single clean\r\n # window). 
This prevents infinite looping when the dataset is smaller than\r\n # the window size.\r\n # TODO(allenl): Figure out a way to return informative errors from\r\n # count_up_to.\r\n discarded_windows_limiter = variable_scope.variable(\r\n initial_value=constant_op.constant(0, dtype=dtypes.int64),\r\n name=\"discarded_windows_limiter\",\r\n trainable=False,\r\n collections=[ops.GraphKeys.LOCAL_VARIABLES])\r\n def _initialized_limit_check():\r\n return control_flow_ops.cond(\r\n math_ops.reduce_any(non_decreasing),\r\n lambda: state_ops.assign(discarded_windows_limiter, 0),\r\n lambda: discarded_windows_limiter.count_up_to(self._discard_limit))\r\n discard_limit_op = control_flow_ops.cond(\r\n state_ops.is_variable_initialized(discarded_windows_limiter),\r\n _initialized_limit_check,\r\n lambda: constant_op.constant(0, dtype=dtypes.int64))\r\n with ops.control_dependencies([discard_limit_op]):\r\n non_decreasing = array_ops.identity(non_decreasing)\r\n else:\r\n _, indices_descending = nn.top_k(\r\n times, k=array_ops.shape(times)[-1], sorted=True)\r\n indices = array_ops.reverse(indices_descending, axis=[0])\r\n features_windowed = {\r\n key: array_ops.gather(params=value, indices=indices)\r\n for key, value in features_windowed.items()\r\n }\r\n non_decreasing = True\r\n features_batched = input_lib.maybe_shuffle_batch(\r\n features_windowed,\r\n num_threads=self._num_threads,\r\n seed=self._shuffle_seed,\r\n batch_size=self._batch_size,\r\n capacity=self._queue_capacity_multiplier * self._batch_size,\r\n min_after_dequeue=(self._shuffle_min_after_dequeue_multiplier *\r\n self._batch_size),\r\n keep_input=non_decreasing,\r\n enqueue_many=True)\r\n return (features_batched, None)\r\n\r\n\r\ndef _canonicalize_numpy_data(data, require_single_batch):\r\n \"\"\"Do basic checking and reshaping for Numpy data.\r\n\r\n Args:\r\n data: A dictionary mapping keys to Numpy arrays, with several possible\r\n shapes (requires keys `TrainEvalFeatures.TIMES` and\r\n `TrainEvalFeatures.VALUES`):\r\n Single example; `TIMES` is a scalar and `VALUES` is either a scalar or a\r\n vector of length [number of features].\r\n Sequence; `TIMES` is a vector of shape [series length], `VALUES` either\r\n has shape [series length] (univariate) or [series length x number of\r\n features] (multivariate).\r\n Batch of sequences; `TIMES` is a vector of shape [batch size x series\r\n length], `VALUES` has shape [batch size x series length] or [batch\r\n size x series length x number of features].\r\n In any case, `VALUES` and any exogenous features must have their shapes\r\n prefixed by the shape of the value corresponding to the `TIMES` key.\r\n require_single_batch: If True, raises an error if the provided data has a\r\n batch dimension > 1.\r\n Returns:\r\n A dictionary with features normalized to have shapes prefixed with [batch\r\n size x series length]. 
The sizes of dimensions which were omitted in the\r\n inputs are 1.\r\n Raises:\r\n ValueError: If dimensions are incorrect or do not match, or required\r\n features are missing.\r\n \"\"\"\r\n features = {key: numpy.array(value) for key, value in data.items()}\r\n if (feature_keys.TrainEvalFeatures.TIMES not in features or\r\n feature_keys.TrainEvalFeatures.VALUES not in features):\r\n raise ValueError(\"{} and {} are required features.\".format(\r\n feature_keys.TrainEvalFeatures.TIMES,\r\n feature_keys.TrainEvalFeatures.VALUES))\r\n times = features[feature_keys.TrainEvalFeatures.TIMES]\r\n for key, value in features.items():\r\n if value.shape[:len(times.shape)] != times.shape:\r\n raise ValueError(\r\n (\"All features must have their shapes prefixed by the shape of the\"\r\n \" times feature. Got shape {} for feature '{}', but shape {} for\"\r\n \" '{}'\").format(value.shape, key, times.shape,\r\n feature_keys.TrainEvalFeatures.TIMES))\r\n if not times.shape: # a single example\r\n if not features[feature_keys.TrainEvalFeatures.VALUES].shape: # univariate\r\n # Add a feature dimension (with one feature)\r\n features[feature_keys.TrainEvalFeatures.VALUES] = features[\r\n feature_keys.TrainEvalFeatures.VALUES][..., None]\r\n elif len(features[feature_keys.TrainEvalFeatures.VALUES].shape) > 1:\r\n raise ValueError(\r\n (\"Got an unexpected number of dimensions for the '{}' feature.\"\r\n \" Was expecting at most 1 dimension\"\r\n \" ([number of features]) since '{}' does not \"\r\n \"have a batch or time dimension, but got shape {}\").format(\r\n feature_keys.TrainEvalFeatures.VALUES,\r\n feature_keys.TrainEvalFeatures.TIMES,\r\n features[feature_keys.TrainEvalFeatures.VALUES].shape))\r\n # Add trivial batch and time dimensions for every feature\r\n features = {key: value[None, None, ...] for key, value in features.items()}\r\n if len(times.shape) == 1: # shape [series length]\r\n if len(features[feature_keys.TrainEvalFeatures.VALUES]\r\n .shape) == 1: # shape [series length]\r\n # Add a feature dimension (with one feature)\r\n features[feature_keys.TrainEvalFeatures.VALUES] = features[\r\n feature_keys.TrainEvalFeatures.VALUES][..., None]\r\n elif len(features[feature_keys.TrainEvalFeatures.VALUES].shape) > 2:\r\n raise ValueError(\r\n (\"Got an unexpected number of dimensions for the '{}' feature.\"\r\n \" Was expecting at most 2 dimensions\"\r\n \" ([series length, number of features]) since '{}' does not \"\r\n \"have a batch dimension, but got shape {}\").format(\r\n feature_keys.TrainEvalFeatures.VALUES,\r\n feature_keys.TrainEvalFeatures.TIMES,\r\n features[feature_keys.TrainEvalFeatures.VALUES].shape))\r\n # Add trivial batch dimensions for every feature\r\n features = {key: value[None, ...] for key, value in features.items()}\r\n elif len(features[feature_keys.TrainEvalFeatures.TIMES]\r\n .shape) != 2: # shape [batch size, series length]\r\n raise ValueError(\r\n (\"Got an unexpected number of dimensions for times. Was expecting at \"\r\n \"most two ([batch size, series length]), but got shape {}.\").format(\r\n times.shape))\r\n if require_single_batch:\r\n # We don't expect input to be already batched; batching is done later\r\n if features[feature_keys.TrainEvalFeatures.TIMES].shape[0] != 1:\r\n raise ValueError(\"Got batch input, was expecting unbatched input.\")\r\n return features\r\n",
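The entry above is the `tf.contrib.timeseries` input pipeline (the readers plus the `TimeSeriesInputFn`s). As a hedged illustration of how those pieces fit together, the sketch below wires a `NumpyReader` into `RandomWindowInputFn` for training, `WholeDatasetInputFn` for evaluation, and `predict_continuation_input_fn` for out-of-sample prediction. It assumes TensorFlow 1.x with `tf.contrib.timeseries` available; the `ARRegressor` choice and every hyperparameter are illustrative assumptions, not taken from the file itself.

```python
# Sketch only: assumes TensorFlow 1.x with tf.contrib.timeseries available.
# The estimator and hyperparameters are illustrative, not part of the file above.
import numpy as np
import tensorflow as tf

# An in-memory series; NumpyReader avoids embedding the data as graph constants.
times = np.arange(200)
values = np.sin(2 * np.pi * times / 50.0)
data = {
    tf.contrib.timeseries.TrainEvalFeatures.TIMES: times,
    tf.contrib.timeseries.TrainEvalFeatures.VALUES: values,
}
reader = tf.contrib.timeseries.NumpyReader(data)

# Random windows for training; the whole series for evaluation / state priming.
# For ARRegressor, window_size must equal input_window_size + output_window_size.
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
    reader, batch_size=4, window_size=32)
eval_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)

estimator = tf.contrib.timeseries.ARRegressor(
    periodicities=50, input_window_size=24, output_window_size=8,
    num_features=1)
estimator.train(input_fn=train_input_fn, steps=200)
evaluation = estimator.evaluate(input_fn=eval_input_fn, steps=1)

# Continue the series 20 steps past the evaluated window; each yielded dict
# holds the predicted means and times for one batch element.
(predictions,) = tuple(estimator.predict(
    input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
        evaluation, steps=20)))
print(predictions["mean"])
```

The evaluation step uses `WholeDatasetInputFn` precisely because, as the docstrings above state, `predict_continuation_input_fn` needs the filtering state and times produced by evaluating over the full series, while `RandomWindowInputFn` stays reserved for training and quantitative evaluation.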
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Operations for generating random numbers.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import random_seed\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import gen_random_ops\r\nfrom tensorflow.python.ops import math_ops\r\n# go/tf-wildcard-import\r\n# pylint: disable=wildcard-import\r\nfrom tensorflow.python.ops.gen_random_ops import *\r\nfrom tensorflow.python.util import deprecation\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n# pylint: enable=wildcard-import\r\n\r\n\r\ndef _ShapeTensor(shape):\r\n \"\"\"Convert to an int32 or int64 tensor, defaulting to int32 if empty.\"\"\"\r\n if isinstance(shape, (tuple, list)) and not shape:\r\n dtype = dtypes.int32\r\n else:\r\n dtype = None\r\n return ops.convert_to_tensor(shape, dtype=dtype, name=\"shape\")\r\n\r\n\r\n@tf_export(\"random.normal\", \"random_normal\")\r\ndef random_normal(shape,\r\n mean=0.0,\r\n stddev=1.0,\r\n dtype=dtypes.float32,\r\n seed=None,\r\n name=None):\r\n \"\"\"Outputs random values from a normal distribution.\r\n\r\n Args:\r\n shape: A 1-D integer Tensor or Python array. The shape of the output tensor.\r\n mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal\r\n distribution.\r\n stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation\r\n of the normal distribution.\r\n dtype: The type of the output.\r\n seed: A Python integer. 
Used to create a random seed for the distribution.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tensor of the specified shape filled with random normal values.\r\n \"\"\"\r\n with ops.name_scope(name, \"random_normal\", [shape, mean, stddev]) as name:\r\n shape_tensor = _ShapeTensor(shape)\r\n mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name=\"mean\")\r\n stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name=\"stddev\")\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n rnd = gen_random_ops.random_standard_normal(\r\n shape_tensor, dtype, seed=seed1, seed2=seed2)\r\n mul = rnd * stddev_tensor\r\n value = math_ops.add(mul, mean_tensor, name=name)\r\n return value\r\n\r\n\r\nops.NotDifferentiable(\"RandomStandardNormal\")\r\n\r\n\r\ndef parameterized_truncated_normal(shape,\r\n means=0.0,\r\n stddevs=1.0,\r\n minvals=-2.0,\r\n maxvals=2.0,\r\n dtype=dtypes.float32,\r\n seed=None,\r\n name=None):\r\n \"\"\"Outputs random values from a truncated normal distribution.\r\n\r\n The generated values follow a normal distribution with specified mean and\r\n standard deviation, except that values whose magnitude is more than 2 standard\r\n deviations from the mean are dropped and re-picked.\r\n\r\n Args:\r\n shape: A 1-D integer Tensor or Python array. The shape of the output tensor.\r\n means: A 0-D Tensor or Python value of type `dtype`. The mean of the\r\n truncated normal distribution.\r\n stddevs: A 0-D Tensor or Python value of type `dtype`. The standard\r\n deviation of the truncated normal distribution.\r\n minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of\r\n the truncated normal distribution.\r\n maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of\r\n the truncated normal distribution.\r\n dtype: The type of the output.\r\n seed: A Python integer. Used to create a random seed for the distribution.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tensor of the specified shape filled with random truncated normal values.\r\n \"\"\"\r\n with ops.name_scope(name, \"parameterized_truncated_normal\",\r\n [shape, means, stddevs, minvals, maxvals]) as name:\r\n shape_tensor = _ShapeTensor(shape)\r\n means_tensor = ops.convert_to_tensor(means, dtype=dtype, name=\"means\")\r\n stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name=\"stddevs\")\r\n minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name=\"minvals\")\r\n maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name=\"maxvals\")\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n rnd = gen_random_ops.parameterized_truncated_normal(\r\n shape_tensor,\r\n means_tensor,\r\n stddevs_tensor,\r\n minvals_tensor,\r\n maxvals_tensor,\r\n seed=seed1,\r\n seed2=seed2)\r\n return rnd\r\n\r\n\r\n@tf_export(\"random.truncated_normal\", \"truncated_normal\")\r\ndef truncated_normal(shape,\r\n mean=0.0,\r\n stddev=1.0,\r\n dtype=dtypes.float32,\r\n seed=None,\r\n name=None):\r\n \"\"\"Outputs random values from a truncated normal distribution.\r\n\r\n The generated values follow a normal distribution with specified mean and\r\n standard deviation, except that values whose magnitude is more than 2 standard\r\n deviations from the mean are dropped and re-picked.\r\n\r\n Args:\r\n shape: A 1-D integer Tensor or Python array. The shape of the output tensor.\r\n mean: A 0-D Tensor or Python value of type `dtype`. 
The mean of the\r\n truncated normal distribution.\r\n stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation\r\n of the normal distribution, before truncation.\r\n dtype: The type of the output.\r\n seed: A Python integer. Used to create a random seed for the distribution.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tensor of the specified shape filled with random truncated normal values.\r\n \"\"\"\r\n with ops.name_scope(name, \"truncated_normal\", [shape, mean, stddev]) as name:\r\n shape_tensor = _ShapeTensor(shape)\r\n mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name=\"mean\")\r\n stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name=\"stddev\")\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n rnd = gen_random_ops.truncated_normal(\r\n shape_tensor, dtype, seed=seed1, seed2=seed2)\r\n mul = rnd * stddev_tensor\r\n value = math_ops.add(mul, mean_tensor, name=name)\r\n return value\r\n\r\n\r\nops.NotDifferentiable(\"ParameterizedTruncatedNormal\")\r\nops.NotDifferentiable(\"TruncatedNormal\")\r\n\r\n\r\n@tf_export(\"random.uniform\", \"random_uniform\")\r\ndef random_uniform(shape,\r\n minval=0,\r\n maxval=None,\r\n dtype=dtypes.float32,\r\n seed=None,\r\n name=None):\r\n \"\"\"Outputs random values from a uniform distribution.\r\n\r\n The generated values follow a uniform distribution in the range\r\n `[minval, maxval)`. The lower bound `minval` is included in the range, while\r\n the upper bound `maxval` is excluded.\r\n\r\n For floats, the default range is `[0, 1)`. For ints, at least `maxval` must\r\n be specified explicitly.\r\n\r\n In the integer case, the random integers are slightly biased unless\r\n `maxval - minval` is an exact power of two. The bias is small for values of\r\n `maxval - minval` significantly smaller than the range of the output (either\r\n `2**32` or `2**64`).\r\n\r\n Args:\r\n shape: A 1-D integer Tensor or Python array. The shape of the output tensor.\r\n minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the\r\n range of random values to generate. Defaults to 0.\r\n maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on\r\n the range of random values to generate. Defaults to 1 if `dtype` is\r\n floating point.\r\n dtype: The type of the output: `float16`, `float32`, `float64`, `int32`,\r\n or `int64`.\r\n seed: A Python integer. 
Used to create a random seed for the distribution.\r\n See `tf.set_random_seed`\r\n for behavior.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tensor of the specified shape filled with random uniform values.\r\n\r\n Raises:\r\n ValueError: If `dtype` is integral and `maxval` is not specified.\r\n \"\"\"\r\n dtype = dtypes.as_dtype(dtype)\r\n if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32,\r\n dtypes.float64, dtypes.int32, dtypes.int64):\r\n raise ValueError(\"Invalid dtype %r\" % dtype)\r\n if maxval is None:\r\n if dtype.is_integer:\r\n raise ValueError(\"Must specify maxval for integer dtype %r\" % dtype)\r\n maxval = 1\r\n with ops.name_scope(name, \"random_uniform\", [shape, minval, maxval]) as name:\r\n shape = _ShapeTensor(shape)\r\n minval = ops.convert_to_tensor(minval, dtype=dtype, name=\"min\")\r\n maxval = ops.convert_to_tensor(maxval, dtype=dtype, name=\"max\")\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n if dtype.is_integer:\r\n return gen_random_ops.random_uniform_int(\r\n shape, minval, maxval, seed=seed1, seed2=seed2, name=name)\r\n else:\r\n rnd = gen_random_ops.random_uniform(shape, dtype, seed=seed1, seed2=seed2)\r\n return math_ops.add(rnd * (maxval - minval), minval, name=name)\r\n\r\n\r\nops.NotDifferentiable(\"RandomUniform\")\r\n\r\n\r\n@tf_export(\"random.shuffle\", \"random_shuffle\")\r\ndef random_shuffle(value, seed=None, name=None):\r\n \"\"\"Randomly shuffles a tensor along its first dimension.\r\n\r\n The tensor is shuffled along dimension 0, such that each `value[j]` is mapped\r\n to one and only one `output[i]`. For example, a mapping that might occur for a\r\n 3x2 tensor is:\r\n\r\n ```python\r\n [[1, 2], [[5, 6],\r\n [3, 4], ==> [1, 2],\r\n [5, 6]] [3, 4]]\r\n ```\r\n\r\n Args:\r\n value: A Tensor to be shuffled.\r\n seed: A Python integer. Used to create a random seed for the distribution.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tensor of same shape and type as `value`, shuffled along its first\r\n dimension.\r\n \"\"\"\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n return gen_random_ops.random_shuffle(\r\n value, seed=seed1, seed2=seed2, name=name)\r\n\r\n\r\n@tf_export(\"image.random_crop\", \"random_crop\")\r\ndef random_crop(value, size, seed=None, name=None):\r\n \"\"\"Randomly crops a tensor to a given size.\r\n\r\n Slices a shape `size` portion out of `value` at a uniformly chosen offset.\r\n Requires `value.shape >= size`.\r\n\r\n If a dimension should not be cropped, pass the full size of that dimension.\r\n For example, RGB images can be cropped with\r\n `size = [crop_height, crop_width, 3]`.\r\n\r\n Args:\r\n value: Input tensor to crop.\r\n size: 1-D tensor with size the rank of `value`.\r\n seed: Python integer. Used to create a random seed. 
See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: A name for this operation (optional).\r\n\r\n Returns:\r\n A cropped tensor of the same rank as `value` and shape `size`.\r\n \"\"\"\r\n # TODO(shlens): Implement edge case to guarantee output size dimensions.\r\n # If size > value.shape, zero pad the result so that it always has shape\r\n # exactly size.\r\n with ops.name_scope(name, \"random_crop\", [value, size]) as name:\r\n value = ops.convert_to_tensor(value, name=\"value\")\r\n size = ops.convert_to_tensor(size, dtype=dtypes.int32, name=\"size\")\r\n shape = array_ops.shape(value)\r\n check = control_flow_ops.Assert(\r\n math_ops.reduce_all(shape >= size),\r\n [\"Need value.shape >= size, got \", shape, size],\r\n summarize=1000)\r\n shape = control_flow_ops.with_dependencies([check], shape)\r\n limit = shape - size + 1\r\n offset = random_uniform(\r\n array_ops.shape(shape),\r\n dtype=size.dtype,\r\n maxval=size.dtype.max,\r\n seed=seed) % limit\r\n return array_ops.slice(value, offset, size, name=name)\r\n\r\n\r\n@tf_export(\"random.multinomial\", \"multinomial\")\r\ndef multinomial(logits, num_samples, seed=None, name=None, output_dtype=None):\r\n \"\"\"Draws samples from a multinomial distribution.\r\n\r\n Example:\r\n\r\n ```python\r\n # samples has shape [1, 5], where each value is either 0 or 1 with equal\r\n # probability.\r\n samples = tf.multinomial(tf.log([[10., 10.]]), 5)\r\n ```\r\n\r\n Args:\r\n logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice\r\n `[i, :]` represents the unnormalized log-probabilities for all classes.\r\n num_samples: 0-D. Number of independent samples to draw for each row slice.\r\n seed: A Python integer. Used to create a random seed for the distribution.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: Optional name for the operation.\r\n output_dtype: integer type to use for the output. Defaults to int64.\r\n\r\n Returns:\r\n The drawn samples of shape `[batch_size, num_samples]`.\r\n \"\"\"\r\n with ops.name_scope(name, \"multinomial\", [logits]):\r\n logits = ops.convert_to_tensor(logits, name=\"logits\")\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n return gen_random_ops.multinomial(\r\n logits, num_samples, seed=seed1, seed2=seed2, output_dtype=output_dtype)\r\n\r\n\r\nops.NotDifferentiable(\"Multinomial\")\r\n\r\n\r\n@tf_export(\"random.gamma\", \"random_gamma\")\r\[email protected]_endpoints(\"random_gamma\")\r\ndef random_gamma(shape,\r\n alpha,\r\n beta=None,\r\n dtype=dtypes.float32,\r\n seed=None,\r\n name=None):\r\n \"\"\"Draws `shape` samples from each of the given Gamma distribution(s).\r\n\r\n `alpha` is the shape parameter describing the distribution(s), and `beta` is\r\n the inverse scale parameter(s).\r\n\r\n Note: Because internal calculations are done using `float64` and casting has\r\n `floor` semantics, we must manually map zero outcomes to the smallest\r\n possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This\r\n means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise\r\n should. This bias can only happen for small values of `alpha`, i.e.,\r\n `alpha << 1` or large values of `beta`, i.e., `beta >> 1`.\r\n\r\n The samples are differentiable w.r.t. 
alpha and beta.\r\n The derivatives are computed using the approach described in the paper\r\n\r\n [Michael Figurnov, Shakir Mohamed, Andriy Mnih.\r\n Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)\r\n\r\n Example:\r\n\r\n ```python\r\n samples = tf.random_gamma([10], [0.5, 1.5])\r\n # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents\r\n # the samples drawn from each distribution\r\n\r\n samples = tf.random_gamma([7, 5], [0.5, 1.5])\r\n # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]\r\n # represents the 7x5 samples drawn from each of the two distributions\r\n\r\n alpha = tf.constant([[1.],[3.],[5.]])\r\n beta = tf.constant([[3., 4.]])\r\n samples = tf.random_gamma([30], alpha=alpha, beta=beta)\r\n # samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions.\r\n\r\n loss = tf.reduce_mean(tf.square(samples))\r\n dloss_dalpha, dloss_dbeta = tf.gradients(loss, [alpha, beta])\r\n # unbiased stochastic derivatives of the loss function\r\n alpha.shape == dloss_dalpha.shape # True\r\n beta.shape == dloss_dbeta.shape # True\r\n ```\r\n\r\n Args:\r\n shape: A 1-D integer Tensor or Python array. The shape of the output samples\r\n to be drawn per alpha/beta-parameterized distribution.\r\n alpha: A Tensor or Python value or N-D array of type `dtype`. `alpha`\r\n provides the shape parameter(s) describing the gamma distribution(s) to\r\n sample. Must be broadcastable with `beta`.\r\n beta: A Tensor or Python value or N-D array of type `dtype`. Defaults to 1.\r\n `beta` provides the inverse scale parameter(s) of the gamma\r\n distribution(s) to sample. Must be broadcastable with `alpha`.\r\n dtype: The type of alpha, beta, and the output: `float16`, `float32`, or\r\n `float64`.\r\n seed: A Python integer. 
Used to create a random seed for the distributions.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: Optional name for the operation.\r\n\r\n Returns:\r\n samples: a `Tensor` of shape\r\n `tf.concat([shape, tf.shape(alpha + beta)], axis=0)` with values of type\r\n `dtype`.\r\n \"\"\"\r\n with ops.name_scope(name, \"random_gamma\", [shape, alpha, beta]):\r\n shape = ops.convert_to_tensor(shape, name=\"shape\", dtype=dtypes.int32)\r\n alpha = ops.convert_to_tensor(alpha, name=\"alpha\", dtype=dtype)\r\n beta = ops.convert_to_tensor(\r\n beta if beta is not None else 1, name=\"beta\", dtype=dtype)\r\n alpha_broadcast = alpha + array_ops.zeros_like(beta)\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n return math_ops.maximum(\r\n np.finfo(dtype.as_numpy_dtype).tiny,\r\n gen_random_ops.random_gamma(\r\n shape, alpha_broadcast, seed=seed1, seed2=seed2) / beta)\r\n\r\n\r\n@tf_export(\"random.poisson\", \"random_poisson\")\r\[email protected]_endpoints(\"random_poisson\")\r\ndef random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None):\r\n \"\"\"Draws `shape` samples from each of the given Poisson distribution(s).\r\n\r\n `lam` is the rate parameter describing the distribution(s).\r\n\r\n Example:\r\n\r\n ```python\r\n samples = tf.random_poisson([0.5, 1.5], [10])\r\n # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents\r\n # the samples drawn from each distribution\r\n\r\n samples = tf.random_poisson([12.2, 3.3], [7, 5])\r\n # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]\r\n # represents the 7x5 samples drawn from each of the two distributions\r\n ```\r\n\r\n Args:\r\n lam: A Tensor or Python value or N-D array of type `dtype`.\r\n `lam` provides the rate parameter(s) describing the poisson\r\n distribution(s) to sample.\r\n shape: A 1-D integer Tensor or Python array. The shape of the output samples\r\n to be drawn per \"rate\"-parameterized distribution.\r\n dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or\r\n `int64`.\r\n seed: A Python integer. Used to create a random seed for the distributions.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: Optional name for the operation.\r\n\r\n Returns:\r\n samples: a `Tensor` of shape `tf.concat([shape, tf.shape(lam)], axis=0)`\r\n with values of type `dtype`.\r\n \"\"\"\r\n with ops.name_scope(name, \"random_poisson\", [lam, shape]):\r\n shape = ops.convert_to_tensor(shape, name=\"shape\", dtype=dtypes.int32)\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n return gen_random_ops.random_poisson_v2(\r\n shape, lam, dtype=dtype, seed=seed1, seed2=seed2)\r\n",
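The second entry above is `random_ops.py`. Below is a small usage sketch of those ops, assuming a TensorFlow 1.x graph-mode session; the shapes, seeds, and distribution parameters are illustrative only.

```python
# Sketch only: assumes TensorFlow 1.x graph mode; all values are illustrative.
import tensorflow as tf

tf.set_random_seed(42)  # graph-level seed, combined with any op-level seeds below

normal = tf.random_normal([2, 3], mean=0.0, stddev=1.0, seed=1)
truncated = tf.truncated_normal([2, 3], stddev=0.5)   # values beyond 2 stddev are re-drawn
uniform_ints = tf.random_uniform([4], minval=0, maxval=10, dtype=tf.int32)  # maxval required for ints
categorical = tf.multinomial(tf.log([[10., 10.]]), num_samples=5)  # shape [1, 5], classes 0 or 1
gamma = tf.random_gamma([10], alpha=[0.5, 1.5])       # shape [10, 2]: 10 draws per alpha

with tf.Session() as sess:
    print(sess.run([normal, truncated, uniform_ints, categorical, gamma]))
```

With the graph-level seed set (and op-level seeds where given), the drawn sequences are reproducible across runs, which is the `tf.set_random_seed` behavior the docstrings above refer to.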
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n# pylint: disable=invalid-name\r\n\"\"\"Save and restore variables.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport os.path\r\nimport re\r\nimport time\r\n\r\nfrom google.protobuf import text_format\r\n\r\nfrom tensorflow.core.protobuf import saver_pb2\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.framework import errors\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.lib.io import file_io\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.training import training_util\r\nfrom tensorflow.python.training.checkpoint_state_pb2 import CheckpointState\r\nfrom tensorflow.python.util import compat\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\ndef _GetCheckpointFilename(save_dir, latest_filename):\r\n \"\"\"Returns a filename for storing the CheckpointState.\r\n\r\n Args:\r\n save_dir: The directory for saving and restoring checkpoints.\r\n latest_filename: Name of the file in 'save_dir' that is used\r\n to store the CheckpointState.\r\n\r\n Returns:\r\n The path of the file that contains the CheckpointState proto.\r\n \"\"\"\r\n if latest_filename is None:\r\n latest_filename = \"checkpoint\"\r\n return os.path.join(save_dir, latest_filename)\r\n\r\n\r\n@tf_export(\"train.generate_checkpoint_state_proto\")\r\ndef generate_checkpoint_state_proto(save_dir,\r\n model_checkpoint_path,\r\n all_model_checkpoint_paths=None,\r\n all_model_checkpoint_timestamps=None,\r\n last_preserved_timestamp=None):\r\n \"\"\"Generates a checkpoint state proto.\r\n\r\n Args:\r\n save_dir: Directory where the model was saved.\r\n model_checkpoint_path: The checkpoint file.\r\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\r\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\r\n the last element must be equal to model_checkpoint_path. These paths\r\n are also saved in the CheckpointState proto.\r\n all_model_checkpoint_timestamps: A list of floats, indicating the number of\r\n seconds since the Epoch when each checkpoint was generated.\r\n last_preserved_timestamp: A float, indicating the number of seconds since\r\n the Epoch when the last preserved checkpoint was written, e.g. 
due to a\r\n `keep_checkpoint_every_n_hours` parameter (see\r\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\r\n Returns:\r\n CheckpointState proto with model_checkpoint_path and\r\n all_model_checkpoint_paths updated to either absolute paths or\r\n relative paths to the current save_dir.\r\n\r\n Raises:\r\n ValueError: If `all_model_checkpoint_timestamps` was provided but its length\r\n does not match `all_model_checkpoint_paths`.\r\n \"\"\"\r\n if all_model_checkpoint_paths is None:\r\n all_model_checkpoint_paths = []\r\n\r\n if (not all_model_checkpoint_paths or\r\n all_model_checkpoint_paths[-1] != model_checkpoint_path):\r\n logging.info(\"%s is not in all_model_checkpoint_paths. Manually adding it.\",\r\n model_checkpoint_path)\r\n all_model_checkpoint_paths.append(model_checkpoint_path)\r\n\r\n if (all_model_checkpoint_timestamps\r\n and (len(all_model_checkpoint_timestamps)\r\n != len(all_model_checkpoint_paths))):\r\n raise ValueError(\r\n (\"Checkpoint timestamps, if provided, must match checkpoint paths (got \"\r\n \"paths %s and timestamps %s)\")\r\n % (all_model_checkpoint_paths, all_model_checkpoint_timestamps))\r\n\r\n # Relative paths need to be rewritten to be relative to the \"save_dir\"\r\n # if model_checkpoint_path already contains \"save_dir\".\r\n if not os.path.isabs(save_dir):\r\n if not os.path.isabs(model_checkpoint_path):\r\n model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)\r\n for i in range(len(all_model_checkpoint_paths)):\r\n p = all_model_checkpoint_paths[i]\r\n if not os.path.isabs(p):\r\n all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)\r\n\r\n coord_checkpoint_proto = CheckpointState(\r\n model_checkpoint_path=model_checkpoint_path,\r\n all_model_checkpoint_paths=all_model_checkpoint_paths,\r\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\r\n last_preserved_timestamp=last_preserved_timestamp)\r\n\r\n return coord_checkpoint_proto\r\n\r\n\r\n@tf_export(\"train.update_checkpoint_state\")\r\ndef update_checkpoint_state(save_dir,\r\n model_checkpoint_path,\r\n all_model_checkpoint_paths=None,\r\n latest_filename=None,\r\n all_model_checkpoint_timestamps=None,\r\n last_preserved_timestamp=None):\r\n \"\"\"Updates the content of the 'checkpoint' file.\r\n\r\n This updates the checkpoint file containing a CheckpointState\r\n proto.\r\n\r\n Args:\r\n save_dir: Directory where the model was saved.\r\n model_checkpoint_path: The checkpoint file.\r\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\r\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\r\n the last element must be equal to model_checkpoint_path. These paths\r\n are also saved in the CheckpointState proto.\r\n latest_filename: Optional name of the checkpoint file. Default to\r\n 'checkpoint'.\r\n all_model_checkpoint_timestamps: Optional list of timestamps (floats,\r\n seconds since the Epoch) indicating when the checkpoints in\r\n `all_model_checkpoint_paths` were created.\r\n last_preserved_timestamp: A float, indicating the number of seconds since\r\n the Epoch when the last preserved checkpoint was written, e.g. 
due to a\r\n `keep_checkpoint_every_n_hours` parameter (see\r\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\r\n Raises:\r\n RuntimeError: If any of the model checkpoint paths conflict with the file\r\n containing CheckpointSate.\r\n \"\"\"\r\n update_checkpoint_state_internal(\r\n save_dir=save_dir,\r\n model_checkpoint_path=model_checkpoint_path,\r\n all_model_checkpoint_paths=all_model_checkpoint_paths,\r\n latest_filename=latest_filename,\r\n save_relative_paths=False,\r\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\r\n last_preserved_timestamp=last_preserved_timestamp)\r\n\r\n\r\ndef update_checkpoint_state_internal(save_dir,\r\n model_checkpoint_path,\r\n all_model_checkpoint_paths=None,\r\n latest_filename=None,\r\n save_relative_paths=False,\r\n all_model_checkpoint_timestamps=None,\r\n last_preserved_timestamp=None):\r\n \"\"\"Updates the content of the 'checkpoint' file.\r\n\r\n This updates the checkpoint file containing a CheckpointState\r\n proto.\r\n\r\n Args:\r\n save_dir: Directory where the model was saved.\r\n model_checkpoint_path: The checkpoint file.\r\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\r\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\r\n the last element must be equal to model_checkpoint_path. These paths\r\n are also saved in the CheckpointState proto.\r\n latest_filename: Optional name of the checkpoint file. Default to\r\n 'checkpoint'.\r\n save_relative_paths: If `True`, will write relative paths to the checkpoint\r\n state file.\r\n all_model_checkpoint_timestamps: Optional list of timestamps (floats,\r\n seconds since the Epoch) indicating when the checkpoints in\r\n `all_model_checkpoint_paths` were created.\r\n last_preserved_timestamp: A float, indicating the number of seconds since\r\n the Epoch when the last preserved checkpoint was written, e.g. due to a\r\n `keep_checkpoint_every_n_hours` parameter (see\r\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\r\n\r\n Raises:\r\n RuntimeError: If any of the model checkpoint paths conflict with the file\r\n containing CheckpointSate.\r\n \"\"\"\r\n # Writes the \"checkpoint\" file for the coordinator for later restoration.\r\n coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)\r\n if save_relative_paths:\r\n if os.path.isabs(model_checkpoint_path):\r\n rel_model_checkpoint_path = os.path.relpath(\r\n model_checkpoint_path, save_dir)\r\n else:\r\n rel_model_checkpoint_path = model_checkpoint_path\r\n rel_all_model_checkpoint_paths = []\r\n for p in all_model_checkpoint_paths:\r\n if os.path.isabs(p):\r\n rel_all_model_checkpoint_paths.append(os.path.relpath(p, save_dir))\r\n else:\r\n rel_all_model_checkpoint_paths.append(p)\r\n ckpt = generate_checkpoint_state_proto(\r\n save_dir,\r\n rel_model_checkpoint_path,\r\n all_model_checkpoint_paths=rel_all_model_checkpoint_paths,\r\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\r\n last_preserved_timestamp=last_preserved_timestamp)\r\n else:\r\n ckpt = generate_checkpoint_state_proto(\r\n save_dir,\r\n model_checkpoint_path,\r\n all_model_checkpoint_paths=all_model_checkpoint_paths,\r\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\r\n last_preserved_timestamp=last_preserved_timestamp)\r\n\r\n if coord_checkpoint_filename == ckpt.model_checkpoint_path:\r\n raise RuntimeError(\"Save path '%s' conflicts with path used for \"\r\n \"checkpoint state. 
Please use a different save path.\" %\r\n model_checkpoint_path)\r\n\r\n # Preventing potential read/write race condition by *atomically* writing to a\r\n # file.\r\n file_io.atomic_write_string_to_file(coord_checkpoint_filename,\r\n text_format.MessageToString(ckpt))\r\n\r\n\r\n@tf_export(\"train.get_checkpoint_state\")\r\ndef get_checkpoint_state(checkpoint_dir, latest_filename=None):\r\n \"\"\"Returns CheckpointState proto from the \"checkpoint\" file.\r\n\r\n If the \"checkpoint\" file contains a valid CheckpointState\r\n proto, returns it.\r\n\r\n Args:\r\n checkpoint_dir: The directory of checkpoints.\r\n latest_filename: Optional name of the checkpoint file. Default to\r\n 'checkpoint'.\r\n\r\n Returns:\r\n A CheckpointState if the state was available, None\r\n otherwise.\r\n\r\n Raises:\r\n ValueError: if the checkpoint read doesn't have model_checkpoint_path set.\r\n \"\"\"\r\n ckpt = None\r\n coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,\r\n latest_filename)\r\n f = None\r\n try:\r\n # Check that the file exists before opening it to avoid\r\n # many lines of errors from colossus in the logs.\r\n if file_io.file_exists(coord_checkpoint_filename):\r\n file_content = file_io.read_file_to_string(\r\n coord_checkpoint_filename)\r\n ckpt = CheckpointState()\r\n text_format.Merge(file_content, ckpt)\r\n if not ckpt.model_checkpoint_path:\r\n raise ValueError(\"Invalid checkpoint state loaded from \"\r\n + checkpoint_dir)\r\n # For relative model_checkpoint_path and all_model_checkpoint_paths,\r\n # prepend checkpoint_dir.\r\n if not os.path.isabs(ckpt.model_checkpoint_path):\r\n ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,\r\n ckpt.model_checkpoint_path)\r\n for i in range(len(ckpt.all_model_checkpoint_paths)):\r\n p = ckpt.all_model_checkpoint_paths[i]\r\n if not os.path.isabs(p):\r\n ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)\r\n except errors.OpError as e:\r\n # It's ok if the file cannot be read\r\n logging.warning(\"%s: %s\", type(e).__name__, e)\r\n logging.warning(\"%s: Checkpoint ignored\", coord_checkpoint_filename)\r\n return None\r\n except text_format.ParseError as e:\r\n logging.warning(\"%s: %s\", type(e).__name__, e)\r\n logging.warning(\"%s: Checkpoint ignored\", coord_checkpoint_filename)\r\n return None\r\n finally:\r\n if f:\r\n f.close()\r\n return ckpt\r\n\r\n\r\ndef _prefix_to_checkpoint_path(prefix, format_version):\r\n \"\"\"Returns the pathname of a checkpoint file, given the checkpoint prefix.\r\n\r\n For V1 checkpoint, simply returns the prefix itself (the data file). 
For V2,\r\n returns the pathname to the index file.\r\n\r\n Args:\r\n prefix: a string, the prefix of a checkpoint.\r\n format_version: the checkpoint format version that corresponds to the\r\n prefix.\r\n Returns:\r\n The pathname of a checkpoint file, taking into account the checkpoint\r\n format version.\r\n \"\"\"\r\n if format_version == saver_pb2.SaverDef.V2:\r\n return prefix + \".index\" # The index file identifies a checkpoint.\r\n return prefix # Just the data file.\r\n\r\n\r\n@tf_export(\"train.latest_checkpoint\")\r\ndef latest_checkpoint(checkpoint_dir, latest_filename=None):\r\n \"\"\"Finds the filename of latest saved checkpoint file.\r\n\r\n Args:\r\n checkpoint_dir: Directory where the variables were saved.\r\n latest_filename: Optional name for the protocol buffer file that\r\n contains the list of most recent checkpoint filenames.\r\n See the corresponding argument to `Saver.save()`.\r\n\r\n Returns:\r\n The full path to the latest checkpoint or `None` if no checkpoint was found.\r\n \"\"\"\r\n # Pick the latest checkpoint based on checkpoint state.\r\n ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)\r\n if ckpt and ckpt.model_checkpoint_path:\r\n # Look for either a V2 path or a V1 path, with priority for V2.\r\n v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,\r\n saver_pb2.SaverDef.V2)\r\n v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,\r\n saver_pb2.SaverDef.V1)\r\n if file_io.get_matching_files(v2_path) or file_io.get_matching_files(\r\n v1_path):\r\n return ckpt.model_checkpoint_path\r\n else:\r\n logging.error(\"Couldn't match files for checkpoint %s\",\r\n ckpt.model_checkpoint_path)\r\n return None\r\n\r\n\r\n@tf_export(\"train.checkpoint_exists\")\r\ndef checkpoint_exists(checkpoint_prefix):\r\n \"\"\"Checks whether a V1 or V2 checkpoint exists with the specified prefix.\r\n\r\n This is the recommended way to check if a checkpoint exists, since it takes\r\n into account the naming difference between V1 and V2 formats.\r\n\r\n Args:\r\n checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking\r\n priority. Typically the result of `Saver.save()` or that of\r\n `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or\r\n V1/V2.\r\n Returns:\r\n A bool, true iff a checkpoint referred to by `checkpoint_prefix` exists.\r\n \"\"\"\r\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix,\r\n saver_pb2.SaverDef.V2)\r\n if file_io.get_matching_files(pathname):\r\n return True\r\n elif file_io.get_matching_files(checkpoint_prefix):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n@tf_export(\"train.get_checkpoint_mtimes\")\r\ndef get_checkpoint_mtimes(checkpoint_prefixes):\r\n \"\"\"Returns the mtimes (modification timestamps) of the checkpoints.\r\n\r\n Globs for the checkpoints pointed to by `checkpoint_prefixes`. If the files\r\n exist, collect their mtime. 
Both V2 and V1 checkpoints are considered, in\r\n that priority.\r\n\r\n This is the recommended way to get the mtimes, since it takes into account\r\n the naming difference between V1 and V2 formats.\r\n\r\n Args:\r\n checkpoint_prefixes: a list of checkpoint paths, typically the results of\r\n `Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of\r\n sharded/non-sharded or V1/V2.\r\n Returns:\r\n A list of mtimes (in microseconds) of the found checkpoints.\r\n \"\"\"\r\n mtimes = []\r\n\r\n def match_maybe_append(pathname):\r\n fnames = file_io.get_matching_files(pathname)\r\n if fnames:\r\n mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1e9)\r\n return True\r\n return False\r\n\r\n for checkpoint_prefix in checkpoint_prefixes:\r\n # Tries V2's metadata file first.\r\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix,\r\n saver_pb2.SaverDef.V2)\r\n if match_maybe_append(pathname):\r\n continue\r\n # Otherwise, tries V1, where the prefix is the complete pathname.\r\n match_maybe_append(checkpoint_prefix)\r\n\r\n return mtimes\r\n\r\n\r\n@tf_export(\"train.remove_checkpoint\")\r\ndef remove_checkpoint(checkpoint_prefix,\r\n checkpoint_format_version=saver_pb2.SaverDef.V2,\r\n meta_graph_suffix=\"meta\"):\r\n \"\"\"Removes a checkpoint given by `checkpoint_prefix`.\r\n\r\n Args:\r\n checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the result\r\n of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of\r\n sharded/non-sharded or V1/V2.\r\n checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to\r\n `SaverDef.V2`.\r\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.\r\n \"\"\"\r\n _delete_file_if_exists(\r\n meta_graph_filename(checkpoint_prefix, meta_graph_suffix))\r\n if checkpoint_format_version == saver_pb2.SaverDef.V2:\r\n # V2 has a metadata file and some data files.\r\n _delete_file_if_exists(checkpoint_prefix + \".index\")\r\n _delete_file_if_exists(checkpoint_prefix + \".data-?????-of-?????\")\r\n else:\r\n # V1, Legacy. Exact match on the data file.\r\n _delete_file_if_exists(checkpoint_prefix)\r\n\r\n\r\ndef _delete_file_if_exists(filespec):\r\n \"\"\"Deletes files matching `filespec`.\"\"\"\r\n for pathname in file_io.get_matching_files(filespec):\r\n file_io.delete_file(pathname)\r\n\r\n\r\ndef meta_graph_filename(checkpoint_filename, meta_graph_suffix=\"meta\"):\r\n \"\"\"Returns the meta graph filename.\r\n\r\n Args:\r\n checkpoint_filename: Name of the checkpoint file.\r\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.\r\n\r\n Returns:\r\n MetaGraph file name.\r\n \"\"\"\r\n # If the checkpoint_filename is sharded, the checkpoint_filename could\r\n # be of format model.ckpt-step#-?????-of-shard#. 
For example,\r\n # model.ckpt-123456-?????-of-00005, or model.ckpt-123456-00001-of-00002.\r\n basename = re.sub(r\"-[\\d\\?]+-of-\\d+$\", \"\", checkpoint_filename)\r\n suffixed_filename = \".\".join([basename, meta_graph_suffix])\r\n return suffixed_filename\r\n\r\n\r\n# TODO(allenl): Allow tf.keras.Model instances in the constructor directly?\r\nclass CheckpointManager(object):\r\n \"\"\"Deletes old checkpoints.\r\n\r\n Example usage:\r\n ```python\r\n import tensorflow as tf\r\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\r\n manager = tf.contrib.checkpoint.CheckpointManager(\r\n checkpoint, directory=\"/tmp/model\", max_to_keep=5)\r\n status = checkpoint.restore(manager.latest_checkpoint)\r\n while True:\r\n # train\r\n manager.save()\r\n ```\r\n\r\n `CheckpointManager` preserves its own state across instantiations (see the\r\n `__init__` documentation for details). Only one should be active in a\r\n particular directory at a time.\r\n \"\"\"\r\n\r\n def __init__(self, checkpoint, directory,\r\n max_to_keep, keep_checkpoint_every_n_hours=None):\r\n \"\"\"Configure a `CheckpointManager` for use in `directory`.\r\n\r\n If a `CheckpointManager` was previously used in `directory`, its\r\n state will be restored. This includes the list of managed checkpoints and\r\n the timestamp bookkeeping necessary to support\r\n `keep_checkpoint_every_n_hours`. The behavior of the new `CheckpointManager`\r\n will be the same as the previous `CheckpointManager`, including cleaning up\r\n existing checkpoints if appropriate.\r\n\r\n Checkpoints are only considered for deletion just after a new checkpoint has\r\n been added. At that point, `max_to_keep` checkpoints will remain in an\r\n \"active set\". Once a checkpoint is preserved by\r\n `keep_checkpoint_every_n_hours` it will not be deleted by this\r\n `CheckpointManager` or any future `CheckpointManager` instantiated in\r\n `directory` (regardless of the new setting of\r\n `keep_checkpoint_every_n_hours`). The `max_to_keep` checkpoints in the\r\n active set may be deleted by this `CheckpointManager` or a future\r\n `CheckpointManager` instantiated in `directory` (subject to its\r\n `max_to_keep` and `keep_checkpoint_every_n_hours` settings).\r\n\r\n Args:\r\n checkpoint: The `tf.train.Checkpoint` instance to save and manage\r\n checkpoints for.\r\n directory: The path to a directory in which to write checkpoints. A\r\n special file named \"checkpoint\" is also written to this directory (in a\r\n human-readable text format) which contains the state of the\r\n `CheckpointManager`.\r\n max_to_keep: An integer, the number of checkpoints to keep. Unless\r\n preserved by `keep_checkpoint_every_n_hours`, checkpoints will be\r\n deleted from the active set, oldest first, until only `max_to_keep`\r\n checkpoints remain. If `None`, no checkpoints are deleted and everything\r\n stays in the active set. Note that `max_to_keep=None` will keep all\r\n checkpoint paths in memory and in the checkpoint state protocol buffer\r\n on disk.\r\n keep_checkpoint_every_n_hours: Upon removal from the active set, a\r\n checkpoint will be preserved if it has been at least\r\n `keep_checkpoint_every_n_hours` since the last preserved checkpoint. 
The\r\n default setting of `None` does not preserve any checkpoints in this way.\r\n\r\n Raises:\r\n ValueError: If `max_to_keep` is not a positive integer.\r\n \"\"\"\r\n self._checkpoint = checkpoint\r\n self._save_counter_assign = None\r\n if max_to_keep is not None and max_to_keep <= 0:\r\n raise ValueError(\r\n (\"Expected a positive integer or `None` for `max_to_max_to_keep`, \"\r\n \"got %d.\")\r\n % (max_to_keep,))\r\n self._max_to_keep = max_to_keep\r\n self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours\r\n self._directory = directory\r\n self._checkpoint_prefix = os.path.join(directory, \"ckpt\")\r\n recovered_state = get_checkpoint_state(directory)\r\n current_clock = time.time()\r\n self._maybe_delete = collections.OrderedDict()\r\n if recovered_state is None:\r\n self._latest_checkpoint = None\r\n # Set the clock back slightly to avoid race conditions when quckly\r\n # re-creating a CheckpointManager.\r\n self._last_preserved_timestamp = current_clock - 1.\r\n else:\r\n self._latest_checkpoint = recovered_state.model_checkpoint_path\r\n self._last_preserved_timestamp = recovered_state.last_preserved_timestamp\r\n if current_clock < self._last_preserved_timestamp:\r\n # Time seems to have reversed itself. In addition to this warning, we'll\r\n # min() saved checkpoint timestamps with the current time to ensure that\r\n # old checkpoints don't get deleted accidentally.\r\n logging.warning(\r\n (\"time.time() returned a value %f seconds behind the last \"\r\n \"preserved checkpoint timestamp.\")\r\n % (self._last_preserved_timestamp - current_clock,))\r\n self._last_preserved_timestamp = current_clock\r\n all_timestamps = recovered_state.all_model_checkpoint_timestamps\r\n all_paths = recovered_state.all_model_checkpoint_paths\r\n del recovered_state # Uses modified values from now on\r\n if not all_timestamps:\r\n all_timestamps = [self._last_preserved_timestamp] * len(all_paths)\r\n\r\n for filename, timestamp in zip(all_paths, all_timestamps):\r\n timestamp = min(timestamp, current_clock)\r\n if timestamp > self._last_preserved_timestamp:\r\n self._maybe_delete[filename] = timestamp\r\n\r\n @property\r\n def latest_checkpoint(self):\r\n \"\"\"The prefix of the most recent checkpoint in `directory`.\r\n\r\n Equivalent to `tf.train.latest_checkpoint(directory)` where `directory` is\r\n the constructor argument to `CheckpointManager`.\r\n\r\n Suitable for passing to `tf.train.Checkpoint.restore` to resume training.\r\n\r\n Returns:\r\n The checkpoint prefix. 
If there are no checkpoints, returns `None`.\r\n \"\"\"\r\n return self._latest_checkpoint\r\n\r\n @property\r\n def checkpoints(self):\r\n \"\"\"A list of managed checkpoints.\r\n\r\n Note that checkpoints saved due to `keep_checkpoint_every_n_hours` will not\r\n show up in this list (to avoid ever-growing filename lists).\r\n\r\n Returns:\r\n A list of filenames, sorted from oldest to newest.\r\n \"\"\"\r\n return list(self._maybe_delete.keys())\r\n\r\n def _sweep(self):\r\n \"\"\"Deletes or preserves managed checkpoints.\"\"\"\r\n if not self._max_to_keep:\r\n # Does not update self._last_preserved_timestamp, since everything is kept\r\n # in the active set.\r\n return\r\n while len(self._maybe_delete) > self._max_to_keep:\r\n filename, timestamp = self._maybe_delete.popitem(last=False)\r\n # Even if we're keeping this checkpoint due to\r\n # keep_checkpoint_every_n_hours, we won't reference it to avoid\r\n # infinitely-growing CheckpointState protos.\r\n if (self._keep_checkpoint_every_n_hours\r\n and (timestamp - self._keep_checkpoint_every_n_hours * 3600.\r\n >= self._last_preserved_timestamp)):\r\n self._last_preserved_timestamp = timestamp\r\n continue\r\n remove_checkpoint(filename)\r\n\r\n def _record_state(self):\r\n \"\"\"Saves the `CheckpointManager`'s state in `directory`.\"\"\"\r\n filenames, timestamps = zip(*self._maybe_delete.items())\r\n update_checkpoint_state_internal(\r\n self._directory,\r\n model_checkpoint_path=self.latest_checkpoint,\r\n all_model_checkpoint_paths=filenames,\r\n all_model_checkpoint_timestamps=timestamps,\r\n last_preserved_timestamp=self._last_preserved_timestamp,\r\n save_relative_paths=True)\r\n\r\n @property\r\n def _prefix(self):\r\n \"\"\"A common prefix for all checkpoints saved with this manager.\r\n\r\n For example, if `directory` (a constructor argument) were `\"/tmp/tf-model\"`,\r\n `prefix` would be `\"/tmp/tf-model/ckpt\"` and checkpoints would generally be\r\n numbered `\"/tmp/tf-model/ckpt-1\"`, `\"/tmp/tf-model/ckpt-2\"`, and so on. Each\r\n checkpoint has several associated files\r\n (e.g. `\"/tmp/tf-model/ckpt-2.index\"`).\r\n\r\n Returns:\r\n A string prefix.\r\n \"\"\"\r\n return self._checkpoint_prefix\r\n\r\n def save(self, session=None, checkpoint_number=None):\r\n \"\"\"Creates a new checkpoint and manages it.\r\n\r\n Args:\r\n session: The session to evaluate variables in. Ignored when executing\r\n eagerly. If not provided when graph building, the default session is\r\n used.\r\n checkpoint_number: An optional integer, or an integer-dtype `Variable` or\r\n `Tensor`, used to number the checkpoint. If `None` (default),\r\n checkpoints are numbered using `checkpoint.save_counter`. Even if\r\n `checkpoint_number` is provided, `save_counter` is still incremented. A\r\n user-provided `checkpoint_number` is not incremented even if it is a\r\n `Variable`.\r\n\r\n Returns:\r\n The path to the new checkpoint. 
It is also recorded in the `checkpoints`\r\n and `latest_checkpoint` properies.\r\n \"\"\"\r\n # Save counter logic duplicated from tf.train.Checkpoint, soon to diverge\r\n # slightly with a custom numbering option.\r\n if context.executing_eagerly():\r\n save_counter = self._checkpoint.save_counter\r\n save_counter.assign_add(1)\r\n else:\r\n if session is None:\r\n session = ops.get_default_session()\r\n\r\n def _initializing_creator(next_creator, **kwargs):\r\n \"\"\"Initialize the save counter if it has been newly created.\"\"\"\r\n v = next_creator(**kwargs)\r\n session.run(v.initializer)\r\n return v\r\n\r\n with variable_scope.variable_creator_scope(_initializing_creator):\r\n save_counter = self._checkpoint.save_counter\r\n if self._save_counter_assign is None:\r\n self._save_counter_assign = save_counter.assign_add(1, read_value=False)\r\n session.run(self._save_counter_assign)\r\n if checkpoint_number is None:\r\n checkpoint_number = save_counter\r\n if not isinstance(checkpoint_number, compat.integral_types):\r\n checkpoint_number = training_util.global_step(\r\n sess=session, global_step_tensor=checkpoint_number)\r\n prefix = \"%s-%d\" % (self._prefix, checkpoint_number)\r\n save_path = self._checkpoint.write(prefix)\r\n timestamp = time.time()\r\n # If this is an overwritten checkpoint we were previously tracking, delete\r\n # and reinsert it to make sure it goes to the end of the queue.\r\n if save_path in self._maybe_delete:\r\n del self._maybe_delete[save_path]\r\n self._maybe_delete[save_path] = timestamp\r\n self._latest_checkpoint = save_path\r\n self._sweep()\r\n self._record_state()\r\n return save_path\r\n",
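The file above ties together the checkpoint-state helpers (`generate_checkpoint_state_proto`, `update_checkpoint_state`, `get_checkpoint_state`, `latest_checkpoint`, `checkpoint_exists`, `get_checkpoint_mtimes`) and the `CheckpointManager` class, all of which revolve around the plain-text `checkpoint` file in a save directory. A minimal read-side sketch, assuming a TensorFlow 1.x install where these functions are exported under `tf.train` as the `tf_export` decorators indicate; the directory path is illustrative:

```python
import tensorflow as tf

ckpt_dir = "/tmp/model"  # illustrative directory

# Parse the 'checkpoint' proto written by update_checkpoint_state().
state = tf.train.get_checkpoint_state(ckpt_dir)
if state is not None:
    print("model_checkpoint_path:", state.model_checkpoint_path)
    print("all_model_checkpoint_paths:", list(state.all_model_checkpoint_paths))

# latest_checkpoint() resolves the newest prefix, preferring a V2 .index
# file and falling back to a V1 data file.
prefix = tf.train.latest_checkpoint(ckpt_dir)
if prefix is not None and tf.train.checkpoint_exists(prefix):
    print("mtimes:", tf.train.get_checkpoint_mtimes([prefix]))
```

`CheckpointManager` persists the same fields (plus per-checkpoint timestamps) through `update_checkpoint_state_internal(save_relative_paths=True)`, which is why its deletion bookkeeping survives process restarts.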
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Module implementing RNN Cells that used to be in core.\r\n\r\n@@EmbeddingWrapper\r\n@@InputProjectionWrapper\r\n@@OutputProjectionWrapper\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport math\r\n\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import embedding_ops\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import nn_ops\r\nfrom tensorflow.python.ops import rnn_cell_impl\r\nfrom tensorflow.python.ops import variable_scope as vs\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.util import nest\r\n\r\n\r\n# pylint: disable=protected-access,invalid-name\r\nRNNCell = rnn_cell_impl.RNNCell\r\n_WEIGHTS_VARIABLE_NAME = rnn_cell_impl._WEIGHTS_VARIABLE_NAME\r\n_BIAS_VARIABLE_NAME = rnn_cell_impl._BIAS_VARIABLE_NAME\r\n# pylint: enable=protected-access,invalid-name\r\n\r\n\r\nclass _Linear(object):\r\n \"\"\"Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.\r\n\r\n Args:\r\n args: a 2D Tensor or a list of 2D, batch, n, Tensors.\r\n output_size: int, second dimension of weight variable.\r\n dtype: data type for variables.\r\n build_bias: boolean, whether to build a bias variable.\r\n bias_initializer: starting value to initialize the bias\r\n (default is all zeros).\r\n kernel_initializer: starting value to initialize the weight.\r\n\r\n Raises:\r\n ValueError: if inputs_shape is wrong.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n args,\r\n output_size,\r\n build_bias,\r\n bias_initializer=None,\r\n kernel_initializer=None):\r\n self._build_bias = build_bias\r\n\r\n if args is None or (nest.is_sequence(args) and not args):\r\n raise ValueError(\"`args` must be specified\")\r\n if not nest.is_sequence(args):\r\n args = [args]\r\n self._is_sequence = False\r\n else:\r\n self._is_sequence = True\r\n\r\n # Calculate the total size of arguments on dimension 1.\r\n total_arg_size = 0\r\n shapes = [a.get_shape() for a in args]\r\n for shape in shapes:\r\n if shape.ndims != 2:\r\n raise ValueError(\"linear is expecting 2D arguments: %s\" % shapes)\r\n if shape[1].value is None:\r\n raise ValueError(\"linear expects shape[1] to be provided for shape %s, \"\r\n \"but saw %s\" % (shape, shape[1]))\r\n else:\r\n total_arg_size += shape[1].value\r\n\r\n dtype = [a.dtype for a in args][0]\r\n\r\n scope = vs.get_variable_scope()\r\n with vs.variable_scope(scope) as outer_scope:\r\n self._weights = vs.get_variable(\r\n _WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],\r\n dtype=dtype,\r\n 
initializer=kernel_initializer)\r\n if build_bias:\r\n with vs.variable_scope(outer_scope) as inner_scope:\r\n inner_scope.set_partitioner(None)\r\n if bias_initializer is None:\r\n bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)\r\n self._biases = vs.get_variable(\r\n _BIAS_VARIABLE_NAME, [output_size],\r\n dtype=dtype,\r\n initializer=bias_initializer)\r\n\r\n def __call__(self, args):\r\n if not self._is_sequence:\r\n args = [args]\r\n\r\n if len(args) == 1:\r\n res = math_ops.matmul(args[0], self._weights)\r\n else:\r\n # Explicitly creating a one for a minor performance improvement.\r\n one = constant_op.constant(1, dtype=dtypes.int32)\r\n res = math_ops.matmul(array_ops.concat(args, one), self._weights)\r\n if self._build_bias:\r\n res = nn_ops.bias_add(res, self._biases)\r\n return res\r\n\r\n\r\n# TODO(xpan): Remove this function in a follow up.\r\ndef _linear(args,\r\n output_size,\r\n bias,\r\n bias_initializer=None,\r\n kernel_initializer=None):\r\n \"\"\"Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.\r\n\r\n Args:\r\n args: a 2D Tensor or a list of 2D, batch, n, Tensors.\r\n output_size: int, second dimension of W[i].\r\n bias: boolean, whether to add a bias term or not.\r\n bias_initializer: starting value to initialize the bias\r\n (default is all zeros).\r\n kernel_initializer: starting value to initialize the weight.\r\n\r\n Returns:\r\n A 2D Tensor with shape `[batch, output_size]` equal to\r\n sum_i(args[i] * W[i]), where W[i]s are newly created matrices.\r\n\r\n Raises:\r\n ValueError: if some of the arguments has unspecified or wrong shape.\r\n \"\"\"\r\n if args is None or (nest.is_sequence(args) and not args):\r\n raise ValueError(\"`args` must be specified\")\r\n if not nest.is_sequence(args):\r\n args = [args]\r\n\r\n # Calculate the total size of arguments on dimension 1.\r\n total_arg_size = 0\r\n shapes = [a.get_shape() for a in args]\r\n for shape in shapes:\r\n if shape.ndims != 2:\r\n raise ValueError(\"linear is expecting 2D arguments: %s\" % shapes)\r\n if shape[1].value is None:\r\n raise ValueError(\"linear expects shape[1] to be provided for shape %s, \"\r\n \"but saw %s\" % (shape, shape[1]))\r\n else:\r\n total_arg_size += shape[1].value\r\n\r\n dtype = [a.dtype for a in args][0]\r\n\r\n # Now the computation.\r\n scope = vs.get_variable_scope()\r\n with vs.variable_scope(scope) as outer_scope:\r\n weights = vs.get_variable(\r\n _WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],\r\n dtype=dtype,\r\n initializer=kernel_initializer)\r\n if len(args) == 1:\r\n res = math_ops.matmul(args[0], weights)\r\n else:\r\n res = math_ops.matmul(array_ops.concat(args, 1), weights)\r\n if not bias:\r\n return res\r\n with vs.variable_scope(outer_scope) as inner_scope:\r\n inner_scope.set_partitioner(None)\r\n if bias_initializer is None:\r\n bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)\r\n biases = vs.get_variable(\r\n _BIAS_VARIABLE_NAME, [output_size],\r\n dtype=dtype,\r\n initializer=bias_initializer)\r\n return nn_ops.bias_add(res, biases)\r\n\r\n\r\nclass EmbeddingWrapper(RNNCell):\r\n \"\"\"Operator adding input embedding to the given cell.\r\n\r\n Note: in many cases it may be more efficient to not use this wrapper,\r\n but instead concatenate the whole sequence of your inputs in time,\r\n do the embedding on this batch-concatenated sequence, then split it and\r\n feed into your RNN.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n cell,\r\n embedding_classes,\r\n embedding_size,\r\n initializer=None,\r\n 
reuse=None):\r\n \"\"\"Create a cell with an added input embedding.\r\n\r\n Args:\r\n cell: an RNNCell, an embedding will be put before its inputs.\r\n embedding_classes: integer, how many symbols will be embedded.\r\n embedding_size: integer, the size of the vectors we embed into.\r\n initializer: an initializer to use when creating the embedding;\r\n if None, the initializer from variable scope or a default one is used.\r\n reuse: (optional) Python boolean describing whether to reuse variables\r\n in an existing scope. If not `True`, and the existing scope already has\r\n the given variables, an error is raised.\r\n\r\n Raises:\r\n TypeError: if cell is not an RNNCell.\r\n ValueError: if embedding_classes is not positive.\r\n \"\"\"\r\n super(EmbeddingWrapper, self).__init__(_reuse=reuse)\r\n rnn_cell_impl.assert_like_rnncell(\"cell\", cell)\r\n if embedding_classes <= 0 or embedding_size <= 0:\r\n raise ValueError(\"Both embedding_classes and embedding_size must be > 0: \"\r\n \"%d, %d.\" % (embedding_classes, embedding_size))\r\n self._cell = cell\r\n self._embedding_classes = embedding_classes\r\n self._embedding_size = embedding_size\r\n self._initializer = initializer\r\n\r\n @property\r\n def state_size(self):\r\n return self._cell.state_size\r\n\r\n @property\r\n def output_size(self):\r\n return self._cell.output_size\r\n\r\n def zero_state(self, batch_size, dtype):\r\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\r\n return self._cell.zero_state(batch_size, dtype)\r\n\r\n def call(self, inputs, state):\r\n \"\"\"Run the cell on embedded inputs.\"\"\"\r\n with ops.device(\"/cpu:0\"):\r\n if self._initializer:\r\n initializer = self._initializer\r\n elif vs.get_variable_scope().initializer:\r\n initializer = vs.get_variable_scope().initializer\r\n else:\r\n # Default initializer for embeddings should have variance=1.\r\n sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1.\r\n initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)\r\n\r\n if isinstance(state, tuple):\r\n data_type = state[0].dtype\r\n else:\r\n data_type = state.dtype\r\n\r\n embedding = vs.get_variable(\r\n \"embedding\", [self._embedding_classes, self._embedding_size],\r\n initializer=initializer,\r\n dtype=data_type)\r\n embedded = embedding_ops.embedding_lookup(embedding,\r\n array_ops.reshape(inputs, [-1]))\r\n\r\n return self._cell(embedded, state)\r\n\r\n\r\nclass InputProjectionWrapper(RNNCell):\r\n \"\"\"Operator adding an input projection to the given cell.\r\n\r\n Note: in many cases it may be more efficient to not use this wrapper,\r\n but instead concatenate the whole sequence of your inputs in time,\r\n do the projection on this batch-concatenated sequence, then split it.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n cell,\r\n num_proj,\r\n activation=None,\r\n input_size=None,\r\n reuse=None):\r\n \"\"\"Create a cell with input projection.\r\n\r\n Args:\r\n cell: an RNNCell, a projection of inputs is added before it.\r\n num_proj: Python integer. The dimension to project to.\r\n activation: (optional) an optional activation function.\r\n input_size: Deprecated and unused.\r\n reuse: (optional) Python boolean describing whether to reuse variables\r\n in an existing scope. 
If not `True`, and the existing scope already has\r\n the given variables, an error is raised.\r\n\r\n Raises:\r\n TypeError: if cell is not an RNNCell.\r\n \"\"\"\r\n super(InputProjectionWrapper, self).__init__(_reuse=reuse)\r\n if input_size is not None:\r\n logging.warn(\"%s: The input_size parameter is deprecated.\", self)\r\n rnn_cell_impl.assert_like_rnncell(\"cell\", cell)\r\n self._cell = cell\r\n self._num_proj = num_proj\r\n self._activation = activation\r\n self._linear = None\r\n\r\n @property\r\n def state_size(self):\r\n return self._cell.state_size\r\n\r\n @property\r\n def output_size(self):\r\n return self._cell.output_size\r\n\r\n def zero_state(self, batch_size, dtype):\r\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\r\n return self._cell.zero_state(batch_size, dtype)\r\n\r\n def call(self, inputs, state):\r\n \"\"\"Run the input projection and then the cell.\"\"\"\r\n # Default scope: \"InputProjectionWrapper\"\r\n if self._linear is None:\r\n self._linear = _Linear(inputs, self._num_proj, True)\r\n projected = self._linear(inputs)\r\n if self._activation:\r\n projected = self._activation(projected)\r\n return self._cell(projected, state)\r\n\r\n\r\nclass OutputProjectionWrapper(RNNCell):\r\n \"\"\"Operator adding an output projection to the given cell.\r\n\r\n Note: in many cases it may be more efficient to not use this wrapper,\r\n but instead concatenate the whole sequence of your outputs in time,\r\n do the projection on this batch-concatenated sequence, then split it\r\n if needed or directly feed into a softmax.\r\n \"\"\"\r\n\r\n def __init__(self, cell, output_size, activation=None, reuse=None):\r\n \"\"\"Create a cell with output projection.\r\n\r\n Args:\r\n cell: an RNNCell, a projection to output_size is added to it.\r\n output_size: integer, the size of the output after projection.\r\n activation: (optional) an optional activation function.\r\n reuse: (optional) Python boolean describing whether to reuse variables\r\n in an existing scope. If not `True`, and the existing scope already has\r\n the given variables, an error is raised.\r\n\r\n Raises:\r\n TypeError: if cell is not an RNNCell.\r\n ValueError: if output_size is not positive.\r\n \"\"\"\r\n super(OutputProjectionWrapper, self).__init__(_reuse=reuse)\r\n rnn_cell_impl.assert_like_rnncell(\"cell\", cell)\r\n if output_size < 1:\r\n raise ValueError(\"Parameter output_size must be > 0: %d.\" % output_size)\r\n self._cell = cell\r\n self._output_size = output_size\r\n self._activation = activation\r\n self._linear = None\r\n\r\n @property\r\n def state_size(self):\r\n return self._cell.state_size\r\n\r\n @property\r\n def output_size(self):\r\n return self._output_size\r\n\r\n def zero_state(self, batch_size, dtype):\r\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\r\n return self._cell.zero_state(batch_size, dtype)\r\n\r\n def call(self, inputs, state):\r\n \"\"\"Run the cell and output projection on inputs, starting from state.\"\"\"\r\n output, res_state = self._cell(inputs, state)\r\n if self._linear is None:\r\n self._linear = _Linear(output, self._output_size, True)\r\n projected = self._linear(output)\r\n if self._activation:\r\n projected = self._activation(projected)\r\n return projected, res_state\r\n",
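A small usage sketch for the wrappers defined above, assuming a TensorFlow 1.x graph-mode build where they are exposed as `tf.contrib.rnn.EmbeddingWrapper` and `tf.contrib.rnn.OutputProjectionWrapper`; the sizes and placeholder shapes are illustrative:

```python
import tensorflow as tf

batch_size, vocab_size, num_units = 32, 1000, 64

# Embed integer token ids, run one GRU step, then project back to logits.
cell = tf.contrib.rnn.GRUCell(num_units)
cell = tf.contrib.rnn.EmbeddingWrapper(
    cell, embedding_classes=vocab_size, embedding_size=32)
cell = tf.contrib.rnn.OutputProjectionWrapper(cell, output_size=vocab_size)

token_ids = tf.placeholder(tf.int32, [batch_size, 1])     # one timestep of ids
state = cell.zero_state(batch_size, dtype=tf.float32)
logits, new_state = cell(token_ids, state)                 # [batch_size, vocab_size]
```

As the class docstrings note, embedding or projecting the whole time-concatenated sequence outside the cell is usually more efficient; the wrappers mainly keep legacy graphs working.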
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Keras estimator API.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n# Keras has undeclared dependency on tensorflow/estimator:estimator_py.\r\n# As long as you depend //third_party/py/tensorflow:tensorflow target\r\n# everything will work as normal.\r\n\r\ntry:\r\n from tensorflow.python.estimator import keras as keras_lib # pylint: disable=g-import-not-at-top\r\n model_to_estimator = tf_export('keras.estimator.model_to_estimator')(\r\n keras_lib.model_to_estimator)\r\nexcept Exception: # pylint: disable=broad-except\r\n\r\n # pylint: disable=unused-argument\r\n def stub_model_to_estimator(keras_model=None,\r\n keras_model_path=None,\r\n custom_objects=None,\r\n model_dir=None,\r\n config=None):\r\n raise NotImplementedError(\r\n 'tf.keras.estimator.model_to_estimator function not available in your '\r\n 'installation.')\r\n # pylint: enable=unused-argument\r\n\r\n model_to_estimator = tf_export('keras.estimator.model_to_estimator')(\r\n stub_model_to_estimator)\r\n\r\n",
"#!c:\\users\\admin\\documents\\github\\tensorflowwork\\plugins\\unrealenginepython\\binaries\\win64\\python.exe\r\n# See http://cens.ioc.ee/projects/f2py2e/\r\nfrom __future__ import division, print_function\r\n\r\nimport os\r\nimport sys\r\nfor mode in [\"g3-numpy\", \"2e-numeric\", \"2e-numarray\", \"2e-numpy\"]:\r\n try:\r\n i = sys.argv.index(\"--\" + mode)\r\n del sys.argv[i]\r\n break\r\n except ValueError:\r\n pass\r\nos.environ[\"NO_SCIPY_IMPORT\"] = \"f2py\"\r\nif mode == \"g3-numpy\":\r\n sys.stderr.write(\"G3 f2py support is not implemented, yet.\\\\n\")\r\n sys.exit(1)\r\nelif mode == \"2e-numeric\":\r\n from f2py2e import main\r\nelif mode == \"2e-numarray\":\r\n sys.argv.append(\"-DNUMARRAY\")\r\n from f2py2e import main\r\nelif mode == \"2e-numpy\":\r\n from numpy.f2py import main\r\nelse:\r\n sys.stderr.write(\"Unknown mode: \" + repr(mode) + \"\\\\n\")\r\n sys.exit(1)\r\nmain()\r\n",
"\"\"\"Python wrappers around TensorFlow ops.\r\n\r\nThis file is MACHINE GENERATED! Do not edit.\r\n\"\"\"\r\n\r\nimport collections as _collections\r\nimport six as _six\r\n\r\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\r\nfrom tensorflow.python.eager import context as _context\r\nfrom tensorflow.python.eager import core as _core\r\nfrom tensorflow.python.eager import execute as _execute\r\nfrom tensorflow.python.framework import dtypes as _dtypes\r\nfrom tensorflow.python.framework import errors as _errors\r\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\r\n\r\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\r\n# Needed to trigger the call to _set_call_cpp_shape_fn.\r\nfrom tensorflow.python.framework import common_shapes as _common_shapes\r\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\r\nfrom tensorflow.python.framework import ops as _ops\r\nfrom tensorflow.python.framework import op_def_library as _op_def_library\r\nfrom tensorflow.python.util.deprecation import deprecated_endpoints\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n_block_lstm_outputs = [\"i\", \"cs\", \"f\", \"o\", \"ci\", \"co\", \"h\"]\r\n_BlockLSTMOutput = _collections.namedtuple(\r\n \"BlockLSTM\", _block_lstm_outputs)\r\n\r\n\r\n@tf_export('block_lstm')\r\ndef block_lstm(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):\r\n r\"\"\"Computes the LSTM cell forward propagation for all the time steps.\r\n\r\n This is equivalent to applying LSTMBlockCell in a loop, like so:\r\n\r\n ```python\r\n for x1 in unpack(x):\r\n i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(\r\n x1, cs_prev, h_prev, w, wci, wcf, wco, b)\r\n cs_prev = cs1\r\n h_prev = h1\r\n i.append(i1)\r\n cs.append(cs1)\r\n f.append(f1)\r\n o.append(o1)\r\n ci.append(ci1)\r\n co.append(co1)\r\n h.append(h1)\r\n return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(ch), pack(h)\r\n ```\r\n\r\n Args:\r\n seq_len_max: A `Tensor` of type `int64`.\r\n Maximum time length actually used by this input. Outputs are padded\r\n with zeros beyond this length.\r\n x: A `Tensor`. Must be one of the following types: `half`, `float32`.\r\n The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).\r\n cs_prev: A `Tensor`. Must have the same type as `x`.\r\n Value of the initial cell state.\r\n h_prev: A `Tensor`. Must have the same type as `x`.\r\n Initial output of cell (to be used for peephole).\r\n w: A `Tensor`. Must have the same type as `x`. The weight matrix.\r\n wci: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for input gate peephole connection.\r\n wcf: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for forget gate peephole connection.\r\n wco: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for output gate peephole connection.\r\n b: A `Tensor`. Must have the same type as `x`. The bias vector.\r\n forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.\r\n cell_clip: An optional `float`. Defaults to `3`.\r\n Value to clip the 'cs' value to.\r\n use_peephole: An optional `bool`. Defaults to `False`.\r\n Whether to use peephole weights.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).\r\n\r\n i: A `Tensor`. Has the same type as `x`. The input gate over the whole time sequence.\r\n cs: A `Tensor`. Has the same type as `x`. 
The cell state before the tanh over the whole time sequence.\r\n f: A `Tensor`. Has the same type as `x`. The forget gate over the whole time sequence.\r\n o: A `Tensor`. Has the same type as `x`. The output gate over the whole time sequence.\r\n ci: A `Tensor`. Has the same type as `x`. The cell input over the whole time sequence.\r\n co: A `Tensor`. Has the same type as `x`. The cell after the tanh over the whole time sequence.\r\n h: A `Tensor`. Has the same type as `x`. The output h vector over the whole time sequence.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if forget_bias is None:\r\n forget_bias = 1\r\n forget_bias = _execute.make_float(forget_bias, \"forget_bias\")\r\n if cell_clip is None:\r\n cell_clip = 3\r\n cell_clip = _execute.make_float(cell_clip, \"cell_clip\")\r\n if use_peephole is None:\r\n use_peephole = False\r\n use_peephole = _execute.make_bool(use_peephole, \"use_peephole\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"BlockLSTM\", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,\r\n h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b,\r\n forget_bias=forget_bias, cell_clip=cell_clip,\r\n use_peephole=use_peephole, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"forget_bias\", _op.get_attr(\"forget_bias\"), \"cell_clip\",\r\n _op.get_attr(\"cell_clip\"), \"use_peephole\",\r\n _op.get_attr(\"use_peephole\"), \"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"BlockLSTM\", _inputs_flat, _attrs, _result, name)\r\n _result = _BlockLSTMOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"BlockLSTM\",\r\n name, _ctx._post_execution_callbacks, seq_len_max, x, cs_prev, h_prev,\r\n w, wci, wcf, wco, b, \"forget_bias\", forget_bias, \"cell_clip\",\r\n cell_clip, \"use_peephole\", use_peephole)\r\n _result = _BlockLSTMOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return block_lstm_eager_fallback(\r\n seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b,\r\n forget_bias=forget_bias, cell_clip=cell_clip,\r\n use_peephole=use_peephole, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef block_lstm_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function block_lstm\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if forget_bias is None:\r\n forget_bias = 1\r\n forget_bias = _execute.make_float(forget_bias, \"forget_bias\")\r\n if cell_clip is None:\r\n cell_clip = 3\r\n cell_clip = _execute.make_float(cell_clip, \"cell_clip\")\r\n if use_peephole is None:\r\n use_peephole = False\r\n use_peephole = _execute.make_bool(use_peephole, \"use_peephole\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)\r\n (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T\r\n seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)\r\n _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]\r\n _attrs = (\"forget_bias\", forget_bias, \"cell_clip\", cell_clip,\r\n 
\"use_peephole\", use_peephole, \"T\", _attr_T)\r\n _result = _execute.execute(b\"BlockLSTM\", 7, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"BlockLSTM\", _inputs_flat, _attrs, _result, name)\r\n _result = _BlockLSTMOutput._make(_result)\r\n return _result\r\n\r\n_ops.RegisterShape(\"BlockLSTM\")(None)\r\n\r\n\r\n_block_lstm_grad_outputs = [\"x_grad\", \"cs_prev_grad\", \"h_prev_grad\", \"w_grad\",\r\n \"wci_grad\", \"wcf_grad\", \"wco_grad\", \"b_grad\"]\r\n_BlockLSTMGradOutput = _collections.namedtuple(\r\n \"BlockLSTMGrad\", _block_lstm_grad_outputs)\r\n\r\n\r\n@tf_export('block_lstm_grad')\r\ndef block_lstm_grad(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None):\r\n r\"\"\"Computes the LSTM cell backward propagation for the entire time sequence.\r\n\r\n This implementation is to be used in conjunction of LSTMBlock.\r\n\r\n Args:\r\n seq_len_max: A `Tensor` of type `int64`.\r\n Maximum time length actually used by this input. Outputs are padded\r\n with zeros beyond this length.\r\n x: A `Tensor`. Must be one of the following types: `half`, `float32`.\r\n The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).\r\n cs_prev: A `Tensor`. Must have the same type as `x`.\r\n Value of the initial cell state.\r\n h_prev: A `Tensor`. Must have the same type as `x`.\r\n Initial output of cell (to be used for peephole).\r\n w: A `Tensor`. Must have the same type as `x`. The weight matrix.\r\n wci: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for input gate peephole connection.\r\n wcf: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for forget gate peephole connection.\r\n wco: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for output gate peephole connection.\r\n b: A `Tensor`. Must have the same type as `x`. The bias vector.\r\n i: A `Tensor`. Must have the same type as `x`.\r\n The input gate over the whole time sequence.\r\n cs: A `Tensor`. Must have the same type as `x`.\r\n The cell state before the tanh over the whole time sequence.\r\n f: A `Tensor`. Must have the same type as `x`.\r\n The forget gate over the whole time sequence.\r\n o: A `Tensor`. Must have the same type as `x`.\r\n The output gate over the whole time sequence.\r\n ci: A `Tensor`. Must have the same type as `x`.\r\n The cell input over the whole time sequence.\r\n co: A `Tensor`. Must have the same type as `x`.\r\n The cell after the tanh over the whole time sequence.\r\n h: A `Tensor`. Must have the same type as `x`.\r\n The output h vector over the whole time sequence.\r\n cs_grad: A `Tensor`. Must have the same type as `x`.\r\n The current gradient of cs.\r\n h_grad: A `Tensor`. Must have the same type as `x`.\r\n The gradient of h vector.\r\n use_peephole: A `bool`. Whether to use peephole weights.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad, b_grad).\r\n\r\n x_grad: A `Tensor`. Has the same type as `x`. The gradient of x to be back-propped.\r\n cs_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of cs_prev to be back-propped.\r\n h_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of h_prev to be back-propped.\r\n w_grad: A `Tensor`. Has the same type as `x`. The gradient for w to be back-propped.\r\n wci_grad: A `Tensor`. Has the same type as `x`. 
The gradient for wci to be back-propped.\r\n wcf_grad: A `Tensor`. Has the same type as `x`. The gradient for wcf to be back-propped.\r\n wco_grad: A `Tensor`. Has the same type as `x`. The gradient for wco to be back-propped.\r\n b_grad: A `Tensor`. Has the same type as `x`. The gradient for w to be back-propped.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n use_peephole = _execute.make_bool(use_peephole, \"use_peephole\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"BlockLSTMGrad\", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,\r\n h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f,\r\n o=o, ci=ci, co=co, h=h, cs_grad=cs_grad, h_grad=h_grad,\r\n use_peephole=use_peephole, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"use_peephole\", _op.get_attr(\"use_peephole\"), \"T\",\r\n _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"BlockLSTMGrad\", _inputs_flat, _attrs, _result, name)\r\n _result = _BlockLSTMGradOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"BlockLSTMGrad\", name, _ctx._post_execution_callbacks, seq_len_max, x,\r\n cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad,\r\n h_grad, \"use_peephole\", use_peephole)\r\n _result = _BlockLSTMGradOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return block_lstm_grad_eager_fallback(\r\n seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o,\r\n ci, co, h, cs_grad, h_grad, use_peephole=use_peephole, name=name,\r\n ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef block_lstm_grad_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function block_lstm_grad\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n use_peephole = _execute.make_bool(use_peephole, \"use_peephole\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad], _ctx)\r\n (x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad) = _inputs_T\r\n seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)\r\n _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad]\r\n _attrs = (\"use_peephole\", use_peephole, \"T\", _attr_T)\r\n _result = _execute.execute(b\"BlockLSTMGrad\", 8, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"BlockLSTMGrad\", _inputs_flat, _attrs, _result, name)\r\n _result = _BlockLSTMGradOutput._make(_result)\r\n return _result\r\n\r\n_ops.RegisterShape(\"BlockLSTMGrad\")(None)\r\n\r\n\r\n_lstm_block_cell_outputs = [\"i\", \"cs\", \"f\", \"o\", \"ci\", \"co\", \"h\"]\r\n_LSTMBlockCellOutput = _collections.namedtuple(\r\n \"LSTMBlockCell\", _lstm_block_cell_outputs)\r\n\r\n\r\n@tf_export('lstm_block_cell')\r\ndef lstm_block_cell(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):\r\n r\"\"\"Computes the LSTM cell 
forward propagation for 1 time step.\r\n\r\n This implementation uses 1 weight matrix and 1 bias vector, and there's an\r\n optional peephole connection.\r\n\r\n This kernel op implements the following mathematical equations:\r\n\r\n ```python\r\n xh = [x, h_prev]\r\n [i, f, ci, o] = xh * w + b\r\n f = f + forget_bias\r\n\r\n if not use_peephole:\r\n wci = wcf = wco = 0\r\n\r\n i = sigmoid(cs_prev * wci + i)\r\n f = sigmoid(cs_prev * wcf + f)\r\n ci = tanh(ci)\r\n\r\n cs = ci .* i + cs_prev .* f\r\n cs = clip(cs, cell_clip)\r\n\r\n o = sigmoid(cs * wco + o)\r\n co = tanh(cs)\r\n h = co .* o\r\n ```\r\n\r\n Args:\r\n x: A `Tensor`. Must be one of the following types: `half`, `float32`.\r\n The input to the LSTM cell, shape (batch_size, num_inputs).\r\n cs_prev: A `Tensor`. Must have the same type as `x`.\r\n Value of the cell state at previous time step.\r\n h_prev: A `Tensor`. Must have the same type as `x`.\r\n Output of the previous cell at previous time step.\r\n w: A `Tensor`. Must have the same type as `x`. The weight matrix.\r\n wci: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for input gate peephole connection.\r\n wcf: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for forget gate peephole connection.\r\n wco: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for output gate peephole connection.\r\n b: A `Tensor`. Must have the same type as `x`. The bias vector.\r\n forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.\r\n cell_clip: An optional `float`. Defaults to `3`.\r\n Value to clip the 'cs' value to.\r\n use_peephole: An optional `bool`. Defaults to `False`.\r\n Whether to use peephole weights.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).\r\n\r\n i: A `Tensor`. Has the same type as `x`. The input gate.\r\n cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.\r\n f: A `Tensor`. Has the same type as `x`. The forget gate.\r\n o: A `Tensor`. Has the same type as `x`. The output gate.\r\n ci: A `Tensor`. Has the same type as `x`. The cell input.\r\n co: A `Tensor`. Has the same type as `x`. The cell after the tanh.\r\n h: A `Tensor`. Has the same type as `x`. 
The output h vector.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if forget_bias is None:\r\n forget_bias = 1\r\n forget_bias = _execute.make_float(forget_bias, \"forget_bias\")\r\n if cell_clip is None:\r\n cell_clip = 3\r\n cell_clip = _execute.make_float(cell_clip, \"cell_clip\")\r\n if use_peephole is None:\r\n use_peephole = False\r\n use_peephole = _execute.make_bool(use_peephole, \"use_peephole\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"LSTMBlockCell\", x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci,\r\n wcf=wcf, wco=wco, b=b, forget_bias=forget_bias, cell_clip=cell_clip,\r\n use_peephole=use_peephole, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"forget_bias\", _op.get_attr(\"forget_bias\"), \"cell_clip\",\r\n _op.get_attr(\"cell_clip\"), \"use_peephole\",\r\n _op.get_attr(\"use_peephole\"), \"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"LSTMBlockCell\", _inputs_flat, _attrs, _result, name)\r\n _result = _LSTMBlockCellOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"LSTMBlockCell\", name, _ctx._post_execution_callbacks, x, cs_prev,\r\n h_prev, w, wci, wcf, wco, b, \"forget_bias\", forget_bias, \"cell_clip\",\r\n cell_clip, \"use_peephole\", use_peephole)\r\n _result = _LSTMBlockCellOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return lstm_block_cell_eager_fallback(\r\n x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=forget_bias,\r\n cell_clip=cell_clip, use_peephole=use_peephole, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef lstm_block_cell_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function lstm_block_cell\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if forget_bias is None:\r\n forget_bias = 1\r\n forget_bias = _execute.make_float(forget_bias, \"forget_bias\")\r\n if cell_clip is None:\r\n cell_clip = 3\r\n cell_clip = _execute.make_float(cell_clip, \"cell_clip\")\r\n if use_peephole is None:\r\n use_peephole = False\r\n use_peephole = _execute.make_bool(use_peephole, \"use_peephole\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)\r\n (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T\r\n _inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b]\r\n _attrs = (\"forget_bias\", forget_bias, \"cell_clip\", cell_clip,\r\n \"use_peephole\", use_peephole, \"T\", _attr_T)\r\n _result = _execute.execute(b\"LSTMBlockCell\", 7, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"LSTMBlockCell\", _inputs_flat, _attrs, _result, name)\r\n _result = _LSTMBlockCellOutput._make(_result)\r\n return _result\r\n\r\n_ops.RegisterShape(\"LSTMBlockCell\")(None)\r\n\r\n\r\n_lstm_block_cell_grad_outputs = [\"cs_prev_grad\", \"dicfo\", \"wci_grad\",\r\n \"wcf_grad\", \"wco_grad\"]\r\n_LSTMBlockCellGradOutput = _collections.namedtuple(\r\n \"LSTMBlockCellGrad\", 
_lstm_block_cell_grad_outputs)\r\n\r\n\r\n@tf_export('lstm_block_cell_grad')\r\ndef lstm_block_cell_grad(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None):\r\n r\"\"\"Computes the LSTM cell backward propagation for 1 timestep.\r\n\r\n This implementation is to be used in conjunction of LSTMBlockCell.\r\n\r\n Args:\r\n x: A `Tensor`. Must be one of the following types: `half`, `float32`.\r\n The input to the LSTM cell, shape (batch_size, num_inputs).\r\n cs_prev: A `Tensor`. Must have the same type as `x`.\r\n The previous cell state.\r\n h_prev: A `Tensor`. Must have the same type as `x`. The previous h state.\r\n w: A `Tensor`. Must have the same type as `x`. The weight matrix.\r\n wci: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for input gate peephole connection.\r\n wcf: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for forget gate peephole connection.\r\n wco: A `Tensor`. Must have the same type as `x`.\r\n The weight matrix for output gate peephole connection.\r\n b: A `Tensor`. Must have the same type as `x`. The bias vector.\r\n i: A `Tensor`. Must have the same type as `x`. The input gate.\r\n cs: A `Tensor`. Must have the same type as `x`.\r\n The cell state before the tanh.\r\n f: A `Tensor`. Must have the same type as `x`. The forget gate.\r\n o: A `Tensor`. Must have the same type as `x`. The output gate.\r\n ci: A `Tensor`. Must have the same type as `x`. The cell input.\r\n co: A `Tensor`. Must have the same type as `x`. The cell after the tanh.\r\n cs_grad: A `Tensor`. Must have the same type as `x`.\r\n The current gradient of cs.\r\n h_grad: A `Tensor`. Must have the same type as `x`.\r\n The gradient of h vector.\r\n use_peephole: A `bool`. Whether the cell uses peephole connections.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (cs_prev_grad, dicfo, wci_grad, wcf_grad, wco_grad).\r\n\r\n cs_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of cs to be back-propped.\r\n dicfo: A `Tensor`. Has the same type as `x`. The derivative wrt to [i, cs, f, o].\r\n wci_grad: A `Tensor`. Has the same type as `x`. The gradient for wci to be back-propped.\r\n wcf_grad: A `Tensor`. Has the same type as `x`. The gradient for wcf to be back-propped.\r\n wco_grad: A `Tensor`. Has the same type as `x`. 
The gradient for wco to be back-propped.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n use_peephole = _execute.make_bool(use_peephole, \"use_peephole\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"LSTMBlockCellGrad\", x=x, cs_prev=cs_prev, h_prev=h_prev, w=w,\r\n wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f, o=o, ci=ci, co=co,\r\n cs_grad=cs_grad, h_grad=h_grad, use_peephole=use_peephole, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"use_peephole\", _op.get_attr(\"use_peephole\"), \"T\",\r\n _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"LSTMBlockCellGrad\", _inputs_flat, _attrs, _result, name)\r\n _result = _LSTMBlockCellGradOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"LSTMBlockCellGrad\", name, _ctx._post_execution_callbacks, x, cs_prev,\r\n h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad,\r\n \"use_peephole\", use_peephole)\r\n _result = _LSTMBlockCellGradOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return lstm_block_cell_grad_eager_fallback(\r\n x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co,\r\n cs_grad, h_grad, use_peephole=use_peephole, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef lstm_block_cell_grad_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function lstm_block_cell_grad\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n use_peephole = _execute.make_bool(use_peephole, \"use_peephole\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad], _ctx)\r\n (x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad) = _inputs_T\r\n _inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad]\r\n _attrs = (\"use_peephole\", use_peephole, \"T\", _attr_T)\r\n _result = _execute.execute(b\"LSTMBlockCellGrad\", 5, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"LSTMBlockCellGrad\", _inputs_flat, _attrs, _result, name)\r\n _result = _LSTMBlockCellGradOutput._make(_result)\r\n return _result\r\n\r\n_ops.RegisterShape(\"LSTMBlockCellGrad\")(None)\r\n\r\ndef _InitOpDefLibrary(op_list_proto_bytes):\r\n op_list = _op_def_pb2.OpList()\r\n op_list.ParseFromString(op_list_proto_bytes)\r\n _op_def_registry.register_op_list(op_list)\r\n op_def_lib = _op_def_library.OpDefLibrary()\r\n op_def_lib.add_op_list(op_list)\r\n return op_def_lib\r\n# op {\r\n# name: \"BlockLSTM\"\r\n# input_arg {\r\n# name: \"seq_len_max\"\r\n# type: DT_INT64\r\n# }\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"cs_prev\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"h_prev\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"w\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"wci\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"wcf\"\r\n# type_attr: \"T\"\r\n# 
}\r\n# input_arg {\r\n# name: \"wco\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"b\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"i\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"cs\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"f\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"o\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"ci\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"co\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"h\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"forget_bias\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 1\r\n# }\r\n# }\r\n# attr {\r\n# name: \"cell_clip\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 3\r\n# }\r\n# }\r\n# attr {\r\n# name: \"use_peephole\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"BlockLSTMGrad\"\r\n# input_arg {\r\n# name: \"seq_len_max\"\r\n# type: DT_INT64\r\n# }\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"cs_prev\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"h_prev\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"w\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"wci\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"wcf\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"wco\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"b\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"i\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"cs\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"f\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"o\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"ci\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"co\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"h\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"cs_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"h_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"x_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"cs_prev_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"h_prev_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"w_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"wci_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"wcf_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"wco_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"b_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"use_peephole\"\r\n# type: \"bool\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"LSTMBlockCell\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"cs_prev\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"h_prev\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"w\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"wci\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"wcf\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"wco\"\r\n# type_attr: \"T\"\r\n# }\r\n# 
input_arg {\r\n# name: \"b\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"i\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"cs\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"f\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"o\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"ci\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"co\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"h\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"forget_bias\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 1\r\n# }\r\n# }\r\n# attr {\r\n# name: \"cell_clip\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 3\r\n# }\r\n# }\r\n# attr {\r\n# name: \"use_peephole\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"LSTMBlockCellGrad\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"cs_prev\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"h_prev\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"w\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"wci\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"wcf\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"wco\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"b\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"i\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"cs\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"f\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"o\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"ci\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"co\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"cs_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"h_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"cs_prev_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"dicfo\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"wci_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"wcf_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"wco_grad\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"use_peephole\"\r\n# type: \"bool\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# }\r\n# }\r\n# }\r\n# }\r\n_op_def_lib = 
_InitOpDefLibrary(b\"\\n\\215\\002\\n\\tBlockLSTM\\022\\017\\n\\013seq_len_max\\030\\t\\022\\006\\n\\001x\\\"\\001T\\022\\014\\n\\007cs_prev\\\"\\001T\\022\\013\\n\\006h_prev\\\"\\001T\\022\\006\\n\\001w\\\"\\001T\\022\\010\\n\\003wci\\\"\\001T\\022\\010\\n\\003wcf\\\"\\001T\\022\\010\\n\\003wco\\\"\\001T\\022\\006\\n\\001b\\\"\\001T\\032\\006\\n\\001i\\\"\\001T\\032\\007\\n\\002cs\\\"\\001T\\032\\006\\n\\001f\\\"\\001T\\032\\006\\n\\001o\\\"\\001T\\032\\007\\n\\002ci\\\"\\001T\\032\\007\\n\\002co\\\"\\001T\\032\\006\\n\\001h\\\"\\001T\\\"\\033\\n\\013forget_bias\\022\\005float\\032\\005%\\000\\000\\200?\\\"\\031\\n\\tcell_clip\\022\\005float\\032\\005%\\000\\000@@\\\"\\030\\n\\014use_peephole\\022\\004bool\\032\\002(\\000\\\"\\021\\n\\001T\\022\\004type:\\006\\n\\0042\\002\\023\\001\\n\\351\\002\\n\\rBlockLSTMGrad\\022\\017\\n\\013seq_len_max\\030\\t\\022\\006\\n\\001x\\\"\\001T\\022\\014\\n\\007cs_prev\\\"\\001T\\022\\013\\n\\006h_prev\\\"\\001T\\022\\006\\n\\001w\\\"\\001T\\022\\010\\n\\003wci\\\"\\001T\\022\\010\\n\\003wcf\\\"\\001T\\022\\010\\n\\003wco\\\"\\001T\\022\\006\\n\\001b\\\"\\001T\\022\\006\\n\\001i\\\"\\001T\\022\\007\\n\\002cs\\\"\\001T\\022\\006\\n\\001f\\\"\\001T\\022\\006\\n\\001o\\\"\\001T\\022\\007\\n\\002ci\\\"\\001T\\022\\007\\n\\002co\\\"\\001T\\022\\006\\n\\001h\\\"\\001T\\022\\014\\n\\007cs_grad\\\"\\001T\\022\\013\\n\\006h_grad\\\"\\001T\\032\\013\\n\\006x_grad\\\"\\001T\\032\\021\\n\\014cs_prev_grad\\\"\\001T\\032\\020\\n\\013h_prev_grad\\\"\\001T\\032\\013\\n\\006w_grad\\\"\\001T\\032\\r\\n\\010wci_grad\\\"\\001T\\032\\r\\n\\010wcf_grad\\\"\\001T\\032\\r\\n\\010wco_grad\\\"\\001T\\032\\013\\n\\006b_grad\\\"\\001T\\\"\\024\\n\\014use_peephole\\022\\004bool\\\"\\021\\n\\001T\\022\\004type:\\006\\n\\0042\\002\\023\\001\\n\\200\\002\\n\\rLSTMBlockCell\\022\\006\\n\\001x\\\"\\001T\\022\\014\\n\\007cs_prev\\\"\\001T\\022\\013\\n\\006h_prev\\\"\\001T\\022\\006\\n\\001w\\\"\\001T\\022\\010\\n\\003wci\\\"\\001T\\022\\010\\n\\003wcf\\\"\\001T\\022\\010\\n\\003wco\\\"\\001T\\022\\006\\n\\001b\\\"\\001T\\032\\006\\n\\001i\\\"\\001T\\032\\007\\n\\002cs\\\"\\001T\\032\\006\\n\\001f\\\"\\001T\\032\\006\\n\\001o\\\"\\001T\\032\\007\\n\\002ci\\\"\\001T\\032\\007\\n\\002co\\\"\\001T\\032\\006\\n\\001h\\\"\\001T\\\"\\033\\n\\013forget_bias\\022\\005float\\032\\005%\\000\\000\\200?\\\"\\031\\n\\tcell_clip\\022\\005float\\032\\005%\\000\\000@@\\\"\\030\\n\\014use_peephole\\022\\004bool\\032\\002(\\000\\\"\\021\\n\\001T\\022\\004type:\\006\\n\\0042\\002\\023\\001\\n\\247\\002\\n\\021LSTMBlockCellGrad\\022\\006\\n\\001x\\\"\\001T\\022\\014\\n\\007cs_prev\\\"\\001T\\022\\013\\n\\006h_prev\\\"\\001T\\022\\006\\n\\001w\\\"\\001T\\022\\010\\n\\003wci\\\"\\001T\\022\\010\\n\\003wcf\\\"\\001T\\022\\010\\n\\003wco\\\"\\001T\\022\\006\\n\\001b\\\"\\001T\\022\\006\\n\\001i\\\"\\001T\\022\\007\\n\\002cs\\\"\\001T\\022\\006\\n\\001f\\\"\\001T\\022\\006\\n\\001o\\\"\\001T\\022\\007\\n\\002ci\\\"\\001T\\022\\007\\n\\002co\\\"\\001T\\022\\014\\n\\007cs_grad\\\"\\001T\\022\\013\\n\\006h_grad\\\"\\001T\\032\\021\\n\\014cs_prev_grad\\\"\\001T\\032\\n\\n\\005dicfo\\\"\\001T\\032\\r\\n\\010wci_grad\\\"\\001T\\032\\r\\n\\010wcf_grad\\\"\\001T\\032\\r\\n\\010wco_grad\\\"\\001T\\\"\\024\\n\\014use_peephole\\022\\004bool\\\"\\021\\n\\001T\\022\\004type:\\006\\n\\0042\\002\\023\\001\")\r\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Training functions for Gradient boosted decision trees.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport copy\r\n\r\nfrom tensorflow.contrib import learn\r\nfrom tensorflow.contrib import stateless\r\nfrom tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler\r\nfrom tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler\r\nfrom tensorflow.contrib.boosted_trees.proto import learner_pb2\r\nfrom tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils\r\nfrom tensorflow.contrib.boosted_trees.python.ops import gen_model_ops\r\nfrom tensorflow.contrib.boosted_trees.python.ops import model_ops\r\nfrom tensorflow.contrib.boosted_trees.python.ops import prediction_ops\r\nfrom tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops\r\nfrom tensorflow.contrib.boosted_trees.python.ops import training_ops\r\nfrom tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib\r\nfrom tensorflow.contrib.layers.python.layers import feature_column_ops\r\nfrom tensorflow.python.feature_column import feature_column as fc_core\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import sparse_tensor\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import gradients_impl\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.ops import variables\r\nfrom tensorflow.python.ops.losses import losses\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.summary import summary\r\nfrom tensorflow.python.training import device_setter\r\n\r\n\r\n# Key names for prediction dict.\r\nENSEMBLE_STAMP = \"ensemble_stamp\"\r\nPREDICTIONS = \"predictions\"\r\nPARTITION_IDS = \"partition_ids\"\r\nNUM_LAYERS_ATTEMPTED = \"num_layers\"\r\nNUM_TREES_ATTEMPTED = \"num_trees\"\r\nNUM_USED_HANDLERS = \"num_used_handlers\"\r\nUSED_HANDLERS_MASK = \"used_handlers_mask\"\r\nLEAF_INDEX = \"leaf_index\"\r\n_FEATURE_NAME_TEMPLATE = \"%s_%d\"\r\n\r\n# Keys in Training state.\r\nGBDTTrainingState = collections.namedtuple(\"GBDTTrainingState\", [\r\n \"num_layer_examples\", \"num_layer_steps\", \"num_layers\", \"active_tree\",\r\n \"active_layer\", \"continue_centering\", \"bias_stats_accumulator\",\r\n \"steps_accumulator\", \"handlers\"\r\n])\r\n\r\n\r\ndef _get_column_by_index(tensor, indices):\r\n \"\"\"Returns columns from a 2-D 
tensor by index.\"\"\"\r\n shape = array_ops.shape(tensor)\r\n p_flat = array_ops.reshape(tensor, [-1])\r\n i_flat = array_ops.reshape(\r\n array_ops.reshape(math_ops.range(0, shape[0]) * shape[1], [-1, 1]) +\r\n indices, [-1])\r\n return array_ops.reshape(array_ops.gather(p_flat, i_flat), [shape[0], -1])\r\n\r\n\r\ndef _make_predictions_dict(stamp,\r\n logits,\r\n partition_ids,\r\n ensemble_stats,\r\n used_handlers,\r\n leaf_index=None):\r\n \"\"\"Returns predictions for the given logits and n_classes.\r\n\r\n Args:\r\n stamp: The ensemble stamp.\r\n logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1]. that\r\n contains predictions when no dropout was applied.\r\n partition_ids: A rank 1 `Tensor` with shape [batch_size].\r\n ensemble_stats: A TreeEnsembleStatsOp result tuple.\r\n used_handlers: A TreeEnsembleUsedHandlerOp result tuple of an int and a\r\n boolean mask.\r\n leaf_index: A rank 2 `Tensor` with shape [batch_size, number of trees]. that\r\n contains leaf id for each example prediction.\r\n\r\n Returns:\r\n A dict of predictions.\r\n \"\"\"\r\n result = {}\r\n result[ENSEMBLE_STAMP] = stamp\r\n result[PREDICTIONS] = logits\r\n result[PARTITION_IDS] = partition_ids\r\n result[NUM_LAYERS_ATTEMPTED] = ensemble_stats.attempted_layers\r\n result[NUM_TREES_ATTEMPTED] = ensemble_stats.attempted_trees\r\n result[NUM_USED_HANDLERS] = used_handlers.num_used_handlers\r\n result[USED_HANDLERS_MASK] = used_handlers.used_handlers_mask\r\n if leaf_index is not None:\r\n result[LEAF_INDEX] = leaf_index\r\n return result\r\n\r\n\r\nclass _OpRoundRobinStrategy(object):\r\n \"\"\"Returns the next ps task index for placement via per-Op round-robin order.\r\n\r\n This strategy works slightly better for the GBDT graph because of using\r\n custom resources which vary significantly in compute cost.\r\n \"\"\"\r\n\r\n def __init__(self, ps_ops, num_tasks):\r\n \"\"\"Create a new `_RoundRobinStrategy`.\r\n\r\n Args:\r\n ps_ops: List of Op types to place on PS.\r\n num_tasks: Number of ps tasks to cycle among.\r\n \"\"\"\r\n next_task = 0\r\n self._next_task_per_op = {}\r\n for op in ps_ops:\r\n self._next_task_per_op[op] = next_task\r\n next_task = (next_task + 1) % num_tasks if num_tasks else 0\r\n self._num_tasks = num_tasks\r\n\r\n def __call__(self, op):\r\n \"\"\"Choose a ps task index for the given `Operation`.\r\n\r\n Args:\r\n op: An `Operation` to be placed on ps.\r\n\r\n Returns:\r\n The next ps task index to use for the `Operation`. 
Returns the next\r\n index, in the range `[offset, offset + num_tasks)`.\r\n\r\n Raises:\r\n ValueError: If attempting to place non-PS Op.\r\n \"\"\"\r\n if op.type not in self._next_task_per_op:\r\n raise ValueError(\"Unknown op type '%s' for placement:\" % op.type)\r\n task = self._next_task_per_op[op.type]\r\n self._next_task_per_op[op.type] = ((task + 1) % self._num_tasks\r\n if self._num_tasks else 0)\r\n return task\r\n\r\n\r\ndef extract_features(features, feature_columns, use_core_columns):\r\n \"\"\"Extracts columns from a dictionary of features.\r\n\r\n Args:\r\n features: `dict` of `Tensor` objects.\r\n feature_columns: A list of feature_columns.\r\n\r\n Returns:\r\n Seven values:\r\n - A list of all feature column names.\r\n - A list of dense floats.\r\n - A list of sparse float feature indices.\r\n - A list of sparse float feature values.\r\n - A list of sparse float feature shapes.\r\n - A list of sparse int feature indices.\r\n - A list of sparse int feature values.\r\n - A list of sparse int feature shapes.\r\n Raises:\r\n ValueError: if features is not valid.\r\n \"\"\"\r\n if not features:\r\n raise ValueError(\"Features dictionary must be specified.\")\r\n\r\n # Make a shallow copy of features to ensure downstream usage\r\n # is unaffected by modifications in the model function.\r\n features = copy.copy(features)\r\n if feature_columns:\r\n scope = \"gbdt\"\r\n with variable_scope.variable_scope(scope):\r\n feature_columns = list(feature_columns)\r\n transformed_features = collections.OrderedDict()\r\n for fc in feature_columns:\r\n # pylint: disable=protected-access\r\n if use_core_columns:\r\n # pylint: disable=protected-access\r\n tensor = fc_core._transform_features(features, [fc])[fc]\r\n transformed_features[fc.name] = tensor\r\n elif isinstance(fc, feature_column_lib._EmbeddingColumn):\r\n # pylint: enable=protected-access\r\n transformed_features[fc.name] = fc_core.input_layer(\r\n features, [fc], weight_collections=[scope])\r\n else:\r\n result = feature_column_ops.transform_features(features, [fc])\r\n if len(result) > 1:\r\n raise ValueError(\"Unexpected number of output features\")\r\n transformed_features[fc.name] = result[list(result.keys())[0]]\r\n features = transformed_features\r\n\r\n dense_float_names = []\r\n dense_floats = []\r\n sparse_float_names = []\r\n sparse_float_indices = []\r\n sparse_float_values = []\r\n sparse_float_shapes = []\r\n sparse_int_names = []\r\n sparse_int_indices = []\r\n sparse_int_values = []\r\n sparse_int_shapes = []\r\n for key in sorted(features.keys()):\r\n tensor = features[key]\r\n # TODO(nponomareva): consider iterating over feature columns instead.\r\n if isinstance(tensor, tuple):\r\n # Weighted categorical feature.\r\n categorical_tensor = tensor[0]\r\n weight_tensor = tensor[1]\r\n\r\n shape = categorical_tensor.dense_shape\r\n indices = array_ops.concat([\r\n array_ops.slice(categorical_tensor.indices, [0, 0], [-1, 1]),\r\n array_ops.expand_dims(\r\n math_ops.to_int64(categorical_tensor.values), -1)\r\n ], 1)\r\n tensor = sparse_tensor.SparseTensor(\r\n indices=indices, values=weight_tensor.values, dense_shape=shape)\r\n\r\n if isinstance(tensor, sparse_tensor.SparseTensor):\r\n if tensor.values.dtype == dtypes.float32:\r\n sparse_float_names.append(key)\r\n sparse_float_indices.append(tensor.indices)\r\n sparse_float_values.append(tensor.values)\r\n sparse_float_shapes.append(tensor.dense_shape)\r\n elif tensor.values.dtype == dtypes.int64:\r\n sparse_int_names.append(key)\r\n 
sparse_int_indices.append(tensor.indices)\r\n sparse_int_values.append(tensor.values)\r\n sparse_int_shapes.append(tensor.dense_shape)\r\n else:\r\n raise ValueError(\"Unsupported sparse feature %s with dtype %s.\" %\r\n (tensor.indices.name, tensor.dtype))\r\n else:\r\n if tensor.dtype == dtypes.float32:\r\n if len(tensor.shape) > 1 and tensor.shape[1] > 1:\r\n unstacked = array_ops.unstack(tensor, axis=1)\r\n for i in range(len(unstacked)):\r\n dense_float_names.append(_FEATURE_NAME_TEMPLATE % (key, i))\r\n dense_floats.append(array_ops.reshape(unstacked[i], [-1, 1]))\r\n else:\r\n dense_float_names.append(key)\r\n dense_floats.append(tensor)\r\n else:\r\n raise ValueError(\"Unsupported dense feature %s with dtype %s.\" %\r\n (tensor.name, tensor.dtype))\r\n # Feature columns are logically organized into incrementing slots starting\r\n # from dense floats, then sparse floats then sparse ints.\r\n fc_names = (dense_float_names + sparse_float_names + sparse_int_names)\r\n return (fc_names, dense_floats, sparse_float_indices, sparse_float_values,\r\n sparse_float_shapes, sparse_int_indices, sparse_int_values,\r\n sparse_int_shapes)\r\n\r\n\r\ndef _dropout_params(mode, ensemble_stats):\r\n \"\"\"Returns parameters relevant for dropout.\r\n\r\n Args:\r\n mode: Train/Eval/Infer\r\n ensemble_stats: A TreeEnsembleStatsOp result tuple.\r\n\r\n Returns:\r\n Whether to apply dropout and a dropout seed.\r\n \"\"\"\r\n if mode == learn.ModeKeys.TRAIN:\r\n # Do dropout only during training.\r\n apply_dropout = True\r\n seed = ensemble_stats.attempted_trees\r\n else:\r\n seed = -1\r\n apply_dropout = False\r\n return apply_dropout, seed\r\n\r\n\r\nclass GradientBoostedDecisionTreeModel(object):\r\n \"\"\"A GBDT model function.\"\"\"\r\n\r\n def __init__(self,\r\n is_chief,\r\n num_ps_replicas,\r\n ensemble_handle,\r\n center_bias,\r\n examples_per_layer,\r\n learner_config,\r\n features,\r\n logits_dimension,\r\n loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS,\r\n feature_columns=None,\r\n use_core_columns=False,\r\n output_leaf_index=False,\r\n output_leaf_index_modes=None,\r\n num_quantiles=100):\r\n \"\"\"Construct a new GradientBoostedDecisionTreeModel function.\r\n\r\n Args:\r\n is_chief: Whether to build the chief graph.\r\n num_ps_replicas: Number of parameter server replicas, can be 0.\r\n ensemble_handle: A handle to the ensemble variable.\r\n center_bias: Whether to center the bias before growing trees.\r\n examples_per_layer: Number of examples to accumulate before growing a tree\r\n layer. It can also be a function that computes the number of examples\r\n based on the depth of the layer that's being built.\r\n learner_config: A learner config.\r\n features: `dict` of `Tensor` objects.\r\n logits_dimension: An int, the dimension of logits.\r\n loss_reduction: Either `SUM_OVER_NONZERO_WEIGHTS` (mean) or `SUM`.\r\n feature_columns: A list of feature columns.\r\n use_core_columns: A boolean specifying whether core feature columns are\r\n used.\r\n output_leaf_index: A boolean variable indicating whether to output leaf\r\n index into predictions dictionary.\r\n output_leaf_index_modes: A list of modes from (TRAIN, EVAL, INFER) which\r\n dictates when leaf indices will be outputted. 
By default, leaf indices\r\n are only outputted in INFER mode.\r\n num_quantiles: Number of quantiles to build for numeric feature values.\r\n\r\n Raises:\r\n ValueError: if inputs are not valid.\r\n \"\"\"\r\n if ensemble_handle is None:\r\n raise ValueError(\"ensemble_handle must be specified.\")\r\n\r\n if learner_config is None:\r\n raise ValueError(\"learner_config must be specified.\")\r\n\r\n if learner_config.num_classes < 2:\r\n raise ValueError(\"Number of classes must be >=2\")\r\n\r\n self._logits_dimension = logits_dimension\r\n self._is_chief = is_chief\r\n self._num_ps_replicas = num_ps_replicas\r\n self._ensemble_handle = ensemble_handle\r\n self._center_bias = center_bias\r\n self._examples_per_layer = examples_per_layer\r\n\r\n # Check loss reduction value.\r\n if (loss_reduction != losses.Reduction.SUM and\r\n loss_reduction != losses.Reduction.SUM_OVER_NONZERO_WEIGHTS):\r\n raise ValueError(\r\n \"Invalid loss reduction is provided: %s.\" % loss_reduction)\r\n self._loss_reduction = loss_reduction\r\n\r\n # Fill in the defaults.\r\n if (learner_config.multi_class_strategy ==\r\n learner_pb2.LearnerConfig.MULTI_CLASS_STRATEGY_UNSPECIFIED):\r\n if logits_dimension == 1:\r\n learner_config.multi_class_strategy = (\r\n learner_pb2.LearnerConfig.TREE_PER_CLASS)\r\n else:\r\n learner_config.multi_class_strategy = (\r\n learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)\r\n\r\n if logits_dimension == 1 or learner_config.multi_class_strategy == (\r\n learner_pb2.LearnerConfig.TREE_PER_CLASS):\r\n self._gradient_shape = tensor_shape.scalar()\r\n self._hessian_shape = tensor_shape.scalar()\r\n else:\r\n if center_bias:\r\n raise ValueError(\"Center bias should be False for multiclass.\")\r\n\r\n self._gradient_shape = tensor_shape.TensorShape([logits_dimension])\r\n if (learner_config.multi_class_strategy ==\r\n learner_pb2.LearnerConfig.FULL_HESSIAN):\r\n self._hessian_shape = tensor_shape.TensorShape(\r\n ([logits_dimension, logits_dimension]))\r\n else:\r\n # Diagonal hessian strategy.\r\n self._hessian_shape = tensor_shape.TensorShape(([logits_dimension]))\r\n if (learner_config.growing_mode ==\r\n learner_pb2.LearnerConfig.GROWING_MODE_UNSPECIFIED):\r\n learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER\r\n\r\n if (learner_config.pruning_mode ==\r\n learner_pb2.LearnerConfig.PRUNING_MODE_UNSPECIFIED):\r\n learner_config.pruning_mode = learner_pb2.LearnerConfig.POST_PRUNE\r\n\r\n if learner_config.constraints.max_tree_depth == 0:\r\n # Use 6 as the default maximum depth.\r\n learner_config.constraints.max_tree_depth = 6\r\n\r\n tuner = learner_config.learning_rate_tuner.WhichOneof(\"tuner\")\r\n if not tuner:\r\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\r\n\r\n self._learner_config = learner_config\r\n self._feature_columns = feature_columns\r\n self._learner_config_serialized = learner_config.SerializeToString()\r\n self._num_quantiles = num_quantiles\r\n self._max_tree_depth = variables.VariableV1(\r\n initial_value=self._learner_config.constraints.max_tree_depth)\r\n self._attempted_trees = variables.VariableV1(\r\n initial_value=array_ops.zeros([], dtypes.int64),\r\n trainable=False,\r\n name=\"attempted_trees\")\r\n self._finalized_trees = variables.VariableV1(\r\n initial_value=array_ops.zeros([], dtypes.int64),\r\n trainable=False,\r\n name=\"finalized_trees\")\r\n if not features:\r\n raise ValueError(\"Features dictionary must be specified.\")\r\n (fc_names, dense_floats, sparse_float_indices, sparse_float_values,\r\n 
sparse_float_shapes, sparse_int_indices,\r\n sparse_int_values, sparse_int_shapes) = extract_features(\r\n features, self._feature_columns, use_core_columns)\r\n logging.info(\"Active Feature Columns: \" + str(fc_names))\r\n logging.info(\"Learner config: \" + str(learner_config))\r\n self._fc_names = fc_names\r\n self._dense_floats = dense_floats\r\n self._sparse_float_indices = sparse_float_indices\r\n self._sparse_float_values = sparse_float_values\r\n self._sparse_float_shapes = sparse_float_shapes\r\n self._sparse_int_indices = sparse_int_indices\r\n self._sparse_int_values = sparse_int_values\r\n self._sparse_int_shapes = sparse_int_shapes\r\n self._reduce_dim = (\r\n self._learner_config.multi_class_strategy ==\r\n learner_pb2.LearnerConfig.TREE_PER_CLASS and\r\n learner_config.num_classes == 2)\r\n\r\n if output_leaf_index_modes is None:\r\n output_leaf_index_modes = [learn.ModeKeys.INFER]\r\n elif not all(\r\n mode in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL,\r\n learn.ModeKeys.INFER) for mode in output_leaf_index_modes):\r\n raise ValueError(\"output_leaf_index_modes should only contain ModeKeys.\")\r\n\r\n self._output_leaf_index = output_leaf_index\r\n self._output_leaf_index_modes = output_leaf_index_modes\r\n\r\n def _predict_and_return_dict(self, ensemble_handle, ensemble_stamp, mode):\r\n \"\"\"Runs prediction and returns a dictionary of the prediction results.\r\n\r\n Args:\r\n ensemble_handle: ensemble resource handle.\r\n ensemble_stamp: stamp of ensemble resource.\r\n mode: learn.ModeKeys.TRAIN or EVAL or INFER.\r\n\r\n Returns:\r\n a dictionary of prediction results -\r\n ENSEMBLE_STAMP, PREDICTION, PARTITION_IDS,\r\n NUM_LAYER_ATTEMPTED, NUM_TREES_ATTEMPTED.\r\n \"\"\"\r\n ensemble_stats = training_ops.tree_ensemble_stats(ensemble_handle,\r\n ensemble_stamp)\r\n num_handlers = (\r\n len(self._dense_floats) + len(self._sparse_float_shapes) + len(\r\n self._sparse_int_shapes))\r\n # Used during feature selection.\r\n used_handlers = model_ops.tree_ensemble_used_handlers(\r\n ensemble_handle, ensemble_stamp, num_all_handlers=num_handlers)\r\n\r\n # We don't need dropout info - we can always restore it based on the\r\n # seed.\r\n apply_dropout, seed = _dropout_params(mode, ensemble_stats)\r\n # Make sure ensemble stats run. 
This will check that the ensemble has\r\n # the right stamp.\r\n with ops.control_dependencies(ensemble_stats):\r\n leaf_index = None\r\n if self._output_leaf_index and mode in self._output_leaf_index_modes:\r\n predictions, _, leaf_index = (\r\n prediction_ops).gradient_trees_prediction_verbose(\r\n ensemble_handle,\r\n seed,\r\n self._dense_floats,\r\n self._sparse_float_indices,\r\n self._sparse_float_values,\r\n self._sparse_float_shapes,\r\n self._sparse_int_indices,\r\n self._sparse_int_values,\r\n self._sparse_int_shapes,\r\n learner_config=self._learner_config_serialized,\r\n apply_dropout=apply_dropout,\r\n apply_averaging=mode != learn.ModeKeys.TRAIN,\r\n use_locking=True,\r\n center_bias=self._center_bias,\r\n reduce_dim=self._reduce_dim)\r\n else:\r\n leaf_index = None\r\n predictions, _ = prediction_ops.gradient_trees_prediction(\r\n ensemble_handle,\r\n seed,\r\n self._dense_floats,\r\n self._sparse_float_indices,\r\n self._sparse_float_values,\r\n self._sparse_float_shapes,\r\n self._sparse_int_indices,\r\n self._sparse_int_values,\r\n self._sparse_int_shapes,\r\n learner_config=self._learner_config_serialized,\r\n apply_dropout=apply_dropout,\r\n apply_averaging=mode != learn.ModeKeys.TRAIN,\r\n use_locking=True,\r\n center_bias=self._center_bias,\r\n reduce_dim=self._reduce_dim)\r\n partition_ids = prediction_ops.gradient_trees_partition_examples(\r\n ensemble_handle,\r\n self._dense_floats,\r\n self._sparse_float_indices,\r\n self._sparse_float_values,\r\n self._sparse_float_shapes,\r\n self._sparse_int_indices,\r\n self._sparse_int_values,\r\n self._sparse_int_shapes,\r\n use_locking=True)\r\n\r\n return _make_predictions_dict(ensemble_stamp, predictions, partition_ids,\r\n ensemble_stats, used_handlers, leaf_index)\r\n\r\n def predict(self, mode):\r\n \"\"\"Returns predictions given the features and mode.\r\n\r\n Args:\r\n mode: Mode the graph is running in (train|predict|eval).\r\n\r\n Returns:\r\n A dict of predictions tensors.\r\n\r\n Raises:\r\n ValueError: if features is not valid.\r\n \"\"\"\r\n\r\n # Use the current ensemble to predict on the current batch of input.\r\n # For faster prediction we check if the inputs are on the same device\r\n # as the model. 
If not, we create a copy of the model on the worker.\r\n input_deps = (\r\n self._dense_floats + self._sparse_float_indices +\r\n self._sparse_int_indices)\r\n if not input_deps:\r\n raise ValueError(\"No input tensors for prediction.\")\r\n\r\n # Get most current model stamp.\r\n ensemble_stamp = model_ops.tree_ensemble_stamp_token(self._ensemble_handle)\r\n\r\n # Determine if ensemble is colocated with the inputs.\r\n if self._ensemble_handle.device != input_deps[0].device:\r\n # Create a local ensemble and get its local stamp.\r\n with ops.name_scope(\"local_ensemble\", \"TreeEnsembleVariable\") as name:\r\n local_ensemble_handle = (\r\n gen_model_ops.decision_tree_ensemble_resource_handle_op(name=name))\r\n create_op = gen_model_ops.create_tree_ensemble_variable(\r\n local_ensemble_handle, stamp_token=-1, tree_ensemble_config=\"\")\r\n with ops.control_dependencies([create_op]):\r\n local_stamp = model_ops.tree_ensemble_stamp_token(\r\n local_ensemble_handle)\r\n\r\n # Determine whether the local ensemble is stale and update it if needed.\r\n def _refresh_local_ensemble_fn():\r\n # Serialize the model from parameter server after reading the inputs.\r\n with ops.control_dependencies([input_deps[0]]):\r\n (ensemble_stamp, serialized_model) = (\r\n model_ops.tree_ensemble_serialize(self._ensemble_handle))\r\n\r\n # Update local ensemble with the serialized model from parameter server.\r\n with ops.control_dependencies([create_op]):\r\n return model_ops.tree_ensemble_deserialize(\r\n local_ensemble_handle,\r\n stamp_token=ensemble_stamp,\r\n tree_ensemble_config=serialized_model), ensemble_stamp\r\n\r\n refresh_local_ensemble, ensemble_stamp = control_flow_ops.cond(\r\n math_ops.not_equal(ensemble_stamp,\r\n local_stamp), _refresh_local_ensemble_fn,\r\n lambda: (control_flow_ops.no_op(), ensemble_stamp))\r\n\r\n # Once updated, use the local model for prediction.\r\n with ops.control_dependencies([refresh_local_ensemble]):\r\n return self._predict_and_return_dict(local_ensemble_handle,\r\n ensemble_stamp, mode)\r\n else:\r\n # Use ensemble_handle directly, if colocated.\r\n with ops.device(self._ensemble_handle.device):\r\n return self._predict_and_return_dict(self._ensemble_handle,\r\n ensemble_stamp, mode)\r\n\r\n def _get_class_id(self, predictions_dict):\r\n # Handle different multiclass strategies.\r\n if (self._learner_config.multi_class_strategy ==\r\n learner_pb2.LearnerConfig.TREE_PER_CLASS and\r\n self._logits_dimension != 1):\r\n # Choose the class for which the tree is built (one vs rest).\r\n return math_ops.to_int32(\r\n predictions_dict[NUM_TREES_ATTEMPTED] % self._logits_dimension)\r\n return constant_op.constant(-1, dtype=dtypes.int32)\r\n\r\n def update_stats(self, loss, predictions_dict):\r\n \"\"\"Update the accumulators with stats from this batch.\r\n\r\n Args:\r\n loss: A scalar tensor representing average loss of examples.\r\n predictions_dict: Dictionary of Rank 2 `Tensor` representing information\r\n about predictions per example.\r\n\r\n Returns:\r\n Three values:\r\n - An op that adds a new tree to the ensemble, and\r\n - An op that increments the stamp but removes all the trees and resets\r\n the handlers. 
This can be used to reset the state of the ensemble.\r\n - A dict containing the training state.\r\n\r\n Raises:\r\n ValueError: if inputs are not valid.\r\n \"\"\"\r\n # Get the worker device from input dependencies.\r\n input_deps = (\r\n self._dense_floats + self._sparse_float_indices +\r\n self._sparse_int_indices)\r\n worker_device = input_deps[0].device\r\n\r\n # Get tensors relevant for training and form the loss.\r\n predictions = predictions_dict[PREDICTIONS]\r\n partition_ids = predictions_dict[PARTITION_IDS]\r\n ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]\r\n gradients = gradients_impl.gradients(\r\n loss,\r\n predictions,\r\n name=\"Gradients\",\r\n colocate_gradients_with_ops=False,\r\n gate_gradients=0,\r\n aggregation_method=None)[0]\r\n strategy = self._learner_config.multi_class_strategy\r\n\r\n class_id = self._get_class_id(predictions_dict)\r\n # Handle different multiclass strategies.\r\n if strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS:\r\n # We build one vs rest trees.\r\n if self._logits_dimension == 1:\r\n # We have only 1 score, gradients is of shape [batch, 1].\r\n hessians = gradients_impl.gradients(\r\n gradients,\r\n predictions,\r\n name=\"Hessian\",\r\n colocate_gradients_with_ops=False,\r\n gate_gradients=0,\r\n aggregation_method=None)[0]\r\n\r\n squeezed_gradients = array_ops.squeeze(gradients, axis=[1])\r\n squeezed_hessians = array_ops.squeeze(hessians, axis=[1])\r\n else:\r\n hessian_list = self._diagonal_hessian(gradients, predictions)\r\n # Assemble hessian list into a tensor.\r\n hessians = array_ops.stack(hessian_list, axis=1)\r\n # Use class id tensor to get the column with that index from gradients\r\n # and hessians.\r\n squeezed_gradients = array_ops.squeeze(\r\n _get_column_by_index(gradients, class_id))\r\n squeezed_hessians = array_ops.squeeze(\r\n _get_column_by_index(hessians, class_id))\r\n else:\r\n # Other multiclass strategies.\r\n if strategy == learner_pb2.LearnerConfig.FULL_HESSIAN:\r\n hessian_list = self._full_hessian(gradients, predictions)\r\n else:\r\n # Diagonal hessian strategy.\r\n hessian_list = self._diagonal_hessian(gradients, predictions)\r\n\r\n squeezed_gradients = gradients\r\n hessians = array_ops.stack(hessian_list, axis=1)\r\n squeezed_hessians = hessians\r\n\r\n # Get the weights for each example for quantiles calculation,\r\n weights = self._get_weights(self._hessian_shape, squeezed_hessians)\r\n\r\n # Create all handlers ensuring resources are evenly allocated across PS.\r\n fc_name_idx = 0\r\n handlers = []\r\n init_stamp_token = constant_op.constant(0, dtype=dtypes.int64)\r\n l1_regularization = constant_op.constant(\r\n self._learner_config.regularization.l1, dtypes.float32)\r\n l2_regularization = constant_op.constant(\r\n self._learner_config.regularization.l2, dtypes.float32)\r\n tree_complexity_regularization = constant_op.constant(\r\n self._learner_config.regularization.tree_complexity, dtypes.float32)\r\n min_node_weight = constant_op.constant(\r\n self._learner_config.constraints.min_node_weight, dtypes.float32)\r\n loss_uses_sum_reduction = self._loss_reduction == losses.Reduction.SUM\r\n loss_uses_sum_reduction = constant_op.constant(loss_uses_sum_reduction)\r\n weak_learner_type = constant_op.constant(\r\n self._learner_config.weak_learner_type)\r\n num_quantiles = self._num_quantiles\r\n epsilon = 1.0 / num_quantiles\r\n strategy_tensor = constant_op.constant(strategy)\r\n with ops.device(self._get_replica_device_setter(worker_device)):\r\n # Create handlers for dense float columns\r\n 
for dense_float_column_idx in range(len(self._dense_floats)):\r\n fc_name = self._fc_names[fc_name_idx]\r\n handlers.append(\r\n ordinal_split_handler.DenseSplitHandler(\r\n l1_regularization=l1_regularization,\r\n l2_regularization=l2_regularization,\r\n tree_complexity_regularization=tree_complexity_regularization,\r\n min_node_weight=min_node_weight,\r\n feature_column_group_id=constant_op.constant(\r\n dense_float_column_idx),\r\n epsilon=epsilon,\r\n num_quantiles=num_quantiles,\r\n dense_float_column=self._dense_floats[dense_float_column_idx],\r\n name=fc_name,\r\n gradient_shape=self._gradient_shape,\r\n hessian_shape=self._hessian_shape,\r\n multiclass_strategy=strategy_tensor,\r\n init_stamp_token=init_stamp_token,\r\n loss_uses_sum_reduction=loss_uses_sum_reduction,\r\n weak_learner_type=weak_learner_type,\r\n ))\r\n fc_name_idx += 1\r\n\r\n # Create handlers for sparse float columns.\r\n for sparse_float_column_idx in range(len(self._sparse_float_indices)):\r\n fc_name = self._fc_names[fc_name_idx]\r\n handlers.append(\r\n ordinal_split_handler.SparseSplitHandler(\r\n l1_regularization=l1_regularization,\r\n l2_regularization=l2_regularization,\r\n tree_complexity_regularization=tree_complexity_regularization,\r\n min_node_weight=min_node_weight,\r\n feature_column_group_id=constant_op.constant(\r\n sparse_float_column_idx),\r\n epsilon=epsilon,\r\n num_quantiles=num_quantiles,\r\n sparse_float_column=sparse_tensor.SparseTensor(\r\n self._sparse_float_indices[sparse_float_column_idx],\r\n self._sparse_float_values[sparse_float_column_idx],\r\n self._sparse_float_shapes[sparse_float_column_idx]),\r\n name=fc_name,\r\n gradient_shape=self._gradient_shape,\r\n hessian_shape=self._hessian_shape,\r\n multiclass_strategy=strategy_tensor,\r\n init_stamp_token=init_stamp_token,\r\n loss_uses_sum_reduction=loss_uses_sum_reduction))\r\n fc_name_idx += 1\r\n\r\n # Create handlers for sparse int columns.\r\n for sparse_int_column_idx in range(len(self._sparse_int_indices)):\r\n fc_name = self._fc_names[fc_name_idx]\r\n handlers.append(\r\n categorical_split_handler.EqualitySplitHandler(\r\n l1_regularization=l1_regularization,\r\n l2_regularization=l2_regularization,\r\n tree_complexity_regularization=tree_complexity_regularization,\r\n min_node_weight=min_node_weight,\r\n feature_column_group_id=constant_op.constant(\r\n sparse_int_column_idx),\r\n sparse_int_column=sparse_tensor.SparseTensor(\r\n self._sparse_int_indices[sparse_int_column_idx],\r\n self._sparse_int_values[sparse_int_column_idx],\r\n self._sparse_int_shapes[sparse_int_column_idx]),\r\n name=fc_name,\r\n gradient_shape=self._gradient_shape,\r\n hessian_shape=self._hessian_shape,\r\n multiclass_strategy=strategy_tensor,\r\n init_stamp_token=init_stamp_token,\r\n loss_uses_sum_reduction=loss_uses_sum_reduction,\r\n weak_learner_type=weak_learner_type))\r\n fc_name_idx += 1\r\n\r\n # Create ensemble stats variables.\r\n num_layer_examples = variables.VariableV1(\r\n initial_value=array_ops.zeros([], dtypes.int64),\r\n name=\"num_layer_examples\",\r\n trainable=False)\r\n num_layer_steps = variables.VariableV1(\r\n initial_value=array_ops.zeros([], dtypes.int64),\r\n name=\"num_layer_steps\",\r\n trainable=False)\r\n num_layers = variables.VariableV1(\r\n initial_value=array_ops.zeros([], dtypes.int64),\r\n name=\"num_layers\",\r\n trainable=False)\r\n active_tree = variables.VariableV1(\r\n initial_value=array_ops.zeros([], dtypes.int64),\r\n name=\"active_tree\",\r\n trainable=False)\r\n active_layer = 
variables.VariableV1(\r\n initial_value=array_ops.zeros([], dtypes.int64),\r\n name=\"active_layer\",\r\n trainable=False)\r\n # Variable that becomes false once bias centering is done.\r\n continue_centering = variables.VariableV1(\r\n initial_value=self._center_bias,\r\n name=\"continue_centering\",\r\n trainable=False)\r\n # Create bias stats accumulator.\r\n bias_stats_accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=self._gradient_shape,\r\n hessian_shape=self._hessian_shape,\r\n name=\"BiasAccumulator\")\r\n # Create steps accumulator.\r\n steps_accumulator = stats_accumulator_ops.StatsAccumulator(\r\n stamp_token=0,\r\n gradient_shape=tensor_shape.scalar(),\r\n hessian_shape=tensor_shape.scalar(),\r\n name=\"StepsAccumulator\")\r\n # Create ensemble stats summaries.\r\n summary.scalar(\"layer_stats/num_examples\", num_layer_examples)\r\n summary.scalar(\"layer_stats/num_steps\", num_layer_steps)\r\n summary.scalar(\"ensemble_stats/active_tree\", active_tree)\r\n summary.scalar(\"ensemble_stats/active_layer\", active_layer)\r\n\r\n # Update bias stats.\r\n stats_update_ops = []\r\n\r\n stats_update_ops.append(\r\n control_flow_ops.cond(\r\n continue_centering,\r\n self._make_update_bias_stats_fn(\r\n ensemble_stamp, predictions, gradients,\r\n bias_stats_accumulator), control_flow_ops.no_op))\r\n\r\n # Update handler stats.\r\n handler_reads = collections.OrderedDict()\r\n for handler in handlers:\r\n handler_reads[handler] = handler.scheduled_reads()\r\n\r\n handler_results = batch_ops_utils.run_handler_scheduled_ops(\r\n handler_reads, ensemble_stamp, worker_device)\r\n per_handler_updates = collections.OrderedDict()\r\n # Two values per handler. First one is if the handler is active for the\r\n # current layer. 
The second one is if the handler is going to be active\r\n # for the next layer.\r\n subsampling_type = self._learner_config.WhichOneof(\"feature_fraction\")\r\n if subsampling_type == \"feature_fraction_per_level\":\r\n seed = predictions_dict[NUM_LAYERS_ATTEMPTED]\r\n active_handlers_current_layer = stateless.stateless_random_uniform(\r\n shape=[len(handlers)], seed=[seed, 1])\r\n active_handlers_next_layer = stateless.stateless_random_uniform(\r\n shape=[len(handlers)], seed=[seed + 1, 1])\r\n active_handlers = array_ops.stack(\r\n [active_handlers_current_layer, active_handlers_next_layer], axis=1)\r\n active_handlers = (\r\n active_handlers < self._learner_config.feature_fraction_per_level)\r\n elif subsampling_type == \"feature_fraction_per_tree\":\r\n seed = predictions_dict[NUM_TREES_ATTEMPTED]\r\n active_handlers_current_layer = stateless.stateless_random_uniform(\r\n shape=[len(handlers)], seed=[seed, 2])\r\n active_handlers_current_layer = (\r\n active_handlers_current_layer <\r\n self._learner_config.feature_fraction_per_tree)\r\n active_handlers = array_ops.stack(\r\n [\r\n active_handlers_current_layer,\r\n array_ops.ones([len(handlers)], dtype=dtypes.bool)\r\n ],\r\n axis=1)\r\n else:\r\n active_handlers = array_ops.ones([len(handlers), 2], dtype=dtypes.bool)\r\n\r\n if self._learner_config.constraints.max_number_of_unique_feature_columns:\r\n target = (\r\n self._learner_config.constraints.max_number_of_unique_feature_columns)\r\n\r\n def _feature_selection_active_handlers():\r\n # The active list for current and the next iteration.\r\n used_handlers = array_ops.reshape(predictions_dict[USED_HANDLERS_MASK],\r\n [-1, 1])\r\n used_handlers = array_ops.concat([used_handlers, used_handlers], axis=1)\r\n return math_ops.logical_and(used_handlers, active_handlers)\r\n\r\n active_handlers = (\r\n control_flow_ops.cond(predictions_dict[NUM_USED_HANDLERS] >= target,\r\n _feature_selection_active_handlers,\r\n lambda: active_handlers))\r\n\r\n # Prepare empty gradients and hessians when handlers are not ready.\r\n empty_hess_shape = [1] + self._hessian_shape.as_list()\r\n empty_grad_shape = [1] + self._gradient_shape.as_list()\r\n\r\n empty_gradients = constant_op.constant(\r\n [], dtype=dtypes.float32, shape=empty_grad_shape)\r\n empty_hessians = constant_op.constant(\r\n [], dtype=dtypes.float32, shape=empty_hess_shape)\r\n\r\n active_handlers = array_ops.unstack(active_handlers, axis=0)\r\n for handler_idx in range(len(handlers)):\r\n handler = handlers[handler_idx]\r\n is_active = active_handlers[handler_idx]\r\n updates, scheduled_updates = handler.update_stats(\r\n ensemble_stamp, partition_ids, squeezed_gradients, squeezed_hessians,\r\n empty_gradients, empty_hessians, weights, is_active,\r\n handler_results[handler])\r\n stats_update_ops.append(updates)\r\n per_handler_updates[handler] = scheduled_updates\r\n\r\n update_results = batch_ops_utils.run_handler_scheduled_ops(\r\n per_handler_updates, ensemble_stamp, worker_device)\r\n for update in update_results.values():\r\n stats_update_ops += update\r\n\r\n training_state = GBDTTrainingState(\r\n num_layer_examples=num_layer_examples,\r\n num_layer_steps=num_layer_steps,\r\n num_layers=num_layers,\r\n active_tree=active_tree,\r\n active_layer=active_layer,\r\n continue_centering=continue_centering,\r\n bias_stats_accumulator=bias_stats_accumulator,\r\n steps_accumulator=steps_accumulator,\r\n handlers=handlers)\r\n\r\n reset_op = control_flow_ops.no_op()\r\n if self._is_chief:\r\n # Advance the ensemble stamp to throw away 
staggered workers.\r\n stamp_token, _ = model_ops.tree_ensemble_serialize(self._ensemble_handle)\r\n next_stamp_token = stamp_token + 1\r\n\r\n reset_ops = []\r\n for handler in handlers:\r\n reset_ops.append(handler.reset(stamp_token, next_stamp_token))\r\n if self._center_bias:\r\n reset_ops.append(\r\n bias_stats_accumulator.flush(stamp_token, next_stamp_token))\r\n reset_ops.append(steps_accumulator.flush(stamp_token, next_stamp_token))\r\n reset_ops.append(self._finalized_trees.assign(0).op)\r\n reset_ops.append(self._attempted_trees.assign(0).op)\r\n reset_ops.append(\r\n model_ops.tree_ensemble_deserialize(\r\n self._ensemble_handle,\r\n stamp_token=next_stamp_token,\r\n tree_ensemble_config=\"\",\r\n name=\"reset_gbdt\"))\r\n\r\n reset_op = control_flow_ops.group([reset_ops])\r\n\r\n return stats_update_ops, reset_op, training_state\r\n\r\n def increment_step_counter_and_maybe_update_ensemble(self, predictions_dict,\r\n training_state):\r\n \"\"\"Increments number of visited examples and grows the ensemble.\r\n\r\n If the number of visited examples reaches the target examples_per_layer,\r\n ensemble is updated.\r\n\r\n Args:\r\n predictions_dict: Dictionary of Rank 2 `Tensor` representing information\r\n about predictions per example.\r\n training_state: `dict` returned by update_stats.\r\n\r\n Returns:\r\n An op that updates the counters and potientially grows the ensemble.\r\n \"\"\"\r\n batch_size = math_ops.cast(\r\n array_ops.shape(predictions_dict[PREDICTIONS])[0], dtypes.float32)\r\n ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]\r\n # Accumulate a step after updating stats.\r\n\r\n steps_accumulator = training_state.steps_accumulator\r\n num_layer_examples = training_state.num_layer_examples\r\n num_layer_steps = training_state.num_layer_steps\r\n active_layer = training_state.active_layer\r\n add_step_op = steps_accumulator.add(\r\n ensemble_stamp, [0], [[0, 0]], [batch_size], [1.0])\r\n\r\n # After adding the step, decide if further processing is needed.\r\n ensemble_update_ops = [add_step_op]\r\n class_id = self._get_class_id(predictions_dict)\r\n\r\n with ops.control_dependencies([add_step_op]):\r\n if self._is_chief:\r\n dropout_seed = predictions_dict[NUM_TREES_ATTEMPTED]\r\n\r\n # Get accumulated steps and examples for the current layer.\r\n _, _, _, _, acc_examples, acc_steps = (\r\n steps_accumulator.serialize())\r\n acc_examples = math_ops.cast(acc_examples[0], dtypes.int64)\r\n acc_steps = math_ops.cast(acc_steps[0], dtypes.int64)\r\n ensemble_update_ops.append(\r\n num_layer_examples.assign(acc_examples))\r\n ensemble_update_ops.append(num_layer_steps.assign(acc_steps))\r\n # Determine whether we need to update tree ensemble.\r\n examples_per_layer = self._examples_per_layer\r\n if callable(examples_per_layer):\r\n examples_per_layer = examples_per_layer(active_layer)\r\n ensemble_update_ops.append(\r\n control_flow_ops.cond(\r\n acc_examples >= examples_per_layer,\r\n self.make_update_ensemble_fn(ensemble_stamp, training_state,\r\n dropout_seed, class_id),\r\n control_flow_ops.no_op))\r\n\r\n # Note, the loss is calculated from the prediction considering dropouts, so\r\n # that the value might look staggering over steps when the dropout ratio is\r\n # high. 
eval_loss might be referred instead in the aspect of convergence.\r\n return control_flow_ops.group(*ensemble_update_ops)\r\n\r\n def make_update_ensemble_fn(self, ensemble_stamp, training_state,\r\n dropout_seed, class_id):\r\n \"\"\"A method to create the function which updates the tree ensemble.\"\"\"\r\n # Determine learning rate.\r\n learning_rate_tuner = self._learner_config.learning_rate_tuner.WhichOneof(\r\n \"tuner\")\r\n if learning_rate_tuner == \"fixed\" or learning_rate_tuner == \"dropout\":\r\n tuner = getattr(self._learner_config.learning_rate_tuner,\r\n learning_rate_tuner)\r\n learning_rate = tuner.learning_rate\r\n else:\r\n # TODO(nponomareva, soroush) do the line search.\r\n raise ValueError(\"Line search learning rate is not yet supported.\")\r\n\r\n def _update_ensemble():\r\n \"\"\"A method to update the tree ensemble.\"\"\"\r\n # Get next stamp token.\r\n next_ensemble_stamp = ensemble_stamp + 1\r\n # Finalize bias stats.\r\n _, _, _, bias_grads, bias_hess = (\r\n training_state.bias_stats_accumulator.flush(ensemble_stamp,\r\n next_ensemble_stamp))\r\n\r\n # Finalize handler splits.\r\n are_splits_ready_list = []\r\n partition_ids_list = []\r\n gains_list = []\r\n split_info_list = []\r\n\r\n for handler in training_state.handlers:\r\n (are_splits_ready,\r\n partition_ids, gains, split_info) = handler.make_splits(\r\n ensemble_stamp, next_ensemble_stamp, class_id)\r\n are_splits_ready_list.append(are_splits_ready)\r\n partition_ids_list.append(partition_ids)\r\n gains_list.append(gains)\r\n split_info_list.append(split_info)\r\n # Stack all the inputs to one tensor per type.\r\n # This is a workaround for the slowness of graph building in tf.cond.\r\n # See (b/36554864).\r\n split_sizes = array_ops.reshape(\r\n array_ops.shape_n(partition_ids_list), [len(partition_ids_list)])\r\n partition_ids = array_ops.concat(partition_ids_list, axis=0)\r\n gains = array_ops.concat(gains_list, axis=0)\r\n split_infos = array_ops.concat(split_info_list, axis=0)\r\n\r\n # Determine if all splits are ready.\r\n are_all_splits_ready = math_ops.reduce_all(\r\n array_ops.stack(\r\n are_splits_ready_list, axis=0, name=\"stack_handler_readiness\"))\r\n\r\n # Define bias centering update operation.\r\n def _center_bias_fn():\r\n # Center tree ensemble bias.\r\n delta_updates = array_ops.where(bias_hess > 0, -bias_grads / bias_hess,\r\n array_ops.zeros_like(bias_grads))\r\n center_bias = training_ops.center_tree_ensemble_bias(\r\n tree_ensemble_handle=self._ensemble_handle,\r\n stamp_token=ensemble_stamp,\r\n next_stamp_token=next_ensemble_stamp,\r\n delta_updates=delta_updates,\r\n learner_config=self._learner_config_serialized)\r\n return training_state.continue_centering.assign(center_bias)\r\n\r\n # Define ensemble growing operations.\r\n def _grow_ensemble_ready_fn():\r\n # Grow the ensemble given the current candidates.\r\n sizes = array_ops.unstack(split_sizes)\r\n partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))\r\n # When using the oblivious decision tree as weak learner, it produces\r\n # one gain and one split per handler and not number of partitions.\r\n if self._learner_config.weak_learner_type == (\r\n learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE):\r\n sizes = len(training_state.handlers)\r\n\r\n gains_list = list(array_ops.split(gains, sizes, axis=0))\r\n split_info_list = list(array_ops.split(split_infos, sizes, axis=0))\r\n return training_ops.grow_tree_ensemble(\r\n tree_ensemble_handle=self._ensemble_handle,\r\n 
stamp_token=ensemble_stamp,\r\n next_stamp_token=next_ensemble_stamp,\r\n learning_rate=learning_rate,\r\n partition_ids=partition_ids_list,\r\n gains=gains_list,\r\n splits=split_info_list,\r\n learner_config=self._learner_config_serialized,\r\n dropout_seed=dropout_seed,\r\n center_bias=self._center_bias,\r\n max_tree_depth=self._max_tree_depth,\r\n weak_learner_type=self._learner_config.weak_learner_type)\r\n\r\n def _grow_ensemble_not_ready_fn():\r\n # Don't grow the ensemble, just update the stamp.\r\n return training_ops.grow_tree_ensemble(\r\n tree_ensemble_handle=self._ensemble_handle,\r\n stamp_token=ensemble_stamp,\r\n next_stamp_token=next_ensemble_stamp,\r\n learning_rate=0,\r\n partition_ids=[],\r\n gains=[],\r\n splits=[],\r\n learner_config=self._learner_config_serialized,\r\n dropout_seed=dropout_seed,\r\n center_bias=self._center_bias,\r\n max_tree_depth=self._max_tree_depth,\r\n weak_learner_type=self._learner_config.weak_learner_type)\r\n\r\n def _grow_ensemble_fn():\r\n # Conditionally grow an ensemble depending on whether the splits\r\n # from all the handlers are ready.\r\n return control_flow_ops.cond(are_all_splits_ready,\r\n _grow_ensemble_ready_fn,\r\n _grow_ensemble_not_ready_fn)\r\n\r\n # Update ensemble.\r\n update_ops = [are_all_splits_ready]\r\n if self._center_bias:\r\n update_model = control_flow_ops.cond(training_state.continue_centering,\r\n _center_bias_fn, _grow_ensemble_fn)\r\n else:\r\n update_model = _grow_ensemble_fn()\r\n update_ops.append(update_model)\r\n\r\n # Update ensemble stats.\r\n with ops.control_dependencies([update_model]):\r\n stats = training_ops.tree_ensemble_stats(\r\n self._ensemble_handle, stamp_token=next_ensemble_stamp)\r\n update_ops.append(self._finalized_trees.assign(stats.num_trees))\r\n update_ops.append(self._attempted_trees.assign(stats.attempted_trees))\r\n update_ops.append(training_state.num_layers.assign(stats.num_layers))\r\n update_ops.append(training_state.active_tree.assign(stats.active_tree))\r\n update_ops.append(\r\n training_state.active_layer.assign(stats.active_layer))\r\n\r\n # Flush step stats.\r\n update_ops.extend(\r\n training_state.steps_accumulator.flush(ensemble_stamp,\r\n next_ensemble_stamp))\r\n return control_flow_ops.group(*update_ops, name=\"update_ensemble\")\r\n\r\n return _update_ensemble\r\n\r\n def get_number_of_trees_tensor(self):\r\n return self._finalized_trees, self._attempted_trees\r\n\r\n def get_max_tree_depth(self):\r\n return self._max_tree_depth\r\n\r\n def train(self, loss, predictions_dict, labels):\r\n \"\"\"Updates the accumalator stats and grows the ensemble.\r\n\r\n Args:\r\n loss: A scalar tensor representing average loss of examples.\r\n predictions_dict: Dictionary of Rank 2 `Tensor` representing information\r\n about predictions per example.\r\n labels: Rank 2 `Tensor` representing labels per example. 
Has no effect\r\n on the training and is only kept for backward compatibility.\r\n\r\n Returns:\r\n An op that adds a new tree to the ensemble.\r\n\r\n Raises:\r\n ValueError: if inputs are not valid.\r\n \"\"\"\r\n del labels # unused; kept for backward compatibility.\r\n update_op, _, training_state = self.update_stats(loss, predictions_dict)\r\n with ops.control_dependencies(update_op):\r\n return self.increment_step_counter_and_maybe_update_ensemble(\r\n predictions_dict, training_state)\r\n\r\n def _get_weights(self, hessian_shape, hessians):\r\n \"\"\"Derives weights to be used based on hessians and multiclass strategy.\"\"\"\r\n if hessian_shape == tensor_shape.scalar():\r\n # This is tree per class.\r\n weights = hessians\r\n elif len(hessian_shape.dims) == 1:\r\n # This is diagonal hessian.\r\n weights = math_ops.reduce_sum(hessians, axis=1)\r\n else:\r\n # This is full hessian.\r\n weights = math_ops.trace(hessians)\r\n return weights\r\n\r\n def _full_hessian(self, grads, predictions):\r\n \"\"\"Prepares hessians for full-hessian multiclass strategy.\"\"\"\r\n # Because of\r\n # https://github.com/tensorflow/tensorflow/issues/675, we can't just\r\n # compute the full hessian with a single call to gradients, but instead\r\n # must compute it row-by-row.\r\n gradients_list = array_ops.unstack(\r\n grads, num=self._logits_dimension, axis=1)\r\n hessian_rows = []\r\n\r\n for row in range(self._logits_dimension):\r\n # If current row is i, K is number of classes,each row returns a tensor of\r\n # size batch_size x K representing for each example dx_i dx_1, dx_i dx_2\r\n # etc dx_i dx_K\r\n hessian_row = gradients_impl.gradients(\r\n gradients_list[row],\r\n predictions,\r\n name=\"Hessian_%d\" % row,\r\n colocate_gradients_with_ops=False,\r\n gate_gradients=0,\r\n aggregation_method=None)\r\n\r\n # hessian_row is of dimension 1, batch_size, K, => trim first dimension\r\n # to get batch_size x K\r\n hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])\r\n hessian_rows.append(hessian_row)\r\n return hessian_rows\r\n\r\n def _diagonal_hessian(self, grads, predictions):\r\n \"\"\"Prepares hessians for diagonal-hessian multiclass mode.\"\"\"\r\n diag_hessian_list = []\r\n\r\n gradients_list = array_ops.unstack(\r\n grads, num=self._logits_dimension, axis=1)\r\n\r\n for row, row_grads in enumerate(gradients_list):\r\n # If current row is i, K is number of classes,each row returns a tensor of\r\n # size batch_size x K representing for each example dx_i dx_1, dx_1 dx_2\r\n # etc dx_i dx_K\r\n hessian_row = gradients_impl.gradients(\r\n row_grads,\r\n predictions,\r\n name=\"Hessian_%d\" % row,\r\n colocate_gradients_with_ops=False,\r\n gate_gradients=0,\r\n aggregation_method=None)\r\n\r\n # hessian_row is of dimension 1, batch_size, K, => trim first dimension\r\n # to get batch_size x K\r\n hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])\r\n\r\n # Get dx_i^2 for the whole batch.\r\n elem = array_ops.transpose(hessian_row)[row]\r\n diag_hessian_list.append(elem)\r\n\r\n return diag_hessian_list\r\n\r\n def _get_replica_device_setter(self, worker_device):\r\n \"\"\"Creates a replica device setter.\"\"\"\r\n ps_tasks = self._num_ps_replicas\r\n ps_ops = [\r\n \"Variable\",\r\n \"VariableV2\",\r\n \"DecisionTreeEnsembleResourceHandleOp\",\r\n \"StatsAccumulatorScalarResourceHandleOp\",\r\n \"StatsAccumulatorTensorResourceHandleOp\",\r\n ]\r\n ps_strategy = _OpRoundRobinStrategy(ps_ops, ps_tasks)\r\n return device_setter.replica_device_setter(\r\n 
worker_device=worker_device,\r\n ps_tasks=ps_tasks,\r\n merge_devices=True,\r\n ps_ops=ps_ops,\r\n ps_strategy=ps_strategy)\r\n\r\n def _make_update_bias_stats_fn(self, ensemble_stamp, predictions, gradients,\r\n bias_stats_accumulator):\r\n \"\"\"A method to create the function which updates the bias stats.\"\"\"\r\n\r\n def _update_bias_stats():\r\n \"\"\"A method to update the bias stats.\"\"\"\r\n # Get reduced gradients and hessians.\r\n grads_sum = math_ops.reduce_sum(gradients, 0)\r\n hess = gradients_impl.gradients(\r\n grads_sum,\r\n predictions,\r\n name=\"Hessians\",\r\n colocate_gradients_with_ops=False,\r\n gate_gradients=0,\r\n aggregation_method=None)[0]\r\n hess_sum = math_ops.reduce_sum(hess, 0)\r\n\r\n # Accumulate gradients and hessians.\r\n partition_ids = math_ops.range(self._logits_dimension)\r\n feature_ids = array_ops.zeros(\r\n [self._logits_dimension, 2], dtype=dtypes.int64)\r\n\r\n add_stats_op = bias_stats_accumulator.add(\r\n ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum)\r\n return control_flow_ops.group(*[add_stats_op], name=\"update_bias_stats\")\r\n\r\n return _update_bias_stats\r\n",
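A minimal NumPy sketch, for orientation only, of the bias-centering arithmetic inside `_center_bias_fn` above: the delta handed to `training_ops.center_tree_ensemble_bias` is a Newton step, `-grad / hess`, taken only where the accumulated hessian is positive. The array values and the NumPy stand-in are assumptions for illustration; the real op consumes the tensors flushed from the bias stats accumulator.

```python
import numpy as np

# Made-up accumulated per-logit bias gradients and hessians.
bias_grads = np.array([-1.2, 0.4, 0.6])
bias_hess = np.array([3.0, 0.0, 2.0])

# Mirror of: array_ops.where(bias_hess > 0, -bias_grads / bias_hess, zeros_like(...)).
safe_hess = np.where(bias_hess > 0, bias_hess, 1.0)   # guard the zero-hessian slot
delta_updates = np.where(bias_hess > 0, -bias_grads / safe_hess, 0.0)
print(delta_updates)   # -> 0.4, 0.0, -0.3 (one Newton step per logit dimension)
```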
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Classes and methods for processing debugger-decorated graphs.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom six.moves import xrange # pylint: disable=redefined-builtin\r\n\r\nfrom tensorflow.core.framework import graph_pb2\r\nfrom tensorflow.python.framework import op_def_registry\r\nfrom tensorflow.python.platform import tf_logging as logging\r\n\r\n\r\ndef parse_node_or_tensor_name(name):\r\n \"\"\"Get the node name from a string that can be node or tensor name.\r\n\r\n Args:\r\n name: An input node name (e.g., \"node_a\") or tensor name (e.g.,\r\n \"node_a:0\"), as a str.\r\n\r\n Returns:\r\n 1) The node name, as a str. If the input name is a tensor name, i.e.,\r\n consists of a colon, the final colon and the following output slot\r\n will be stripped.\r\n 2) If the input name is a tensor name, the output slot, as an int. If\r\n the input name is not a tensor name, None.\r\n \"\"\"\r\n\r\n if \":\" in name and not name.endswith(\":\"):\r\n node_name = name[:name.rfind(\":\")]\r\n output_slot = int(name[name.rfind(\":\") + 1:])\r\n\r\n return node_name, output_slot\r\n else:\r\n return name, None\r\n\r\n\r\ndef get_node_name(element_name):\r\n node_name, _ = parse_node_or_tensor_name(element_name)\r\n return node_name\r\n\r\n\r\ndef get_output_slot(element_name):\r\n \"\"\"Get the output slot number from the name of a graph element.\r\n\r\n If element_name is a node name without output slot at the end, 0 will be\r\n assumed.\r\n\r\n Args:\r\n element_name: (`str`) name of the graph element in question.\r\n\r\n Returns:\r\n (`int`) output slot number.\r\n \"\"\"\r\n _, output_slot = parse_node_or_tensor_name(element_name)\r\n return output_slot if output_slot is not None else 0\r\n\r\n\r\ndef is_copy_node(node_name):\r\n \"\"\"Determine whether a node name is that of a debug Copy node.\r\n\r\n Such nodes are inserted by TensorFlow core upon request in\r\n RunOptions.debug_options.debug_tensor_watch_opts.\r\n\r\n Args:\r\n node_name: Name of the node.\r\n\r\n Returns:\r\n A bool indicating whether the input argument is the name of a debug Copy\r\n node.\r\n \"\"\"\r\n return node_name.startswith(\"__copy_\")\r\n\r\n\r\ndef is_debug_node(node_name):\r\n \"\"\"Determine whether a node name is that of a debug node.\r\n\r\n Such nodes are inserted by TensorFlow core upon request in\r\n RunOptions.debug_options.debug_tensor_watch_opts.\r\n\r\n Args:\r\n node_name: Name of the node.\r\n\r\n Returns:\r\n A bool indicating whether the input argument is the name of a debug node.\r\n \"\"\"\r\n return node_name.startswith(\"__dbg_\")\r\n\r\n\r\ndef parse_debug_node_name(node_name):\r\n \"\"\"Parse the name of a debug node.\r\n\r\n Args:\r\n node_name: Name of the debug node.\r\n\r\n 
Returns:\r\n 1. Name of the watched node, as a str.\r\n 2. Output slot index of the watched tensor, as an int.\r\n 3. Index of the debug node, as an int.\r\n 4. Name of the debug op, as a str, e.g, \"DebugIdentity\".\r\n\r\n Raises:\r\n ValueError: If the input node name is not a valid debug node name.\r\n \"\"\"\r\n prefix = \"__dbg_\"\r\n\r\n name = node_name\r\n if not name.startswith(prefix):\r\n raise ValueError(\"Invalid prefix in debug node name: '%s'\" % node_name)\r\n\r\n name = name[len(prefix):]\r\n\r\n if name.count(\"_\") < 2:\r\n raise ValueError(\"Invalid debug node name: '%s'\" % node_name)\r\n\r\n debug_op = name[name.rindex(\"_\") + 1:]\r\n name = name[:name.rindex(\"_\")]\r\n\r\n debug_op_index = int(name[name.rindex(\"_\") + 1:])\r\n name = name[:name.rindex(\"_\")]\r\n\r\n if name.count(\":\") != 1:\r\n raise ValueError(\"Invalid tensor name in debug node name: '%s'\" % node_name)\r\n\r\n watched_node_name = name[:name.index(\":\")]\r\n watched_output_slot = int(name[name.index(\":\") + 1:])\r\n\r\n return watched_node_name, watched_output_slot, debug_op_index, debug_op\r\n\r\n\r\nclass GraphTracingReachedDestination(Exception):\r\n pass\r\n\r\n\r\nclass DFSGraphTracer(object):\r\n \"\"\"Graph input tracer using depth-first search.\"\"\"\r\n\r\n def __init__(self,\r\n input_lists,\r\n skip_node_names=None,\r\n destination_node_name=None):\r\n \"\"\"Constructor of _DFSGraphTracer.\r\n\r\n Args:\r\n input_lists: A list of dicts. Each dict is an adjacency (input) map from\r\n the recipient node name as the key and the list of input node names\r\n as the value.\r\n skip_node_names: Optional: a list of node names to skip tracing.\r\n destination_node_name: Optional: destination node name. If not `None`, it\r\n should be the name of a destination not as a str and the graph tracing\r\n will raise GraphTracingReachedDestination as soon as the node has been\r\n reached.\r\n\r\n Raises:\r\n GraphTracingReachedDestination: if stop_at_node_name is not None and\r\n the specified node is reached.\r\n \"\"\"\r\n\r\n self._input_lists = input_lists\r\n self._skip_node_names = skip_node_names\r\n\r\n self._inputs = []\r\n self._visited_nodes = []\r\n self._depth_count = 0\r\n self._depth_list = []\r\n\r\n self._destination_node_name = destination_node_name\r\n\r\n def trace(self, graph_element_name):\r\n \"\"\"Trace inputs.\r\n\r\n Args:\r\n graph_element_name: Name of the node or an output tensor of the node, as a\r\n str.\r\n\r\n Raises:\r\n GraphTracingReachedDestination: if destination_node_name of this tracer\r\n object is not None and the specified node is reached.\r\n \"\"\"\r\n self._depth_count += 1\r\n\r\n node_name = get_node_name(graph_element_name)\r\n if node_name == self._destination_node_name:\r\n raise GraphTracingReachedDestination()\r\n\r\n if node_name in self._skip_node_names:\r\n return\r\n if node_name in self._visited_nodes:\r\n return\r\n\r\n self._visited_nodes.append(node_name)\r\n\r\n for input_list in self._input_lists:\r\n if node_name not in input_list:\r\n continue\r\n for inp in input_list[node_name]:\r\n if get_node_name(inp) in self._visited_nodes:\r\n continue\r\n self._inputs.append(inp)\r\n self._depth_list.append(self._depth_count)\r\n self.trace(inp)\r\n\r\n self._depth_count -= 1\r\n\r\n def inputs(self):\r\n return self._inputs\r\n\r\n def depth_list(self):\r\n return self._depth_list\r\n\r\n\r\ndef _infer_device_name(graph_def):\r\n \"\"\"Infer device name from a partition GraphDef.\"\"\"\r\n device_name = None\r\n for node in 
graph_def.node:\r\n if node.device:\r\n device_name = node.device\r\n break\r\n if device_name is None:\r\n logging.warn(\r\n \"Failed to infer device name from partition GraphDef: none of the \"\r\n \"nodes of the GraphDef has a non-empty device name.\")\r\n return device_name\r\n\r\n\r\nclass DebugGraph(object):\r\n \"\"\"Represents a debugger-decorated graph.\"\"\"\r\n\r\n def __init__(self, debug_graph_def, device_name=None):\r\n self._debug_graph_def = debug_graph_def\r\n self._non_debug_graph_def = None\r\n\r\n self._node_attributes = {}\r\n self._node_inputs = {}\r\n self._node_reversed_ref_inputs = {}\r\n self._node_ctrl_inputs = {}\r\n self._node_recipients = {}\r\n self._node_ctrl_recipients = {}\r\n self._node_devices = {}\r\n self._node_op_types = {}\r\n self._copy_send_nodes = []\r\n self._ref_args = {}\r\n\r\n self._device_name = device_name\r\n if not self._device_name:\r\n self._device_name = _infer_device_name(debug_graph_def)\r\n\r\n for node in debug_graph_def.node:\r\n self._process_debug_graph_node(node)\r\n\r\n self._prune_non_control_edges_of_debug_ops()\r\n self._prune_control_edges_of_debug_ops()\r\n self._prune_nodes_from_input_and_recipient_maps(self._get_copy_nodes())\r\n\r\n self._populate_recipient_maps()\r\n\r\n def _process_debug_graph_node(self, node):\r\n \"\"\"Process a node from the debug GraphDef.\r\n\r\n Args:\r\n node: (NodeDef) A partition-graph node to be processed.\r\n\r\n Raises:\r\n ValueError: If duplicate node names are encountered.\r\n \"\"\"\r\n if is_debug_node(node.name):\r\n # This is a debug node. Parse the node name and retrieve the\r\n # information about debug watches on tensors. But do not include\r\n # the node in the graph.\r\n return\r\n\r\n if node.name in self._node_inputs:\r\n raise ValueError(\"Duplicate node name on device %s: '%s'\" %\r\n (self._device_name, node.name))\r\n\r\n self._node_attributes[node.name] = node.attr\r\n\r\n self._node_inputs[node.name] = []\r\n self._node_ctrl_inputs[node.name] = []\r\n self._node_recipients[node.name] = []\r\n self._node_ctrl_recipients[node.name] = []\r\n\r\n if node.name not in self._node_devices:\r\n self._node_devices[node.name] = set()\r\n self._node_devices[node.name].add(\r\n node.device if node.device else self._device_name)\r\n self._node_op_types[node.name] = node.op\r\n self._ref_args[node.name] = self._get_ref_args(node)\r\n\r\n for inp in node.input:\r\n if is_copy_node(inp) and (node.op == \"_Send\" or node.op == \"_Retval\"):\r\n self._copy_send_nodes.append(node.name)\r\n\r\n if inp.startswith(\"^\"):\r\n cinp = inp[1:]\r\n self._node_ctrl_inputs[node.name].append(cinp)\r\n else:\r\n self._node_inputs[node.name].append(inp)\r\n\r\n def _get_ref_args(self, node):\r\n \"\"\"Determine whether an input of an op is ref-type.\r\n\r\n Args:\r\n node: A `NodeDef`.\r\n\r\n Returns:\r\n A list of the arg names (as strs) that are ref-type.\r\n \"\"\"\r\n op_def = op_def_registry.get_registered_ops().get(node.op)\r\n ref_args = []\r\n if op_def:\r\n for i, output_arg in enumerate(op_def.output_arg):\r\n if output_arg.is_ref:\r\n arg_name = node.name if i == 0 else (\"%s:%d\" % (node.name, i))\r\n ref_args.append(arg_name)\r\n return ref_args\r\n\r\n def _get_copy_nodes(self):\r\n \"\"\"Find all Copy nodes in the loaded graph.\"\"\"\r\n copy_nodes = []\r\n for node in self._node_inputs:\r\n if is_copy_node(node):\r\n copy_nodes.append(node)\r\n return copy_nodes\r\n\r\n def _prune_non_control_edges_of_debug_ops(self):\r\n \"\"\"Prune (non-control) edges related to debug ops.\r\n\r\n 
Prune the Copy ops and associated _Send ops inserted by the debugger out\r\n from the non-control inputs and output recipients map. Replace the inputs\r\n and recipients with original ones.\r\n \"\"\"\r\n for node in self._node_inputs:\r\n inputs = self._node_inputs[node]\r\n\r\n for i in xrange(len(inputs)):\r\n inp = inputs[i]\r\n if is_copy_node(inp):\r\n # Find the input to the Copy node, which should be the original\r\n # input to the node.\r\n orig_inp = self._node_inputs[inp][0]\r\n inputs[i] = orig_inp\r\n\r\n def _prune_control_edges_of_debug_ops(self):\r\n \"\"\"Prune control edges related to the debug ops.\"\"\"\r\n for node in self._node_ctrl_inputs:\r\n ctrl_inputs = self._node_ctrl_inputs[node]\r\n debug_op_inputs = []\r\n for ctrl_inp in ctrl_inputs:\r\n if is_debug_node(ctrl_inp):\r\n debug_op_inputs.append(ctrl_inp)\r\n for debug_op_inp in debug_op_inputs:\r\n ctrl_inputs.remove(debug_op_inp)\r\n\r\n def _populate_recipient_maps(self):\r\n \"\"\"Populate the map from node name to recipient(s) of its output(s).\r\n\r\n This method also populates the input map based on reversed ref edges.\r\n \"\"\"\r\n for node in self._node_inputs:\r\n inputs = self._node_inputs[node]\r\n for inp in inputs:\r\n inp = get_node_name(inp)\r\n if inp not in self._node_recipients:\r\n self._node_recipients[inp] = []\r\n self._node_recipients[inp].append(node)\r\n\r\n if inp in self._ref_args:\r\n if inp not in self._node_reversed_ref_inputs:\r\n self._node_reversed_ref_inputs[inp] = []\r\n self._node_reversed_ref_inputs[inp].append(node)\r\n\r\n for node in self._node_ctrl_inputs:\r\n ctrl_inputs = self._node_ctrl_inputs[node]\r\n for ctrl_inp in ctrl_inputs:\r\n if ctrl_inp in self._copy_send_nodes:\r\n continue\r\n\r\n if ctrl_inp not in self._node_ctrl_recipients:\r\n self._node_ctrl_recipients[ctrl_inp] = []\r\n self._node_ctrl_recipients[ctrl_inp].append(node)\r\n\r\n def _prune_nodes_from_input_and_recipient_maps(self, nodes_to_prune):\r\n \"\"\"Prune nodes out of input and recipient maps.\r\n\r\n Args:\r\n nodes_to_prune: (`list` of `str`) Names of the nodes to be pruned.\r\n \"\"\"\r\n for node in nodes_to_prune:\r\n del self._node_inputs[node]\r\n del self._node_ctrl_inputs[node]\r\n del self._node_recipients[node]\r\n del self._node_ctrl_recipients[node]\r\n\r\n def _reconstruct_non_debug_graph_def(self):\r\n \"\"\"Reconstruct non-debug GraphDef.\r\n\r\n Non-debug GraphDef means the original GraphDef without the Copy* and Debug\r\n nodes inserted by the debugger.\r\n \"\"\"\r\n if self._non_debug_graph_def:\r\n return\r\n\r\n self._non_debug_graph_def = graph_pb2.GraphDef()\r\n for node in self._debug_graph_def.node:\r\n if is_copy_node(node.name) or is_debug_node(node.name):\r\n continue\r\n\r\n new_node = self._non_debug_graph_def.node.add()\r\n new_node.CopyFrom(node)\r\n\r\n # Redo the list of inputs, because in _debug_graph_def, the list can\r\n # consist of Copy* and Debug* nodes inserted by the debugger. 
Those will\r\n # be replaced with the original inputs here.\r\n del new_node.input[:]\r\n for inp in self._node_inputs[node.name]:\r\n new_node.input.append(inp)\r\n for ctrl_inp in self._node_ctrl_inputs[node.name]:\r\n new_node.input.append(\"^\" + ctrl_inp)\r\n\r\n @property\r\n def device_name(self):\r\n return self._device_name\r\n\r\n @property\r\n def debug_graph_def(self):\r\n \"\"\"The debugger-decorated GraphDef.\"\"\"\r\n return self._debug_graph_def\r\n\r\n @property\r\n def non_debug_graph_def(self):\r\n \"\"\"The GraphDef without the Copy* and Debug* nodes added by the debugger.\"\"\"\r\n self._reconstruct_non_debug_graph_def()\r\n return self._non_debug_graph_def\r\n\r\n @property\r\n def node_devices(self):\r\n return self._node_devices\r\n\r\n @property\r\n def node_op_types(self):\r\n return self._node_op_types\r\n\r\n @property\r\n def node_attributes(self):\r\n return self._node_attributes\r\n\r\n @property\r\n def node_inputs(self):\r\n return self._node_inputs\r\n\r\n @property\r\n def node_ctrl_inputs(self):\r\n return self._node_ctrl_inputs\r\n\r\n @property\r\n def node_reversed_ref_inputs(self):\r\n return self._node_reversed_ref_inputs\r\n\r\n @property\r\n def node_recipients(self):\r\n return self._node_recipients\r\n\r\n @property\r\n def node_ctrl_recipients(self):\r\n return self._node_ctrl_recipients\r\n\r\n\r\ndef reconstruct_non_debug_graph_def(debug_graph_def):\r\n \"\"\"Reconstruct original (non-debugger-decorated) partition GraphDef.\r\n\r\n This method strips the input `tf.GraphDef` of the Copy* and Debug*-type nodes\r\n inserted by the debugger.\r\n\r\n The reconstructed partition graph is identical to the original (i.e.,\r\n non-debugger-decorated) partition graph except in the following respects:\r\n 1) The exact names of the runtime-inserted internal nodes may differ.\r\n These include _Send, _Recv, _HostSend, _HostRecv, _Retval ops.\r\n 2) As a consequence of 1, the nodes that receive input directly from such\r\n send- and recv-type ops will have different input names.\r\n 3) The parallel_iteration attribute of while-loop Enter ops are set to 1.\r\n\r\n Args:\r\n debug_graph_def: The debugger-decorated `tf.GraphDef`, with the\r\n debugger-inserted Copy* and Debug* nodes.\r\n\r\n Returns:\r\n The reconstructed `tf.GraphDef` stripped of the debugger-inserted nodes.\r\n \"\"\"\r\n return DebugGraph(debug_graph_def).non_debug_graph_def\r\n",
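A hedged usage sketch for the name-parsing helpers defined in the entry above. The import path is an assumption (the entry does not state where the file lives; in TensorFlow the tfdbg library ships it as `tensorflow.python.debug.lib.debug_graphs`); the expected return values follow directly from the functions as written.

```python
from tensorflow.python.debug.lib import debug_graphs  # assumed module path

# Tensor names carry an output slot after the final colon; bare node names do not.
assert debug_graphs.parse_node_or_tensor_name("node_a:0") == ("node_a", 0)
assert debug_graphs.parse_node_or_tensor_name("node_a") == ("node_a", None)

# Debugger-inserted nodes follow "__dbg_<tensor name>_<debug op index>_<debug op>".
watched, out_slot, op_index, debug_op = debug_graphs.parse_debug_node_name(
    "__dbg_node_a:0_0_DebugIdentity")
assert (watched, out_slot, op_index, debug_op) == ("node_a", 0, 0, "DebugIdentity")
```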
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"The step function abstraction represents a single training step.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.eager import backprop\r\nfrom tensorflow.python.training import optimizer as optimizer_lib\r\n\r\n\r\nclass Step(object):\r\n \"\"\"Interface for performing each step of a training algorithm.\"\"\"\r\n\r\n def __init__(self, distribution):\r\n self._distribution = distribution\r\n\r\n @property\r\n def distribution(self):\r\n return self._distribution\r\n\r\n def __call__(self):\r\n \"\"\"Perform one step of this training algorithm.\"\"\"\r\n raise NotImplementedError(\"must be implemented in descendants\")\r\n\r\n # TODO(priyag): Add an method to access initialization and finalize ops.\r\n\r\n\r\nclass StandardInputStep(Step):\r\n \"\"\"Step with a standard implementation of input handling.\r\n\r\n Args:\r\n dataset_fn: a function that returns a tf.data Dataset that produces the\r\n input for the model.\r\n \"\"\"\r\n\r\n def __init__(self, dataset_fn, distribution):\r\n super(StandardInputStep, self).__init__(distribution)\r\n self._distributed_input = distribution.distribute_dataset(dataset_fn)\r\n self._iterator = self._distributed_input.make_one_shot_iterator()\r\n\r\n\r\nclass StandardSingleLossStep(StandardInputStep):\r\n \"\"\"A step function that implements a training step for a feed forward network.\r\n\r\n An instance of this class is intended to be used as a callable:\r\n\r\n ```python\r\n ...\r\n step = step_fn.StandardSingleLossStep(\r\n dataset, loss_fn, optimizer, distribution)\r\n\r\n # Run a single training step on a given DistributionStrategy:\r\n step(distribution)\r\n ...\r\n ```\r\n\r\n Args:\r\n dataset_fn: a function that returns a tf.data Dataset that produces the\r\n input for the model.\r\n loss_fn: a function that takes a context and inputs as arguments. It returns\r\n the loss for those inputs. 
`context` is an instance of\r\n `values.MultiStepContext` that will be passed when `loss_fn` is run.\r\n `context` can be used to specify the outputs to be returned from\r\n `loss_fn`, among other things.\r\n optimizer: an optimizer that implements an update rule.\r\n distribution: a `DistributionStrategy` object.\r\n \"\"\"\r\n\r\n def __init__(self, dataset_fn, loss_fn, optimizer, distribution,\r\n iterations_per_step=1):\r\n super(StandardSingleLossStep, self).__init__(dataset_fn, distribution)\r\n self._loss_fn = loss_fn\r\n self._optimizer = optimizer\r\n self._is_run_concurrently = False\r\n self._iterations_per_step = iterations_per_step\r\n\r\n def __call__(self):\r\n with self._distribution.scope():\r\n def step_fn(ctx, *inputs):\r\n \"\"\"Function to run one iteration with one input.\"\"\"\r\n gradients_fn = backprop.implicit_grad(self._loss_fn)\r\n gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn)\r\n\r\n grads_and_vars = self.distribution.call_for_each_tower(\r\n gradients_fn,\r\n ctx, *inputs,\r\n run_concurrently=self._is_run_concurrently)\r\n # If threads use layers, then we need to run the first step\r\n # sequentially, so that layers.build() is not executed in parallel.\r\n # Otherwise, multiple sets of mirrored variables are going to be\r\n # created.\r\n self._is_run_concurrently = True\r\n return self._optimizer._distributed_apply( # pylint: disable=protected-access\r\n self.distribution, grads_and_vars)\r\n\r\n # TODO(priyag): Return the outputs, context, etc as well.\r\n ctx = self.distribution.run_steps_on_dataset(\r\n step_fn, self._iterator, self._iterations_per_step)\r\n return ctx.run_op\r\n",
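`StandardSingleLossStep.__call__` above boils down to: compute (gradient, variable) pairs for one loss on one batch, then let the optimizer apply them, with the DistributionStrategy handling placement. The sketch below shows only that core pattern in plain NumPy; the names (`grad_fn`, `sgd_apply`) and the tiny linear model are illustrative stand-ins, not TensorFlow API, and none of the tf.data / tower / `MultiStepContext` plumbing is reproduced.

```python
import numpy as np

w = np.array([0.0, 0.0])                   # model "variables"

def grad_fn(w, x, y):                      # stand-in for backprop.implicit_grad(loss_fn)
    residual = x @ w - y                   # squared-error loss on a linear model
    grad = 2.0 * x.T @ residual / len(y)
    return [(grad, w)]                     # list of (gradient, variable) pairs

def sgd_apply(grads_and_vars, lr=0.1):     # stand-in for optimizer.apply_gradients
    return [v - lr * g for g, v in grads_and_vars]

x_batch = np.array([[1.0, 2.0], [3.0, 4.0]])
y_batch = np.array([1.0, 2.0])
(w,) = sgd_apply(grad_fn(w, x_batch, y_batch))   # one "training step"
```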
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Boosted Trees estimators.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.data.ops import dataset_ops\r\nfrom tensorflow.python.estimator import estimator\r\nfrom tensorflow.python.estimator.canned import boosted_trees as canned_boosted_trees\r\nfrom tensorflow.python.estimator.canned import head as head_lib\r\n\r\n\r\ndef _validate_input_fn_and_repeat_dataset(train_input_fn):\r\n \"\"\"Validates whether the input_fn is valid, and repeat() if tf.Dataset.\"\"\"\r\n def _input_fn():\r\n result_input_fn = train_input_fn()\r\n if isinstance(result_input_fn, dataset_ops.Dataset):\r\n return result_input_fn.repeat()\r\n return result_input_fn\r\n\r\n return _input_fn\r\n\r\n\r\ndef _is_classification_head(head):\r\n \"\"\"Infers if the head is a classification head.\"\"\"\r\n # Check using all classification heads defined in canned/head.py. However, it\r\n # is not a complete list - it does not check for other classification heads\r\n # not defined in the head library.\r\n # pylint: disable=protected-access\r\n return isinstance(head,\r\n (head_lib._BinaryLogisticHeadWithSigmoidCrossEntropyLoss,\r\n head_lib._MultiClassHeadWithSoftmaxCrossEntropyLoss))\r\n # pylint: enable=protected-access\r\n\r\n\r\nclass _BoostedTreesEstimator(canned_boosted_trees._BoostedTreesBase): # pylint: disable=protected-access\r\n \"\"\"An Estimator for Tensorflow Boosted Trees models.\"\"\"\r\n\r\n def __init__(self,\r\n feature_columns,\r\n n_batches_per_layer,\r\n head,\r\n model_dir=None,\r\n weight_column=None,\r\n n_trees=100,\r\n max_depth=6,\r\n learning_rate=0.1,\r\n l1_regularization=0.,\r\n l2_regularization=0.,\r\n tree_complexity=0.,\r\n min_node_weight=0.,\r\n config=None,\r\n center_bias=False,\r\n pruning_mode='none'):\r\n \"\"\"Initializes a `BoostedTreesEstimator` instance.\r\n\r\n Args:\r\n feature_columns: An iterable containing all the feature columns used by\r\n the model. All items in the set should be instances of classes derived\r\n from `FeatureColumn`.\r\n n_batches_per_layer: the number of batches to collect statistics per\r\n layer.\r\n head: the `Head` instance defined for Estimator.\r\n model_dir: Directory to save model parameters, graph and etc. This can\r\n also be used to load checkpoints from the directory into a estimator\r\n to continue training a previously saved model.\r\n weight_column: A string or a `_NumericColumn` created by\r\n `tf.feature_column.numeric_column` defining feature column representing\r\n weights. It is used to downweight or boost examples during training. It\r\n will be multiplied by the loss of the example. If it is a string, it is\r\n used as a key to fetch weight tensor from the `features`. 
If it is a\r\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\r\n then weight_column.normalizer_fn is applied on it to get weight tensor.\r\n n_trees: number trees to be created.\r\n max_depth: maximum depth of the tree to grow.\r\n learning_rate: shrinkage parameter to be used when a tree added to the\r\n model.\r\n l1_regularization: regularization multiplier applied to the absolute\r\n weights of the tree leafs.\r\n l2_regularization: regularization multiplier applied to the square weights\r\n of the tree leafs.\r\n tree_complexity: regularization factor to penalize trees with more leaves.\r\n min_node_weight: minimum hessian a node must have for a split to be\r\n considered. The value will be compared with sum(leaf_hessian)/\r\n (batch_size * n_batches_per_layer).\r\n config: `RunConfig` object to configure the runtime settings.\r\n center_bias: Whether bias centering needs to occur. Bias centering refers\r\n to the first node in the very first tree returning the prediction that\r\n is aligned with the original labels distribution. For example, for\r\n regression problems, the first node will return the mean of the labels.\r\n For binary classification problems, it will return a logit for a prior\r\n probability of label 1.\r\n pruning_mode: one of 'none', 'pre', 'post' to indicate no pruning, pre-\r\n pruning (do not split a node if not enough gain is observed) and post\r\n pruning (build the tree up to a max depth and then prune branches with\r\n negative gain). For pre and post pruning, you MUST provide\r\n tree_complexity >0.\r\n\r\n Raises:\r\n ValueError: when wrong arguments are given or unsupported functionalities\r\n are requested.\r\n \"\"\"\r\n # HParams for the model.\r\n # pylint: disable=protected-access\r\n tree_hparams = canned_boosted_trees._TreeHParams(\r\n n_trees, max_depth, learning_rate, l1_regularization, l2_regularization,\r\n tree_complexity, min_node_weight, center_bias, pruning_mode)\r\n\r\n def _model_fn(features, labels, mode, config):\r\n return canned_boosted_trees._bt_model_fn(\r\n features,\r\n labels,\r\n mode,\r\n head,\r\n feature_columns,\r\n tree_hparams,\r\n n_batches_per_layer,\r\n config=config)\r\n\r\n super(_BoostedTreesEstimator, self).__init__(\r\n model_fn=_model_fn,\r\n model_dir=model_dir,\r\n config=config,\r\n feature_columns=feature_columns,\r\n head=head,\r\n center_bias=center_bias,\r\n is_classification=_is_classification_head(head))\r\n # pylint: enable=protected-access\r\n\r\n\r\ndef boosted_trees_classifier_train_in_memory(\r\n train_input_fn,\r\n feature_columns,\r\n model_dir=None,\r\n n_classes=canned_boosted_trees._HOLD_FOR_MULTI_CLASS_SUPPORT,\r\n weight_column=None,\r\n label_vocabulary=None,\r\n n_trees=100,\r\n max_depth=6,\r\n learning_rate=0.1,\r\n l1_regularization=0.,\r\n l2_regularization=0.,\r\n tree_complexity=0.,\r\n min_node_weight=0.,\r\n config=None,\r\n train_hooks=None,\r\n center_bias=False,\r\n pruning_mode='none'):\r\n \"\"\"Trains a boosted tree classifier with in memory dataset.\r\n\r\n Example:\r\n\r\n ```python\r\n bucketized_feature_1 = bucketized_column(\r\n numeric_column('feature_1'), BUCKET_BOUNDARIES_1)\r\n bucketized_feature_2 = bucketized_column(\r\n numeric_column('feature_2'), BUCKET_BOUNDARIES_2)\r\n\r\n def train_input_fn():\r\n dataset = create-dataset-from-training-data\r\n # This is tf.data.Dataset of a tuple of feature dict and label.\r\n # e.g. 
Dataset.zip((Dataset.from_tensors({'f1': f1_array, ...}),\r\n # Dataset.from_tensors(label_array)))\r\n # The returned Dataset shouldn't be batched.\r\n # If Dataset repeats, only the first repetition would be used for training.\r\n return dataset\r\n\r\n classifier = boosted_trees_classifier_train_in_memory(\r\n train_input_fn,\r\n feature_columns=[bucketized_feature_1, bucketized_feature_2],\r\n n_trees=100,\r\n ... <some other params>\r\n )\r\n\r\n def input_fn_eval():\r\n ...\r\n return dataset\r\n\r\n metrics = classifier.evaluate(input_fn=input_fn_eval, steps=10)\r\n ```\r\n\r\n Args:\r\n train_input_fn: the input function returns a dataset containing a single\r\n epoch of *unbatched* features and labels.\r\n feature_columns: An iterable containing all the feature columns used by\r\n the model. All items in the set should be instances of classes derived\r\n from `FeatureColumn`.\r\n model_dir: Directory to save model parameters, graph and etc. This can\r\n also be used to load checkpoints from the directory into a estimator\r\n to continue training a previously saved model.\r\n n_classes: number of label classes. Default is binary classification.\r\n Multiclass support is not yet implemented.\r\n weight_column: A string or a `_NumericColumn` created by\r\n `tf.feature_column.numeric_column` defining feature column representing\r\n weights. It is used to downweight or boost examples during training. It\r\n will be multiplied by the loss of the example. If it is a string, it is\r\n used as a key to fetch weight tensor from the `features`. If it is a\r\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\r\n then weight_column.normalizer_fn is applied on it to get weight tensor.\r\n label_vocabulary: A list of strings represents possible label values. If\r\n given, labels must be string type and have any value in\r\n `label_vocabulary`. If it is not given, that means labels are\r\n already encoded as integer or float within [0, 1] for `n_classes=2` and\r\n encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .\r\n Also there will be errors if vocabulary is not provided and labels are\r\n string.\r\n n_trees: number trees to be created.\r\n max_depth: maximum depth of the tree to grow.\r\n learning_rate: shrinkage parameter to be used when a tree added to the\r\n model.\r\n l1_regularization: regularization multiplier applied to the absolute\r\n weights of the tree leafs.\r\n l2_regularization: regularization multiplier applied to the square weights\r\n of the tree leafs.\r\n tree_complexity: regularization factor to penalize trees with more leaves.\r\n min_node_weight: minimum hessian a node must have for a split to be\r\n considered. The value will be compared with sum(leaf_hessian)/\r\n (batch_size * n_batches_per_layer).\r\n config: `RunConfig` object to configure the runtime settings.\r\n train_hooks: a list of Hook instances to be passed to estimator.train()\r\n center_bias: Whether bias centering needs to occur. Bias centering refers\r\n to the first node in the very first tree returning the prediction that\r\n is aligned with the original labels distribution. 
For example, for\r\n regression problems, the first node will return the mean of the labels.\r\n For binary classification problems, it will return a logit for a prior\r\n probability of label 1.\r\n pruning_mode: one of 'none', 'pre', 'post' to indicate no pruning, pre-\r\n pruning (do not split a node if not enough gain is observed) and post\r\n pruning (build the tree up to a max depth and then prune branches with\r\n negative gain). For pre and post pruning, you MUST provide\r\n tree_complexity >0.\r\n\r\n Returns:\r\n a `BoostedTreesClassifier` instance created with the given arguments and\r\n trained with the data loaded up on memory from the input_fn.\r\n\r\n Raises:\r\n ValueError: when wrong arguments are given or unsupported functionalities\r\n are requested.\r\n \"\"\"\r\n # pylint: disable=protected-access\r\n # TODO(nponomareva): Support multi-class cases.\r\n if n_classes == canned_boosted_trees._HOLD_FOR_MULTI_CLASS_SUPPORT:\r\n n_classes = 2\r\n head, closed_form = (\r\n canned_boosted_trees._create_classification_head_and_closed_form(\r\n n_classes, weight_column, label_vocabulary=label_vocabulary))\r\n\r\n # HParams for the model.\r\n tree_hparams = canned_boosted_trees._TreeHParams(\r\n n_trees, max_depth, learning_rate, l1_regularization, l2_regularization,\r\n tree_complexity, min_node_weight, center_bias, pruning_mode)\r\n\r\n def _model_fn(features, labels, mode, config):\r\n return canned_boosted_trees._bt_model_fn(\r\n features,\r\n labels,\r\n mode,\r\n head,\r\n feature_columns,\r\n tree_hparams,\r\n n_batches_per_layer=1,\r\n config=config,\r\n closed_form_grad_and_hess_fn=closed_form,\r\n train_in_memory=True)\r\n\r\n in_memory_classifier = estimator.Estimator(\r\n model_fn=_model_fn, model_dir=model_dir, config=config)\r\n\r\n in_memory_classifier.train(\r\n input_fn=_validate_input_fn_and_repeat_dataset(train_input_fn),\r\n hooks=train_hooks)\r\n\r\n return in_memory_classifier\r\n # pylint: enable=protected-access\r\n\r\n\r\ndef boosted_trees_regressor_train_in_memory(\r\n train_input_fn,\r\n feature_columns,\r\n model_dir=None,\r\n label_dimension=canned_boosted_trees._HOLD_FOR_MULTI_DIM_SUPPORT,\r\n weight_column=None,\r\n n_trees=100,\r\n max_depth=6,\r\n learning_rate=0.1,\r\n l1_regularization=0.,\r\n l2_regularization=0.,\r\n tree_complexity=0.,\r\n min_node_weight=0.,\r\n config=None,\r\n train_hooks=None,\r\n center_bias=False,\r\n pruning_mode='none'):\r\n \"\"\"Trains a boosted tree regressor with in memory dataset.\r\n\r\n Example:\r\n\r\n ```python\r\n bucketized_feature_1 = bucketized_column(\r\n numeric_column('feature_1'), BUCKET_BOUNDARIES_1)\r\n bucketized_feature_2 = bucketized_column(\r\n numeric_column('feature_2'), BUCKET_BOUNDARIES_2)\r\n\r\n def train_input_fn():\r\n dataset = create-dataset-from-training-data\r\n # This is tf.data.Dataset of a tuple of feature dict and label.\r\n # e.g. Dataset.zip((Dataset.from_tensors({'f1': f1_array, ...}),\r\n # Dataset.from_tensors(label_array)))\r\n # The returned Dataset shouldn't be batched.\r\n # If Dataset repeats, only the first repetition would be used for training.\r\n return dataset\r\n\r\n regressor = boosted_trees_regressor_train_in_memory(\r\n train_input_fn,\r\n feature_columns=[bucketized_feature_1, bucketized_feature_2],\r\n n_trees=100,\r\n ... 
<some other params>\r\n )\r\n\r\n def input_fn_eval():\r\n ...\r\n return dataset\r\n\r\n metrics = regressor.evaluate(input_fn=input_fn_eval, steps=10)\r\n ```\r\n\r\n Args:\r\n train_input_fn: the input function returns a dataset containing a single\r\n epoch of *unbatched* features and labels.\r\n feature_columns: An iterable containing all the feature columns used by\r\n the model. All items in the set should be instances of classes derived\r\n from `FeatureColumn`.\r\n model_dir: Directory to save model parameters, graph and etc. This can\r\n also be used to load checkpoints from the directory into a estimator\r\n to continue training a previously saved model.\r\n label_dimension: Number of regression targets per example.\r\n Multi-dimensional support is not yet implemented.\r\n weight_column: A string or a `_NumericColumn` created by\r\n `tf.feature_column.numeric_column` defining feature column representing\r\n weights. It is used to downweight or boost examples during training. It\r\n will be multiplied by the loss of the example. If it is a string, it is\r\n used as a key to fetch weight tensor from the `features`. If it is a\r\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\r\n then weight_column.normalizer_fn is applied on it to get weight tensor.\r\n n_trees: number trees to be created.\r\n max_depth: maximum depth of the tree to grow.\r\n learning_rate: shrinkage parameter to be used when a tree added to the\r\n model.\r\n l1_regularization: regularization multiplier applied to the absolute\r\n weights of the tree leafs.\r\n l2_regularization: regularization multiplier applied to the square weights\r\n of the tree leafs.\r\n tree_complexity: regularization factor to penalize trees with more leaves.\r\n min_node_weight: minimum hessian a node must have for a split to be\r\n considered. The value will be compared with sum(leaf_hessian)/\r\n (batch_size * n_batches_per_layer).\r\n config: `RunConfig` object to configure the runtime settings.\r\n train_hooks: a list of Hook instances to be passed to estimator.train().\r\n center_bias: Whether bias centering needs to occur. Bias centering refers\r\n to the first node in the very first tree returning the prediction that\r\n is aligned with the original labels distribution. For example, for\r\n regression problems, the first node will return the mean of the labels.\r\n For binary classification problems, it will return a logit for a prior\r\n probability of label 1.\r\n pruning_mode: one of 'none', 'pre', 'post' to indicate no pruning, pre-\r\n pruning (do not split a node if not enough gain is observed) and post\r\n pruning (build the tree up to a max depth and then prune branches with\r\n negative gain). 
For pre and post pruning, you MUST provide\r\n tree_complexity >0.\r\n\r\n Returns:\r\n a `BoostedTreesClassifier` instance created with the given arguments and\r\n trained with the data loaded up on memory from the input_fn.\r\n\r\n Raises:\r\n ValueError: when wrong arguments are given or unsupported functionalities\r\n are requested.\r\n \"\"\"\r\n # pylint: disable=protected-access\r\n # TODO(nponomareva): Extend it to multi-dimension cases.\r\n if label_dimension == canned_boosted_trees._HOLD_FOR_MULTI_DIM_SUPPORT:\r\n label_dimension = 1\r\n head = canned_boosted_trees._create_regression_head(label_dimension,\r\n weight_column)\r\n\r\n # HParams for the model.\r\n tree_hparams = canned_boosted_trees._TreeHParams(\r\n n_trees, max_depth, learning_rate, l1_regularization, l2_regularization,\r\n tree_complexity, min_node_weight, center_bias, pruning_mode)\r\n\r\n def _model_fn(features, labels, mode, config):\r\n return canned_boosted_trees._bt_model_fn(\r\n features,\r\n labels,\r\n mode,\r\n head,\r\n feature_columns,\r\n tree_hparams,\r\n n_batches_per_layer=1,\r\n config=config,\r\n train_in_memory=True)\r\n\r\n in_memory_regressor = estimator.Estimator(\r\n model_fn=_model_fn, model_dir=model_dir, config=config)\r\n\r\n in_memory_regressor.train(\r\n input_fn=_validate_input_fn_and_repeat_dataset(train_input_fn),\r\n hooks=train_hooks)\r\n\r\n return in_memory_regressor\r\n # pylint: enable=protected-access\r\n",
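A hedged sketch of the input contract shared by both train-in-memory helpers above, assuming a TensorFlow 1.x install where `tf.data.Dataset` is the public alias of the `dataset_ops.Dataset` type checked by `_validate_input_fn_and_repeat_dataset`; the feature name and values are made up.

```python
import tensorflow as tf

def train_input_fn():
    # A single epoch of *unbatched* (features, label) pairs, per the docstrings above.
    features = {"f1": [1.0, 2.0, 3.0, 4.0]}
    labels = [0, 1, 0, 1]
    return tf.data.Dataset.from_tensor_slices((features, labels))

# The helpers route this through _validate_input_fn_and_repeat_dataset, which
# returns a new input_fn whose Dataset is .repeat()-ed; only the first
# repetition of the data influences training, the repeat simply keeps the
# in-memory estimator fed until it stops on its own.
```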
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Handles function calls, by generating compiled function names and calls.\r\n\r\nNote: this transformer does not rename the top level object being converted;\r\nthat is the caller's responsibility.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom collections import namedtuple\r\n\r\nimport gast\r\n\r\nfrom tensorflow.python.autograph.core import converter\r\nfrom tensorflow.python.autograph.pyct import anno\r\nfrom tensorflow.python.autograph.pyct import ast_util\r\nfrom tensorflow.python.autograph.pyct import inspect_utils\r\nfrom tensorflow.python.autograph.pyct import parser\r\nfrom tensorflow.python.autograph.pyct import templates\r\nfrom tensorflow.python.util import tf_inspect\r\n\r\n\r\nclass FunctionInfo(namedtuple('FunctionInfo', ('dtype',))):\r\n pass\r\n\r\n\r\n# TODO(mdan): Move this to config.py.\r\nKNOWN_NUMPY_FUNCTIONS = {\r\n ('numpy', 'random', 'binomial'): FunctionInfo(dtype='tf.int64'),\r\n}\r\n\r\n\r\n# TODO(mdan): Get rid of these interfaces. Can now depend directly on Namer.\r\n\r\n\r\nclass FunctionNamer(object):\r\n \"\"\"Describes the interface for CallTreeTransformer's namer.\"\"\"\r\n\r\n def compiled_function_name(self,\r\n original_fqn,\r\n live_entity=None,\r\n owner_type=None):\r\n \"\"\"Generate the name corresponding to the compiled version of a function.\r\n\r\n Args:\r\n original_fqn: string or tuple(string)\r\n live_entity: Callable, the actual target function, if known.\r\n owner_type: Optional object. 
If present, it indicates that the function is\r\n a member of the given type.\r\n Returns:\r\n string, bool\r\n \"\"\"\r\n raise NotImplementedError()\r\n\r\n def compiled_class_name(self, original_fqn, live_entity=None):\r\n \"\"\"Generate the name corresponding to the compiled version of a class.\r\n\r\n Args:\r\n original_fqn: string or tuple(string)\r\n live_entity: The actual target class, if known.\r\n Returns:\r\n string\r\n \"\"\"\r\n raise NotImplementedError()\r\n\r\n\r\n# TODO(mdan): Rename to CallsTransformer.\r\n\r\n\r\nclass CallTreeTransformer(converter.Base):\r\n \"\"\"Transforms the call tree by renaming transformed symbols.\"\"\"\r\n\r\n def _resolve_name(self, node):\r\n \"\"\"Used to resolve decorator info.\"\"\"\r\n if isinstance(node, gast.Call):\r\n return self._resolve_name(node.func)\r\n if isinstance(node, gast.Name):\r\n return self.ctx.namespace.get(node.id)\r\n if isinstance(node, gast.Attribute):\r\n parent = self._resolve_name(node.value)\r\n if parent is not None:\r\n return getattr(parent, node.attr)\r\n return None\r\n raise ValueError(node)\r\n\r\n def _try_resolve_target(self, node):\r\n \"\"\"Works for methods of objects of known type.\"\"\"\r\n if anno.hasanno(node, 'live_val'):\r\n return anno.getanno(node, 'live_val')\r\n if isinstance(node, gast.Attribute) and anno.hasanno(node, 'type'):\r\n owner_type = anno.getanno(node, 'type')\r\n if hasattr(owner_type, node.attr):\r\n return getattr(owner_type, node.attr)\r\n else:\r\n raise ValueError('Type \"%s\" has not attribute \"%s\". Is it dynamic?' %\r\n (owner_type, node.attr))\r\n return None\r\n\r\n def _function_is_compilable(self, target_entity):\r\n \"\"\"Determines whether an entity can be compiled at all.\"\"\"\r\n # TODO(mdan): This is just a placeholder. Implement.\r\n return not inspect_utils.isbuiltin(target_entity)\r\n\r\n def _should_compile(self, node, fqn):\r\n \"\"\"Determines whether an entity should be compiled in the context.\"\"\"\r\n # TODO(mdan): Needs cleanup. We should remove the use of fqn altogether.\r\n module_name = fqn[0]\r\n for mod in self.ctx.program.uncompiled_modules:\r\n if module_name.startswith(mod[0] + '.'):\r\n return False\r\n\r\n for i in range(1, len(fqn)):\r\n if fqn[:i] in self.ctx.program.uncompiled_modules:\r\n return False\r\n\r\n # Check for local decorations\r\n if anno.hasanno(node, 'graph_ready'):\r\n return False\r\n\r\n # The decorators themselves are not to be converted.\r\n # If present, the decorators should appear as static functions.\r\n target_entity = self._try_resolve_target(node.func)\r\n if target_entity is not None:\r\n # This attribute is set by the decorator itself.\r\n # TODO(mdan): This may not play nicely with other wrapping decorators.\r\n if hasattr(target_entity, '__pyct_is_compile_decorator'):\r\n return False\r\n\r\n if target_entity in self.ctx.program.autograph_decorators:\r\n return False\r\n\r\n # Inspect the target function decorators. If any include a @convert\r\n # or @graph_ready annotation, then they must be called as they are.\r\n # TODO(mdan): This may be quite heavy.\r\n # To parse and re-analyze each function for every call site could be quite\r\n # wasteful. Maybe we could cache the parsed AST?\r\n try:\r\n target_node, _ = parser.parse_entity(target_entity)\r\n target_node = target_node.body[0]\r\n except TypeError:\r\n # Functions whose source we cannot access are compilable (e.g. 
wrapped\r\n # to py_func).\r\n return True\r\n\r\n for dec in target_node.decorator_list:\r\n decorator_fn = self._resolve_name(dec)\r\n if (decorator_fn is not None and\r\n decorator_fn in self.ctx.program.autograph_decorators):\r\n return False\r\n\r\n return True\r\n\r\n def _rename_compilable_function(self, node):\r\n assert anno.hasanno(node.func, 'live_val')\r\n assert anno.hasanno(node.func, 'fqn')\r\n target_entity = anno.getanno(node.func, 'live_val')\r\n target_fqn = anno.getanno(node.func, 'fqn')\r\n\r\n if not self._should_compile(node, target_fqn):\r\n return node\r\n\r\n if anno.hasanno(node, 'is_constructor'):\r\n new_name = self.ctx.namer.compiled_class_name(\r\n target_fqn, live_entity=target_entity)\r\n do_rename = True\r\n else:\r\n if anno.hasanno(node.func, 'parent_type'):\r\n owner_type = anno.getanno(node.func, 'parent_type')\r\n else:\r\n # Fallback - not reliable.\r\n owner_type = inspect_utils.getmethodclass(target_entity)\r\n new_name, do_rename = self.ctx.namer.compiled_function_name(\r\n target_fqn, live_entity=target_entity, owner_type=owner_type)\r\n\r\n if do_rename:\r\n if target_entity is not None:\r\n if tf_inspect.ismethod(target_entity):\r\n # The renaming process will transform it into a regular function.\r\n # TODO(mdan): Is this complete? How does it work with nested members?\r\n node.args = [node.func.value] + node.args\r\n node.func = templates.replace('func_name', func_name=new_name)[0]\r\n return node\r\n\r\n def _wrap_to_py_func_no_return(self, node):\r\n # TODO(mdan): Properly handle varargs, etc.\r\n template = \"\"\"\r\n ag__.utils.wrap_py_func(func, None, (args,), kwargs, True)\r\n \"\"\"\r\n return templates.replace(\r\n template,\r\n func=node.func,\r\n args=node.args,\r\n kwargs=ast_util.keywords_to_dict(node.keywords))\r\n\r\n def _wrap_to_py_func_single_return(self, node, dtype):\r\n # TODO(mdan): Properly handle varargs, etc.\r\n template = \"\"\"\r\n ag__.utils.wrap_py_func(func, dtype, (args,), kwargs, False)\r\n \"\"\"\r\n return templates.replace_as_expression(\r\n template,\r\n func=node.func,\r\n dtype=parser.parse_expression(dtype),\r\n args=node.args,\r\n kwargs=ast_util.keywords_to_dict(node.keywords))\r\n\r\n def _insert_dynamic_conversion(self, node):\r\n \"\"\"Inlines a dynamic conversion for a dynamic function.\"\"\"\r\n # TODO(mdan): Pass information on the statically compiled functions.\r\n # Having access to the statically compiled functions can help avoid\r\n # unnecessary compilation.\r\n # For example, this would lead to function `a` being compiled twice:\r\n #\r\n # def a():\r\n # v = b\r\n # b()\r\n # def b():\r\n # a()\r\n #\r\n # This is really a problem with recursive calls, which currently can\r\n # only be gated by a static condition, and should be rare.\r\n # TODO(mdan): It probably makes sense to use dynamic conversion every time.\r\n # Before we could convert all the time though, we'd need a reasonable\r\n # caching mechanism.\r\n template = \"\"\"\r\n ag__.converted_call(\r\n func,\r\n ag__.ConversionOptions.new(recursive=recursive_val),\r\n args)\r\n \"\"\"\r\n call_expr = templates.replace(\r\n template,\r\n func=node.func,\r\n recursive_val=parser.parse_expression(str(self.ctx.program.recursive)),\r\n args=node.args)\r\n new_call = call_expr[0].value\r\n # TODO(mdan): Improve the template mechanism to better support this.\r\n new_call.keywords = node.keywords\r\n return new_call\r\n\r\n def visit_Expr(self, node):\r\n if isinstance(node.value, gast.Call):\r\n if anno.hasanno(node.value.func, 
'live_val'):\r\n target_entity = anno.getanno(node.value.func, 'live_val')\r\n if not self._function_is_compilable(target_entity):\r\n if anno.hasanno(node.value.func, 'fqn'):\r\n target_fqn = anno.getanno(node.value.func, 'fqn')\r\n if not self._should_compile(node.value, target_fqn):\r\n return node\r\n node = self._wrap_to_py_func_no_return(node.value)\r\n return node\r\n # Only the case of py_func with no return value is special.\r\n # Everything else is processed by visit_Call.\r\n self.visit(node.value)\r\n else:\r\n self.generic_visit(node)\r\n return node\r\n\r\n def visit_Call(self, node):\r\n # If the function call is wrapped by one of the marker decorators,\r\n # consider it graph ready.\r\n if anno.hasanno(node.func, 'live_val'):\r\n target_entity = anno.getanno(node.func, 'live_val')\r\n if target_entity in self.ctx.program.autograph_decorators:\r\n if len(node.args) < 1:\r\n raise ValueError(\r\n 'Found call to decorator function \"%s\", but it had no arguments. '\r\n 'A decorator needs at least one positional argument.' %\r\n target_entity)\r\n anno.setanno(node.args[0], 'graph_ready', True)\r\n\r\n self.generic_visit(node)\r\n if anno.hasanno(node.func, 'live_val'):\r\n target_entity = anno.getanno(node.func, 'live_val')\r\n if anno.hasanno(node.func, 'fqn'):\r\n target_fqn = anno.getanno(node.func, 'fqn')\r\n else:\r\n target_fqn = None\r\n if self._function_is_compilable(target_entity):\r\n node = self._rename_compilable_function(node)\r\n elif target_fqn and target_fqn in KNOWN_NUMPY_FUNCTIONS:\r\n # TODO(mdan): Should we replace these with equivalent TF ops instead?\r\n node = self._wrap_to_py_func_single_return(\r\n node, KNOWN_NUMPY_FUNCTIONS[target_fqn].dtype)\r\n else:\r\n raise NotImplementedError(\r\n 'py_func with return values (unknown function)')\r\n else:\r\n if anno.hasanno(node.func, anno.Basic.QN):\r\n # Special-case a few builtins that otherwise go undetected. This\r\n # normally doesn't pose a problem, but the dict built-in doesn't\r\n # work with inspect.getargspec which is required for dynamic functions.\r\n # Note: expecting this is resilient to aliasing (e.g.\r\n # dict = an_evil_dict), because in those cases the regular mechanisms\r\n # process a simple user function.\r\n qn = anno.getanno(node.func, anno.Basic.QN)\r\n # Add items to this list as needed.\r\n if str(qn) in ('dict',):\r\n return node\r\n\r\n if ast_util.matches(node, 'super(_)'):\r\n # super() calls are preserved. The class conversion mechanism will\r\n # ensure that they return the correct value.\r\n return node\r\n\r\n if self.ctx.program.recursive:\r\n node = self._insert_dynamic_conversion(node)\r\n return node\r\n\r\n\r\ndef transform(node, ctx):\r\n \"\"\"Transform function call to the compiled counterparts.\r\n\r\n Args:\r\n node: AST\r\n ctx: EntityContext\r\n Returns:\r\n A tuple (node, new_names):\r\n node: The transformed AST\r\n new_names: set(string), containing any newly-generated names\r\n \"\"\"\r\n return CallTreeTransformer(ctx).visit(node)\r\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"## Functions for working with arbitrarily nested sequences of elements.\r\n\r\nThis module can perform operations on nested structures. A nested structure is a\r\nPython sequence, tuple (including `namedtuple`), or dict that can contain\r\nfurther sequences, tuples, and dicts.\r\n\r\nattr.s decorated classes (http://www.attrs.org) are also supported, in the\r\nsame way as `namedtuple`.\r\n\r\nThe utilities here assume (and do not check) that the nested structures form a\r\n'tree', i.e., no references in the structure of the input of these functions\r\nshould be recursive.\r\n\r\nExample structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0),\r\n (np.array([3, 4]), tf.constant([3, 4])))`\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections as _collections\r\n\r\nimport six as _six\r\n\r\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\r\n\r\n\r\ndef _get_attrs_values(obj):\r\n \"\"\"Returns the list of values from an attrs instance.\"\"\"\r\n attrs = getattr(obj.__class__, \"__attrs_attrs__\")\r\n return [getattr(obj, a.name) for a in attrs]\r\n\r\n\r\ndef _sorted(dict_):\r\n \"\"\"Returns a sorted list of the dict keys, with error if keys not sortable.\"\"\"\r\n try:\r\n return sorted(_six.iterkeys(dict_))\r\n except TypeError:\r\n raise TypeError(\"nest only supports dicts with sortable keys.\")\r\n\r\n\r\ndef _is_namedtuple(instance, strict=False):\r\n \"\"\"Returns True iff `instance` is a `namedtuple`.\r\n\r\n Args:\r\n instance: An instance of a Python object.\r\n strict: If True, `instance` is considered to be a `namedtuple` only if\r\n it is a \"plain\" namedtuple. For instance, a class inheriting\r\n from a `namedtuple` will be considered to be a `namedtuple`\r\n iff `strict=False`.\r\n\r\n Returns:\r\n True if `instance` is a `namedtuple`.\r\n \"\"\"\r\n return _pywrap_tensorflow.IsNamedtuple(instance, strict)\r\n\r\n\r\n# See the swig file (util.i) for documentation.\r\n_is_mapping = _pywrap_tensorflow.IsMapping\r\n_is_attrs = _pywrap_tensorflow.IsAttrs\r\n\r\n\r\ndef _sequence_like(instance, args):\r\n \"\"\"Converts the sequence `args` to the same type as `instance`.\r\n\r\n Args:\r\n instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, or\r\n `collections.OrderedDict`.\r\n args: elements to be converted to the `instance` type.\r\n\r\n Returns:\r\n `args` with the type of `instance`.\r\n \"\"\"\r\n if _is_mapping(instance):\r\n # Pack dictionaries in a deterministic order by sorting the keys.\r\n # Notice this means that we ignore the original order of `OrderedDict`\r\n # instances. 
This is intentional, to avoid potential bugs caused by mixing\r\n # ordered and plain dicts (e.g., flattening a dict but using a\r\n # corresponding `OrderedDict` to pack it back).\r\n result = dict(zip(_sorted(instance), args))\r\n return type(instance)((key, result[key]) for key in _six.iterkeys(instance))\r\n elif _is_namedtuple(instance) or _is_attrs(instance):\r\n return type(instance)(*args)\r\n else:\r\n # Not a namedtuple\r\n return type(instance)(args)\r\n\r\n\r\ndef _yield_value(iterable):\r\n \"\"\"Yields the next value from the given iterable.\"\"\"\r\n if _is_mapping(iterable):\r\n # Iterate through dictionaries in a deterministic order by sorting the\r\n # keys. Notice this means that we ignore the original order of `OrderedDict`\r\n # instances. This is intentional, to avoid potential bugs caused by mixing\r\n # ordered and plain dicts (e.g., flattening a dict but using a\r\n # corresponding `OrderedDict` to pack it back).\r\n for key in _sorted(iterable):\r\n yield iterable[key]\r\n elif _is_attrs(iterable):\r\n for value in _get_attrs_values(iterable):\r\n yield value\r\n else:\r\n for value in iterable:\r\n yield value\r\n\r\n\r\n# See the swig file (util.i) for documentation.\r\nis_sequence = _pywrap_tensorflow.IsSequence\r\n\r\n\r\n# See the swig file (util.i) for documentation.\r\nflatten = _pywrap_tensorflow.Flatten\r\n\r\n\r\n# See the swig file (util.i) for documentation.\r\n_same_namedtuples = _pywrap_tensorflow.SameNamedtuples\r\n\r\n\r\nclass _DotString(object):\r\n\r\n def __str__(self):\r\n return \".\"\r\n\r\n def __repr__(self):\r\n return \".\"\r\n\r\n\r\n_DOT = _DotString()\r\n\r\n\r\ndef assert_same_structure(nest1, nest2, check_types=True):\r\n \"\"\"Asserts that two structures are nested in the same way.\r\n\r\n Note that namedtuples with identical name and fields are always considered\r\n to have the same shallow structure (even with `check_types=True`).\r\n For intance, this code will print `True`:\r\n\r\n ```python\r\n def nt(a, b):\r\n return collections.namedtuple('foo', 'a b')(a, b)\r\n print(assert_same_structure(nt(0, 1), nt(2, 3)))\r\n ```\r\n\r\n Args:\r\n nest1: an arbitrarily nested structure.\r\n nest2: an arbitrarily nested structure.\r\n check_types: if `True` (default) types of sequences are checked as well,\r\n including the keys of dictionaries. If set to `False`, for example a\r\n list and a tuple of objects will look the same if they have the same\r\n size. Note that namedtuples with identical name and fields are always\r\n considered to have the same shallow structure. Two types will also be\r\n considered the same if they are both list subtypes (which allows \"list\"\r\n and \"_ListWrapper\" from checkpointable dependency tracking to compare\r\n equal).\r\n\r\n Raises:\r\n ValueError: If the two structures do not have the same number of elements or\r\n if the two structures are not nested in the same way.\r\n TypeError: If the two structures differ in the type of sequence in any of\r\n their substructures. 
Only possible if `check_types` is `True`.\r\n \"\"\"\r\n try:\r\n _pywrap_tensorflow.AssertSameStructure(nest1, nest2, check_types)\r\n except (ValueError, TypeError) as e:\r\n str1 = str(map_structure(lambda _: _DOT, nest1))\r\n str2 = str(map_structure(lambda _: _DOT, nest2))\r\n raise type(e)(\"%s\\n\"\r\n \"Entire first structure:\\n%s\\n\"\r\n \"Entire second structure:\\n%s\"\r\n % (str(e), str1, str2))\r\n\r\n\r\ndef flatten_dict_items(dictionary):\r\n \"\"\"Returns a dictionary with flattened keys and values.\r\n\r\n This function flattens the keys and values of a dictionary, which can be\r\n arbitrarily nested structures, and returns the flattened version of such\r\n structures:\r\n\r\n ```python\r\n example_dictionary = {(4, 5, (6, 8)): (\"a\", \"b\", (\"c\", \"d\"))}\r\n result = {4: \"a\", 5: \"b\", 6: \"c\", 8: \"d\"}\r\n flatten_dict_items(example_dictionary) == result\r\n ```\r\n\r\n The input dictionary must satisfy two properties:\r\n\r\n 1. Its keys and values should have the same exact nested structure.\r\n 2. The set of all flattened keys of the dictionary must not contain repeated\r\n keys.\r\n\r\n Args:\r\n dictionary: the dictionary to zip\r\n\r\n Returns:\r\n The zipped dictionary.\r\n\r\n Raises:\r\n TypeError: If the input is not a dictionary.\r\n ValueError: If any key and value have not the same structure, or if keys are\r\n not unique.\r\n \"\"\"\r\n if not isinstance(dictionary, (dict, _collections.Mapping)):\r\n raise TypeError(\"input must be a dictionary\")\r\n flat_dictionary = {}\r\n for i, v in _six.iteritems(dictionary):\r\n if not is_sequence(i):\r\n if i in flat_dictionary:\r\n raise ValueError(\r\n \"Could not flatten dictionary: key %s is not unique.\" % i)\r\n flat_dictionary[i] = v\r\n else:\r\n flat_i = flatten(i)\r\n flat_v = flatten(v)\r\n if len(flat_i) != len(flat_v):\r\n raise ValueError(\r\n \"Could not flatten dictionary. Key had %d elements, but value had \"\r\n \"%d elements. 
Key: %s, value: %s.\"\r\n % (len(flat_i), len(flat_v), flat_i, flat_v))\r\n for new_i, new_v in zip(flat_i, flat_v):\r\n if new_i in flat_dictionary:\r\n raise ValueError(\r\n \"Could not flatten dictionary: key %s is not unique.\"\r\n % (new_i))\r\n flat_dictionary[new_i] = new_v\r\n return flat_dictionary\r\n\r\n\r\ndef _packed_nest_with_indices(structure, flat, index):\r\n \"\"\"Helper function for pack_sequence_as.\r\n\r\n Args:\r\n structure: Substructure (list / tuple / dict) to mimic.\r\n flat: Flattened values to output substructure for.\r\n index: Index at which to start reading from flat.\r\n\r\n Returns:\r\n The tuple (new_index, child), where:\r\n * new_index - the updated index into `flat` having processed `structure`.\r\n * packed - the subset of `flat` corresponding to `structure`,\r\n having started at `index`, and packed into the same nested\r\n format.\r\n\r\n Raises:\r\n ValueError: if `structure` contains more elements than `flat`\r\n (assuming indexing starts from `index`).\r\n \"\"\"\r\n packed = []\r\n for s in _yield_value(structure):\r\n if is_sequence(s):\r\n new_index, child = _packed_nest_with_indices(s, flat, index)\r\n packed.append(_sequence_like(s, child))\r\n index = new_index\r\n else:\r\n packed.append(flat[index])\r\n index += 1\r\n return index, packed\r\n\r\n\r\ndef pack_sequence_as(structure, flat_sequence):\r\n \"\"\"Returns a given flattened sequence packed into a given structure.\r\n\r\n If `structure` is a scalar, `flat_sequence` must be a single-element list;\r\n in this case the return value is `flat_sequence[0]`.\r\n\r\n If `structure` is or contains a dict instance, the keys will be sorted to\r\n pack the flat sequence in deterministic order. This is true also for\r\n `OrderedDict` instances: their sequence order is ignored, the sorting order of\r\n keys is used instead. The same convention is followed in `flatten`.\r\n This correctly repacks dicts and `OrderedDict`s after they have been\r\n flattened, and also allows flattening an `OrderedDict` and then repacking it\r\n back using a corresponding plain dict, or vice-versa.\r\n Dictionaries with non-sortable keys cannot be flattened.\r\n\r\n Args:\r\n structure: Nested structure, whose structure is given by nested lists,\r\n tuples, and dicts. Note: numpy arrays and strings are considered\r\n scalars.\r\n flat_sequence: flat sequence to pack.\r\n\r\n Returns:\r\n packed: `flat_sequence` converted to have the same recursive structure as\r\n `structure`.\r\n\r\n Raises:\r\n ValueError: If `flat_sequence` and `structure` have different\r\n element counts.\r\n TypeError: `structure` is or contains a dict with non-sortable keys.\r\n \"\"\"\r\n if not is_sequence(flat_sequence):\r\n raise TypeError(\"flat_sequence must be a sequence\")\r\n\r\n if not is_sequence(structure):\r\n if len(flat_sequence) != 1:\r\n raise ValueError(\"Structure is a scalar but len(flat_sequence) == %d > 1\"\r\n % len(flat_sequence))\r\n return flat_sequence[0]\r\n\r\n try:\r\n final_index, packed = _packed_nest_with_indices(structure, flat_sequence, 0)\r\n if final_index < len(flat_sequence):\r\n raise IndexError\r\n except IndexError:\r\n flat_structure = flatten(structure)\r\n if len(flat_structure) != len(flat_sequence):\r\n raise ValueError(\r\n \"Could not pack sequence. Structure had %d elements, but \"\r\n \"flat_sequence had %d elements. 
Structure: %s, flat_sequence: %s.\" %\r\n (len(flat_structure), len(flat_sequence), structure, flat_sequence))\r\n return _sequence_like(structure, packed)\r\n\r\n\r\ndef map_structure(func, *structure, **check_types_dict):\r\n \"\"\"Applies `func` to each entry in `structure` and returns a new structure.\r\n\r\n Applies `func(x[0], x[1], ...)` where x[i] is an entry in\r\n `structure[i]`. All structures in `structure` must have the same arity,\r\n and the return value will contain the results in the same structure.\r\n\r\n Args:\r\n func: A callable that accepts as many arguments as there are structures.\r\n *structure: scalar, or tuple or list of constructed scalars and/or other\r\n tuples/lists, or scalars. Note: numpy arrays are considered as scalars.\r\n **check_types_dict: only valid keyword argument is `check_types`. If set to\r\n `True` (default) the types of iterables within the structures have to be\r\n same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`\r\n exception). To allow this set this argument to `False`.\r\n Note that namedtuples with identical name and fields are always\r\n considered to have the same shallow structure.\r\n\r\n Returns:\r\n A new structure with the same arity as `structure`, whose values correspond\r\n to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding\r\n location in `structure[i]`. If there are different sequence types and\r\n `check_types` is `False` the sequence types of the first structure will be\r\n used.\r\n\r\n Raises:\r\n TypeError: If `func` is not callable or if the structures do not match\r\n each other by depth tree.\r\n ValueError: If no structure is provided or if the structures do not match\r\n each other by type.\r\n ValueError: If wrong keyword arguments are provided.\r\n \"\"\"\r\n if not callable(func):\r\n raise TypeError(\"func must be callable, got: %s\" % func)\r\n\r\n if not structure:\r\n raise ValueError(\"Must provide at least one structure\")\r\n\r\n if check_types_dict:\r\n if \"check_types\" not in check_types_dict or len(check_types_dict) > 1:\r\n raise ValueError(\"Only valid keyword argument is check_types\")\r\n check_types = check_types_dict[\"check_types\"]\r\n else:\r\n check_types = True\r\n\r\n for other in structure[1:]:\r\n assert_same_structure(structure[0], other, check_types=check_types)\r\n\r\n flat_structure = [flatten(s) for s in structure]\r\n entries = zip(*flat_structure)\r\n\r\n return pack_sequence_as(\r\n structure[0], [func(*x) for x in entries])\r\n\r\n\r\ndef map_structure_with_paths(func, *structure, **kwargs):\r\n \"\"\"Applies `func` to each entry in `structure` and returns a new structure.\r\n\r\n Applies `func(path, x[0], x[1], ..., **kwargs)` where x[i] is an entry in\r\n `structure[i]` and `path` is the common path to x[i] in the structures. All\r\n structures in `structure` must have the same arity, and the return value will\r\n contain the results in the same structure. Special kwarg `check_types`\r\n determines whether the types of iterables within the structure must be the\r\n same-- see **kwargs definition below.\r\n\r\n Args:\r\n func: A callable with the signature func(path, *values, **kwargs) that is\r\n evaluated on the leaves of the structure.\r\n *structure: A variable number of compatible structures to process.\r\n **kwargs: Optional kwargs to be passed through to func. 
Special kwarg\r\n `check_types` is not passed to func, but instead determines whether the\r\n types of iterables within the structures have to be same (e.g.,\r\n `map_structure(func, [1], (1,))` raises a `TypeError` exception). By\r\n default, the types must match. To allow iteration over structures of\r\n different types (but common arity), set this kwarg to `False`.\r\n\r\n Returns:\r\n A structure of the same form as the input structures whose leaves are the\r\n result of evaluating func on corresponding leaves of the input structures.\r\n\r\n Raises:\r\n TypeError: If `func` is not callable or if the structures do not match\r\n each other by depth tree.\r\n TypeError: If `check_types` is not `False` and the two structures differ in\r\n the type of sequence in any of their substructures.\r\n ValueError: If no structures are provided.\r\n \"\"\"\r\n if not callable(func):\r\n raise TypeError(\"func must be callable, got: %s\" % func)\r\n if not structure:\r\n raise ValueError(\"Must provide at least one structure\")\r\n\r\n check_types = kwargs.pop(\"check_types\", True)\r\n for other in structure[1:]:\r\n assert_same_structure(structure[0], other, check_types=check_types)\r\n\r\n # First set paths_and_values to:\r\n # [[(p11, v11), ... (p1n, v1n)], ... [(pm1, vm1), ... (pmn, vmn)]]\r\n paths_and_values = [flatten_with_joined_string_paths(s) for s in structure]\r\n\r\n # Now zip(*paths_and_values) would be:\r\n # [((p11, v11), ... (pm1, vm1)), ... ((p1n, v1n), ... (pmn, vmn))]\r\n # so grouped_by_path is set to:\r\n # [[(p11, ... pm1), (v11, ... vm1)], ... [(p1n, ... pmn), (v1n, ... vmn)]]\r\n # Note that p1i, ... pmi must all be equal since the structures are the same.\r\n grouped_by_path = [zip(*p_v) for p_v in zip(*paths_and_values)]\r\n\r\n return pack_sequence_as(structure[0], [\r\n func(paths[0], *values, **kwargs) for paths, values in grouped_by_path])\r\n\r\n\r\ndef _yield_flat_up_to(shallow_tree, input_tree):\r\n \"\"\"Yields elements `input_tree` partially flattened up to `shallow_tree`.\"\"\"\r\n if is_sequence(shallow_tree):\r\n for shallow_branch, input_branch in zip(_yield_value(shallow_tree),\r\n _yield_value(input_tree)):\r\n for input_leaf in _yield_flat_up_to(shallow_branch, input_branch):\r\n yield input_leaf\r\n else:\r\n yield input_tree\r\n\r\n\r\ndef assert_shallow_structure(shallow_tree, input_tree, check_types=True):\r\n \"\"\"Asserts that `shallow_tree` is a shallow structure of `input_tree`.\r\n\r\n That is, this function tests if the `input_tree` structure can be created from\r\n the `shallow_tree` structure by replacing its leaf nodes with deeper\r\n tree structures.\r\n\r\n Examples:\r\n\r\n The following code will raise an exception:\r\n ```python\r\n shallow_tree = [\"a\", \"b\"]\r\n input_tree = [\"c\", [\"d\", \"e\"], \"f\"]\r\n assert_shallow_structure(shallow_tree, input_tree)\r\n ```\r\n\r\n The following code will not raise an exception:\r\n ```python\r\n shallow_tree = [\"a\", \"b\"]\r\n input_tree = [\"c\", [\"d\", \"e\"]]\r\n assert_shallow_structure(shallow_tree, input_tree)\r\n ```\r\n\r\n Args:\r\n shallow_tree: an arbitrarily nested structure.\r\n input_tree: an arbitrarily nested structure.\r\n check_types: if `True` (default) the sequence types of `shallow_tree` and\r\n `input_tree` have to be the same. 
Note that even with check_types==True,\r\n this function will consider two different namedtuple classes with the same\r\n name and _fields attribute to be the same class.\r\n\r\n Raises:\r\n TypeError: If `shallow_tree` is a sequence but `input_tree` is not.\r\n TypeError: If the sequence types of `shallow_tree` are different from\r\n `input_tree`. Only raised if `check_types` is `True`.\r\n ValueError: If the sequence lengths of `shallow_tree` are different from\r\n `input_tree`.\r\n \"\"\"\r\n if is_sequence(shallow_tree):\r\n if not is_sequence(input_tree):\r\n raise TypeError(\r\n \"If shallow structure is a sequence, input must also be a sequence. \"\r\n \"Input has type: %s.\" % type(input_tree))\r\n\r\n if check_types and not isinstance(input_tree, type(shallow_tree)):\r\n # Duck-typing means that nest should be fine with two different\r\n # namedtuples with identical name and fields.\r\n shallow_is_namedtuple = _is_namedtuple(shallow_tree, False)\r\n input_is_namedtuple = _is_namedtuple(input_tree, False)\r\n if shallow_is_namedtuple and input_is_namedtuple:\r\n if not _same_namedtuples(shallow_tree, input_tree):\r\n raise TypeError(\r\n \"The two namedtuples don't have the same sequence type. Input \"\r\n \"structure has type %s, while shallow structure has type %s.\"\r\n % (type(input_tree), type(shallow_tree)))\r\n else:\r\n raise TypeError(\r\n \"The two structures don't have the same sequence type. Input \"\r\n \"structure has type %s, while shallow structure has type %s.\"\r\n % (type(input_tree), type(shallow_tree)))\r\n\r\n if len(input_tree) != len(shallow_tree):\r\n raise ValueError(\r\n \"The two structures don't have the same sequence length. Input \"\r\n \"structure has length %s, while shallow structure has length %s.\"\r\n % (len(input_tree), len(shallow_tree)))\r\n\r\n if check_types and isinstance(shallow_tree, (dict, _collections.Mapping)):\r\n if set(input_tree) != set(shallow_tree):\r\n raise ValueError(\r\n \"The two structures don't have the same keys. Input \"\r\n \"structure has keys %s, while shallow structure has keys %s.\" %\r\n (list(_six.iterkeys(input_tree)),\r\n list(_six.iterkeys(shallow_tree))))\r\n\r\n input_tree = list(sorted(_six.iteritems(input_tree)))\r\n shallow_tree = list(sorted(_six.iteritems(shallow_tree)))\r\n\r\n for shallow_branch, input_branch in zip(shallow_tree, input_tree):\r\n assert_shallow_structure(shallow_branch, input_branch,\r\n check_types=check_types)\r\n\r\n\r\ndef flatten_up_to(shallow_tree, input_tree):\r\n \"\"\"Flattens `input_tree` up to `shallow_tree`.\r\n\r\n Any further depth in structure in `input_tree` is retained as elements in the\r\n partially flatten output.\r\n\r\n If `shallow_tree` and `input_tree` are not sequences, this returns a\r\n single-element list: `[input_tree]`.\r\n\r\n Use Case:\r\n\r\n Sometimes we may wish to partially flatten a nested sequence, retaining some\r\n of the nested structure. 
We achieve this by specifying a shallow structure,\r\n `shallow_tree`, we wish to flatten up to.\r\n\r\n The input, `input_tree`, can be thought of as having the same structure as\r\n `shallow_tree`, but with leaf nodes that are themselves tree structures.\r\n\r\n Examples:\r\n\r\n ```python\r\n input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]\r\n shallow_tree = [[True, True], [False, True]]\r\n\r\n flattened_input_tree = flatten_up_to(shallow_tree, input_tree)\r\n flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)\r\n\r\n # Output is:\r\n # [[2, 2], [3, 3], [4, 9], [5, 5]]\r\n # [True, True, False, True]\r\n ```\r\n\r\n ```python\r\n input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]\r\n shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]\r\n\r\n input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)\r\n input_tree_flattened = flatten(input_tree)\r\n\r\n # Output is:\r\n # [('a', 1), ('b', 2), ('c', 3), ('d', 4)]\r\n # ['a', 1, 'b', 2, 'c', 3, 'd', 4]\r\n ```\r\n\r\n Non-Sequence Edge Cases:\r\n\r\n ```python\r\n flatten_up_to(0, 0) # Output: [0]\r\n flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]\r\n flatten_up_to([0, 1, 2], 0) # Output: TypeError\r\n flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]\r\n ```\r\n\r\n Args:\r\n shallow_tree: a possibly pruned structure of input_tree.\r\n input_tree: an arbitrarily nested structure or a scalar object.\r\n Note, numpy arrays are considered scalars.\r\n\r\n Returns:\r\n A Python list, the partially flattened version of `input_tree` according to\r\n the structure of `shallow_tree`.\r\n\r\n Raises:\r\n TypeError: If `shallow_tree` is a sequence but `input_tree` is not.\r\n TypeError: If the sequence types of `shallow_tree` are different from\r\n `input_tree`.\r\n ValueError: If the sequence lengths of `shallow_tree` are different from\r\n `input_tree`.\r\n \"\"\"\r\n assert_shallow_structure(shallow_tree, input_tree)\r\n return list(_yield_flat_up_to(shallow_tree, input_tree))\r\n\r\n\r\ndef map_structure_up_to(shallow_tree, func, *inputs):\r\n \"\"\"Applies a function or op to a number of partially flattened inputs.\r\n\r\n The `inputs` are flattened up to `shallow_tree` before being mapped.\r\n\r\n Use Case:\r\n\r\n Sometimes we wish to apply a function to a partially flattened\r\n sequence (for example when the function itself takes sequence inputs). 
We\r\n achieve this by specifying a shallow structure, `shallow_tree` we wish to\r\n flatten up to.\r\n\r\n The `inputs`, can be thought of as having the same structure as\r\n `shallow_tree`, but with leaf nodes that are themselves tree structures.\r\n\r\n This function therefore will return something with the same base structure as\r\n `shallow_tree`.\r\n\r\n Examples:\r\n\r\n ```python\r\n ab_tuple = collections.namedtuple(\"ab_tuple\", \"a, b\")\r\n op_tuple = collections.namedtuple(\"op_tuple\", \"add, mul\")\r\n inp_val = ab_tuple(a=2, b=3)\r\n inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))\r\n out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,\r\n inp_val, inp_ops)\r\n\r\n # Output is: ab_tuple(a=6, b=15)\r\n ```\r\n\r\n ```python\r\n data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]\r\n name_list = ['evens', ['odds', 'primes']]\r\n out = map_structure_up_to(\r\n name_list,\r\n lambda name, sec: \"first_{}_{}\".format(len(sec), name),\r\n name_list, data_list)\r\n\r\n # Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]\r\n ```\r\n\r\n Args:\r\n shallow_tree: a shallow tree, common to all the inputs.\r\n func: callable which will be applied to each input individually.\r\n *inputs: arbitrarily nested combination of objects that are compatible with\r\n shallow_tree. The function `func` is applied to corresponding\r\n partially flattened elements of each input, so the function must support\r\n arity of `len(inputs)`.\r\n\r\n Raises:\r\n TypeError: If `shallow_tree` is a sequence but `input_tree` is not.\r\n TypeError: If the sequence types of `shallow_tree` are different from\r\n `input_tree`.\r\n ValueError: If the sequence lengths of `shallow_tree` are different from\r\n `input_tree`.\r\n\r\n Returns:\r\n result of repeatedly applying `func`, with same structure as\r\n `shallow_tree`.\r\n \"\"\"\r\n if not inputs:\r\n raise ValueError(\"Cannot map over no sequences\")\r\n for input_tree in inputs:\r\n assert_shallow_structure(shallow_tree, input_tree)\r\n\r\n # Flatten each input separately, apply the function to corresponding elements,\r\n # then repack based on the structure of the first input.\r\n all_flattened_up_to = [flatten_up_to(shallow_tree, input_tree)\r\n for input_tree in inputs]\r\n results = [func(*tensors) for tensors in zip(*all_flattened_up_to)]\r\n return pack_sequence_as(structure=shallow_tree, flat_sequence=results)\r\n\r\n\r\ndef get_traverse_shallow_structure(traverse_fn, structure):\r\n \"\"\"Generates a shallow structure from a `traverse_fn` and `structure`.\r\n\r\n `traverse_fn` must accept any possible subtree of `structure` and return\r\n a depth=1 structure containing `True` or `False` values, describing which\r\n of the top-level subtrees may be traversed. 
It may also\r\n return scalar `True` or `False` \"traversal is OK / not OK for all subtrees.\"\r\n\r\n Examples are available in the unit tests (nest_test.py).\r\n\r\n Args:\r\n traverse_fn: Function taking a substructure and returning either a scalar\r\n `bool` (whether to traverse that substructure or not) or a depth=1\r\n shallow structure of the same type, describing which parts of the\r\n substructure to traverse.\r\n structure: The structure to traverse.\r\n\r\n Returns:\r\n A shallow structure containing python bools, which can be passed to\r\n `map_structure_up_to` and `flatten_up_to`.\r\n\r\n Raises:\r\n TypeError: if `traverse_fn` returns a sequence for a non-sequence input,\r\n or a structure with depth higher than 1 for a sequence input,\r\n or if any leaf values in the returned structure or scalar are not type\r\n `bool`.\r\n \"\"\"\r\n to_traverse = traverse_fn(structure)\r\n if not is_sequence(structure):\r\n if not isinstance(to_traverse, bool):\r\n raise TypeError(\"traverse_fn returned structure: %s for non-structure: %s\"\r\n % (to_traverse, structure))\r\n return to_traverse\r\n level_traverse = []\r\n if isinstance(to_traverse, bool):\r\n if not to_traverse:\r\n # Do not traverse this substructure at all. Exit early.\r\n return False\r\n else:\r\n # Traverse the entire substructure.\r\n for branch in _yield_value(structure):\r\n level_traverse.append(\r\n get_traverse_shallow_structure(traverse_fn, branch))\r\n elif not is_sequence(to_traverse):\r\n raise TypeError(\"traverse_fn returned a non-bool scalar: %s for input: %s\"\r\n % (to_traverse, structure))\r\n else:\r\n # Traverse some subset of this substructure.\r\n assert_shallow_structure(to_traverse, structure)\r\n for t, branch in zip(_yield_value(to_traverse), _yield_value(structure)):\r\n if not isinstance(t, bool):\r\n raise TypeError(\r\n \"traverse_fn didn't return a depth=1 structure of bools. saw: %s \"\r\n \" for structure: %s\" % (to_traverse, structure))\r\n if t:\r\n level_traverse.append(\r\n get_traverse_shallow_structure(traverse_fn, branch))\r\n else:\r\n level_traverse.append(False)\r\n return _sequence_like(structure, level_traverse)\r\n\r\n\r\ndef yield_flat_paths(nest):\r\n \"\"\"Yields paths for some nested structure.\r\n\r\n Paths are lists of objects which can be str-converted, which may include\r\n integers or other types which are used as indices in a dict.\r\n\r\n The flat list will be in the corresponding order as if you called\r\n `snt.nest.flatten` on the structure. This is handy for naming Tensors such\r\n the TF scope structure matches the tuple structure.\r\n\r\n E.g. 
if we have a tuple `value = Foo(a=3, b=Bar(c=23, d=42))`\r\n\r\n ```shell\r\n >>> nest.flatten(value)\r\n [3, 23, 42]\r\n >>> list(nest.yield_flat_paths(value))\r\n [('a',), ('b', 'c'), ('b', 'd')]\r\n ```\r\n\r\n ```shell\r\n >>> list(nest.yield_flat_paths({'a': [3]}))\r\n [('a', 0)]\r\n >>> list(nest.yield_flat_paths({'a': 3}))\r\n [('a',)]\r\n ```\r\n\r\n Args:\r\n nest: the value to produce a flattened paths list for.\r\n\r\n Yields:\r\n Tuples containing index or key values which form the path to a specific\r\n leaf value in the nested structure.\r\n \"\"\"\r\n\r\n # The _maybe_add_final_path_element function is used below in order to avoid\r\n # adding trailing slashes when the sub-element recursed into is a leaf.\r\n if isinstance(nest, (dict, _collections.Mapping)):\r\n for key in _sorted(nest):\r\n value = nest[key]\r\n for sub_path in yield_flat_paths(value):\r\n yield (key,) + sub_path\r\n elif _is_namedtuple(nest):\r\n for key in nest._fields:\r\n value = getattr(nest, key)\r\n for sub_path in yield_flat_paths(value):\r\n yield (key,) + sub_path\r\n elif isinstance(nest, _six.string_types):\r\n yield ()\r\n elif isinstance(nest, _collections.Sequence):\r\n for idx, value in enumerate(nest):\r\n for sub_path in yield_flat_paths(value):\r\n yield (idx,) + sub_path\r\n else:\r\n yield ()\r\n\r\n\r\ndef flatten_with_joined_string_paths(structure, separator=\"/\"):\r\n \"\"\"Returns a list of (string path, data element) tuples.\r\n\r\n The order of tuples produced matches that of `nest.flatten`. This allows you\r\n to flatten a nested structure while keeping information about where in the\r\n structure each data element was located. See `nest.yield_flat_paths`\r\n for more information.\r\n\r\n Args:\r\n structure: the nested structure to flatten.\r\n separator: string to separate levels of hierarchy in the results, defaults\r\n to '/'.\r\n\r\n Returns:\r\n A list of (string, data element) tuples.\r\n \"\"\"\r\n flat_paths = yield_flat_paths(structure)\r\n def stringify_and_join(path_elements):\r\n return separator.join(str(path_element) for path_element in path_elements)\r\n flat_string_paths = [stringify_and_join(path) for path in flat_paths]\r\n return list(zip(flat_string_paths, flatten(structure)))\r\n\r\n\r\n_pywrap_tensorflow.RegisterType(\"Mapping\", _collections.Mapping)\r\n_pywrap_tensorflow.RegisterType(\"Sequence\", _collections.Sequence)\r\n",
"\"\"\"Python wrappers around TensorFlow ops.\r\n\r\nThis file is MACHINE GENERATED! Do not edit.\r\n\"\"\"\r\n\r\nimport collections as _collections\r\nimport six as _six\r\n\r\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\r\nfrom tensorflow.python.eager import context as _context\r\nfrom tensorflow.python.eager import core as _core\r\nfrom tensorflow.python.eager import execute as _execute\r\nfrom tensorflow.python.framework import dtypes as _dtypes\r\nfrom tensorflow.python.framework import errors as _errors\r\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\r\n\r\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\r\n# Needed to trigger the call to _set_call_cpp_shape_fn.\r\nfrom tensorflow.python.framework import common_shapes as _common_shapes\r\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\r\nfrom tensorflow.python.framework import ops as _ops\r\nfrom tensorflow.python.framework import op_def_library as _op_def_library\r\nfrom tensorflow.python.util.deprecation import deprecated_endpoints\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n_hard_routing_function_outputs = [\"path_probability\", \"path\"]\r\n_HardRoutingFunctionOutput = _collections.namedtuple(\r\n \"HardRoutingFunction\", _hard_routing_function_outputs)\r\n\r\n\r\n@tf_export('hard_routing_function')\r\ndef hard_routing_function(input_data, tree_parameters, tree_biases, max_nodes, tree_depth, name=None):\r\n r\"\"\" Chooses a single path for each instance in `input_data` and returns the leaf\r\n\r\n the probability of the path and the path taken.\r\n\r\n tree_depth: The depth of the decision tree.\r\n\r\n input_data: The training batch's features as a 2-d tensor; `input_data[i][j]`\r\n gives the j-th feature of the i-th input.\r\n tree_parameters: `tree_parameters[i]` gives the weight of\r\n the logistic regression model that translates from node features to\r\n probabilities.\r\n tree_biases: `tree_biases[i]` gives the bias of the logistic\r\n regression model that translates from node features to\r\n probabilities.\r\n\r\n path_probability: `path_probability[i]` gives the probability of reaching each\r\n node in `path[i]`.\r\n path: `path[i][j]` gives the jth node in the path taken by the ith data\r\n instance.\r\n\r\n Args:\r\n input_data: A `Tensor` of type `float32`.\r\n tree_parameters: A `Tensor` of type `float32`.\r\n tree_biases: A `Tensor` of type `float32`.\r\n max_nodes: An `int`.\r\n tree_depth: An `int`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (path_probability, path).\r\n\r\n path_probability: A `Tensor` of type `float32`.\r\n path: A `Tensor` of type `int32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n max_nodes = _execute.make_int(max_nodes, \"max_nodes\")\r\n tree_depth = _execute.make_int(tree_depth, \"tree_depth\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"HardRoutingFunction\", input_data=input_data,\r\n tree_parameters=tree_parameters, tree_biases=tree_biases,\r\n max_nodes=max_nodes, tree_depth=tree_depth, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"max_nodes\", _op.get_attr(\"max_nodes\"), \"tree_depth\",\r\n _op.get_attr(\"tree_depth\"))\r\n _execute.record_gradient(\r\n \"HardRoutingFunction\", _inputs_flat, _attrs, _result, name)\r\n _result = _HardRoutingFunctionOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n 
try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"HardRoutingFunction\", name, _ctx._post_execution_callbacks,\r\n input_data, tree_parameters, tree_biases, \"max_nodes\", max_nodes,\r\n \"tree_depth\", tree_depth)\r\n _result = _HardRoutingFunctionOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return hard_routing_function_eager_fallback(\r\n input_data, tree_parameters, tree_biases, max_nodes=max_nodes,\r\n tree_depth=tree_depth, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef hard_routing_function_eager_fallback(input_data, tree_parameters, tree_biases, max_nodes, tree_depth, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function hard_routing_function\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n max_nodes = _execute.make_int(max_nodes, \"max_nodes\")\r\n tree_depth = _execute.make_int(tree_depth, \"tree_depth\")\r\n input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)\r\n tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)\r\n tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)\r\n _inputs_flat = [input_data, tree_parameters, tree_biases]\r\n _attrs = (\"max_nodes\", max_nodes, \"tree_depth\", tree_depth)\r\n _result = _execute.execute(b\"HardRoutingFunction\", 2, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"HardRoutingFunction\", _inputs_flat, _attrs, _result, name)\r\n _result = _HardRoutingFunctionOutput._make(_result)\r\n return _result\r\n\r\n_ops.RegisterShape(\"HardRoutingFunction\")(None)\r\n\r\n\r\n_k_feature_gradient_outputs = [\"routing_gradient\", \"data_gradient\",\r\n \"weight_gradient\"]\r\n_KFeatureGradientOutput = _collections.namedtuple(\r\n \"KFeatureGradient\", _k_feature_gradient_outputs)\r\n\r\n\r\n@tf_export('k_feature_gradient')\r\ndef k_feature_gradient(input_data, tree_parameters, tree_biases, routes, layer_num, random_seed, name=None):\r\n r\"\"\" Computes the derivative of the routing loss with respect to each decision\r\n\r\n node. Each decision node is constrained to make a decision based on only\r\n k features.\r\n\r\n layer_num: The layer number of this tree.\r\n random_seed: The base random seed.\r\n\r\n input_data: The training batch's features as a 2-d tensor;\r\n `input_data[i][j]` gives the j-th feature of the i-th input.\r\n tree_parameters: `tree_parameters[i]` gives the weight of\r\n the logistic regression model that translates from node features to\r\n probabilities.\r\n tree_biases: `tree_biases[i]` gives the bias of the logistic\r\n regression model that translates from node features to\r\n probabilities.\r\n routes: The routes computed by routing_function_op.\r\n\r\n routing_gradient: `routing_gradient` provides du / df, where u is the\r\n routing function and f is the (vector of) decision functions. 
A decision\r\n function f_i computes the routing decision at node i.\r\n\r\n data_gradient: `data_gradient` provides df / dx, where f is the (vector\r\n of) decision functions and x is a batch of data.\r\n\r\n weights_gradient: `weights_gradient` provides df / dw, where f is the\r\n (vector of) decision functions and w is the matrix of parameters that\r\n determine how instances are routed through a tree.\r\n\r\n f_i, the decision function at node i, is parameterized by t_i (parameters)\r\n and b_i (bias) and takes data x as input. This op is called in\r\n training_ops.py to compute du / df, and we use that to compute\r\n\r\n du / dx = du / df * df / dx,\r\n du / dt = du / df * df / dt, and\r\n du / db = du / df * df / db.\r\n\r\n Args:\r\n input_data: A `Tensor` of type `float32`.\r\n tree_parameters: A `Tensor` of type `float32`.\r\n tree_biases: A `Tensor` of type `float32`.\r\n routes: A `Tensor` of type `float32`.\r\n layer_num: An `int`.\r\n random_seed: An `int`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (routing_gradient, data_gradient, weight_gradient).\r\n\r\n routing_gradient: A `Tensor` of type `float32`.\r\n data_gradient: A `Tensor` of type `float32`.\r\n weight_gradient: A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n layer_num = _execute.make_int(layer_num, \"layer_num\")\r\n random_seed = _execute.make_int(random_seed, \"random_seed\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"KFeatureGradient\", input_data=input_data,\r\n tree_parameters=tree_parameters, tree_biases=tree_biases,\r\n routes=routes, layer_num=layer_num, random_seed=random_seed,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"layer_num\", _op.get_attr(\"layer_num\"), \"random_seed\",\r\n _op.get_attr(\"random_seed\"))\r\n _execute.record_gradient(\r\n \"KFeatureGradient\", _inputs_flat, _attrs, _result, name)\r\n _result = _KFeatureGradientOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"KFeatureGradient\", name, _ctx._post_execution_callbacks, input_data,\r\n tree_parameters, tree_biases, routes, \"layer_num\", layer_num,\r\n \"random_seed\", random_seed)\r\n _result = _KFeatureGradientOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return k_feature_gradient_eager_fallback(\r\n input_data, tree_parameters, tree_biases, routes,\r\n layer_num=layer_num, random_seed=random_seed, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef k_feature_gradient_eager_fallback(input_data, tree_parameters, tree_biases, routes, layer_num, random_seed, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function k_feature_gradient\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n layer_num = _execute.make_int(layer_num, \"layer_num\")\r\n random_seed = _execute.make_int(random_seed, \"random_seed\")\r\n input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)\r\n tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)\r\n tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)\r\n 
routes = _ops.convert_to_tensor(routes, _dtypes.float32)\r\n _inputs_flat = [input_data, tree_parameters, tree_biases, routes]\r\n _attrs = (\"layer_num\", layer_num, \"random_seed\", random_seed)\r\n _result = _execute.execute(b\"KFeatureGradient\", 3, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"KFeatureGradient\", _inputs_flat, _attrs, _result, name)\r\n _result = _KFeatureGradientOutput._make(_result)\r\n return _result\r\n\r\n_ops.RegisterShape(\"KFeatureGradient\")(None)\r\n\r\n\r\n@tf_export('k_feature_routing_function')\r\ndef k_feature_routing_function(input_data, tree_parameters, tree_biases, layer_num, max_nodes, num_features_per_node, random_seed, name=None):\r\n r\"\"\" Returns the probability that each input will reach each leaf node. Each\r\n\r\n decision is made based on k features.\r\n\r\n layer_num: The layer number of this tree.\r\n max_nodes: The number of nodes in the tree.\r\n num_features_per_node: The number of features each node can use to make a\r\n decision.\r\n random_seed: The base random seed.\r\n\r\n input_data: The training batch's features as a 2-d tensor; `input_data[i][j]`\r\n gives the j-th feature of the i-th input.\r\n tree_parameters: `tree_parameters[i]` gives the weight of\r\n the logistic regression model that translates from node features to\r\n probabilities.\r\n tree_biases: `tree_biases[i]` gives the bias of the logistic\r\n regression model that translates from node features to\r\n probabilities.\r\n tree_features: `tree_features[i]` gives the decision feature for node i.\r\n\r\n probabilities: `probabilities[i][j]` is the probability that input i\r\n will reach node j.\r\n\r\n Args:\r\n input_data: A `Tensor` of type `float32`.\r\n tree_parameters: A `Tensor` of type `float32`.\r\n tree_biases: A `Tensor` of type `float32`.\r\n layer_num: An `int`.\r\n max_nodes: An `int`.\r\n num_features_per_node: An `int`.\r\n random_seed: An `int`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n layer_num = _execute.make_int(layer_num, \"layer_num\")\r\n max_nodes = _execute.make_int(max_nodes, \"max_nodes\")\r\n num_features_per_node = _execute.make_int(num_features_per_node, \"num_features_per_node\")\r\n random_seed = _execute.make_int(random_seed, \"random_seed\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"KFeatureRoutingFunction\", input_data=input_data,\r\n tree_parameters=tree_parameters, tree_biases=tree_biases,\r\n layer_num=layer_num, max_nodes=max_nodes,\r\n num_features_per_node=num_features_per_node, random_seed=random_seed,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"layer_num\", _op.get_attr(\"layer_num\"), \"max_nodes\",\r\n _op.get_attr(\"max_nodes\"), \"num_features_per_node\",\r\n _op.get_attr(\"num_features_per_node\"), \"random_seed\",\r\n _op.get_attr(\"random_seed\"))\r\n _execute.record_gradient(\r\n \"KFeatureRoutingFunction\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"KFeatureRoutingFunction\", name, _ctx._post_execution_callbacks,\r\n input_data, tree_parameters, tree_biases, \"layer_num\", layer_num,\r\n \"max_nodes\", max_nodes, \"num_features_per_node\",\r\n num_features_per_node, 
\"random_seed\", random_seed)\r\n return _result\r\n except _core._FallbackException:\r\n return k_feature_routing_function_eager_fallback(\r\n input_data, tree_parameters, tree_biases, layer_num=layer_num,\r\n max_nodes=max_nodes, num_features_per_node=num_features_per_node,\r\n random_seed=random_seed, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef k_feature_routing_function_eager_fallback(input_data, tree_parameters, tree_biases, layer_num, max_nodes, num_features_per_node, random_seed, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function k_feature_routing_function\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n layer_num = _execute.make_int(layer_num, \"layer_num\")\r\n max_nodes = _execute.make_int(max_nodes, \"max_nodes\")\r\n num_features_per_node = _execute.make_int(num_features_per_node, \"num_features_per_node\")\r\n random_seed = _execute.make_int(random_seed, \"random_seed\")\r\n input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)\r\n tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)\r\n tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)\r\n _inputs_flat = [input_data, tree_parameters, tree_biases]\r\n _attrs = (\"layer_num\", layer_num, \"max_nodes\", max_nodes,\r\n \"num_features_per_node\", num_features_per_node, \"random_seed\", random_seed)\r\n _result = _execute.execute(b\"KFeatureRoutingFunction\", 1,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"KFeatureRoutingFunction\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n_ops.RegisterShape(\"KFeatureRoutingFunction\")(None)\r\n\r\n\r\n@tf_export('routing_function')\r\ndef routing_function(input_data, tree_parameters, tree_biases, max_nodes, name=None):\r\n r\"\"\" Returns the probability that each input will reach each leaf node.\r\n\r\n max_nodes: The number of nodes in the tree.\r\n\r\n input_data: The training batch's features as a 2-d tensor; `input_data[i][j]`\r\n gives the j-th feature of the i-th input.\r\n tree_parameters: `tree_parameters[i]` gives the weight of\r\n the logistic regression model that translates from node features to\r\n probabilities.\r\n tree_biases: `tree_biases[i]` gives the bias of the logistic\r\n regression model that translates from node features to\r\n probabilities.\r\n\r\n probabilities: `probabilities[i][j]` is the probability that input i\r\n will reach node j.\r\n\r\n Args:\r\n input_data: A `Tensor` of type `float32`.\r\n tree_parameters: A `Tensor` of type `float32`.\r\n tree_biases: A `Tensor` of type `float32`.\r\n max_nodes: An `int`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n max_nodes = _execute.make_int(max_nodes, \"max_nodes\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"RoutingFunction\", input_data=input_data,\r\n tree_parameters=tree_parameters, tree_biases=tree_biases,\r\n max_nodes=max_nodes, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"max_nodes\", _op.get_attr(\"max_nodes\"))\r\n _execute.record_gradient(\r\n \"RoutingFunction\", _inputs_flat, _attrs, 
_result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"RoutingFunction\", name, _ctx._post_execution_callbacks, input_data,\r\n tree_parameters, tree_biases, \"max_nodes\", max_nodes)\r\n return _result\r\n except _core._FallbackException:\r\n return routing_function_eager_fallback(\r\n input_data, tree_parameters, tree_biases, max_nodes=max_nodes,\r\n name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef routing_function_eager_fallback(input_data, tree_parameters, tree_biases, max_nodes, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function routing_function\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n max_nodes = _execute.make_int(max_nodes, \"max_nodes\")\r\n input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)\r\n tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)\r\n tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)\r\n _inputs_flat = [input_data, tree_parameters, tree_biases]\r\n _attrs = (\"max_nodes\", max_nodes)\r\n _result = _execute.execute(b\"RoutingFunction\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"RoutingFunction\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n_ops.RegisterShape(\"RoutingFunction\")(None)\r\n\r\n\r\n@tf_export('routing_gradient')\r\ndef routing_gradient(input_data, tree_parameters, tree_biases, routes, max_nodes, name=None):\r\n r\"\"\" Computes the derivative of the routing loss with respect to each decision\r\n\r\n node.\r\n\r\n max_nodes: The number of nodes in the tree.\r\n\r\n tree_parameters: `tree_parameters[i]` gives the weight of\r\n the logistic regression model that translates from node features to\r\n probabilities.\r\n tree_biases: `tree_biases[i]` gives the bias of the logistic\r\n regression model that translates from node features to\r\n probabilities.\r\n routes: The routes computed by routing_function_op.\r\n\r\n routing_gradient: `routing_gradient` provides du / df, where u is the routing\r\n function and f is the (vector of) decision functions. A decision function\r\n f_i computes the routing decision at node i.\r\n\r\n f_i is parameterized by t_i (parameters) and b_i (bias) and takes data x as\r\n input. 
This op is called in training_ops.py to compute du / df, and we use\r\n that to compute\r\n\r\n du / dx = du / df * df / dx,\r\n du / dt = du / df * df / dt, and\r\n du / db = du / df * df / db.\r\n\r\n Args:\r\n input_data: A `Tensor` of type `float32`.\r\n tree_parameters: A `Tensor` of type `float32`.\r\n tree_biases: A `Tensor` of type `float32`.\r\n routes: A `Tensor` of type `float32`.\r\n max_nodes: An `int`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n max_nodes = _execute.make_int(max_nodes, \"max_nodes\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"RoutingGradient\", input_data=input_data,\r\n tree_parameters=tree_parameters, tree_biases=tree_biases,\r\n routes=routes, max_nodes=max_nodes, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"max_nodes\", _op.get_attr(\"max_nodes\"))\r\n _execute.record_gradient(\r\n \"RoutingGradient\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"RoutingGradient\", name, _ctx._post_execution_callbacks, input_data,\r\n tree_parameters, tree_biases, routes, \"max_nodes\", max_nodes)\r\n return _result\r\n except _core._FallbackException:\r\n return routing_gradient_eager_fallback(\r\n input_data, tree_parameters, tree_biases, routes,\r\n max_nodes=max_nodes, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef routing_gradient_eager_fallback(input_data, tree_parameters, tree_biases, routes, max_nodes, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function routing_gradient\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n max_nodes = _execute.make_int(max_nodes, \"max_nodes\")\r\n input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)\r\n tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)\r\n tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)\r\n routes = _ops.convert_to_tensor(routes, _dtypes.float32)\r\n _inputs_flat = [input_data, tree_parameters, tree_biases, routes]\r\n _attrs = (\"max_nodes\", max_nodes)\r\n _result = _execute.execute(b\"RoutingGradient\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"RoutingGradient\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n_ops.RegisterShape(\"RoutingGradient\")(None)\r\n\r\n\r\n_stochastic_hard_routing_function_outputs = [\"path_probability\", \"path\"]\r\n_StochasticHardRoutingFunctionOutput = _collections.namedtuple(\r\n \"StochasticHardRoutingFunction\",\r\n _stochastic_hard_routing_function_outputs)\r\n\r\n\r\n@tf_export('stochastic_hard_routing_function')\r\ndef stochastic_hard_routing_function(input_data, tree_parameters, tree_biases, tree_depth, random_seed, name=None):\r\n r\"\"\" Samples a path for each instance in `input_data` and returns the\r\n\r\n probability of the path and the path taken.\r\n\r\n tree_depth: The depth of the decision tree.\r\n random_seed: The base random seed.\r\n\r\n input_data: The training batch's 
features as a 2-d tensor; `input_data[i][j]`\r\n gives the j-th feature of the i-th input.\r\n tree_parameters: `tree_parameters[i]` gives the weight of\r\n the logistic regression model that translates from node features to\r\n probabilities.\r\n tree_biases: `tree_biases[i]` gives the bias of the logistic\r\n regression model that translates from node features to\r\n probabilities.\r\n\r\n path_probability: `path_probability[i]` gives the probability of reaching each\r\n node in `path[i]`.\r\n path: `path[i][j]` gives the jth node in the path taken by the ith data\r\n instance.\r\n\r\n Args:\r\n input_data: A `Tensor` of type `float32`.\r\n tree_parameters: A `Tensor` of type `float32`.\r\n tree_biases: A `Tensor` of type `float32`.\r\n tree_depth: An `int`.\r\n random_seed: An `int`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (path_probability, path).\r\n\r\n path_probability: A `Tensor` of type `float32`.\r\n path: A `Tensor` of type `int32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n tree_depth = _execute.make_int(tree_depth, \"tree_depth\")\r\n random_seed = _execute.make_int(random_seed, \"random_seed\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StochasticHardRoutingFunction\", input_data=input_data,\r\n tree_parameters=tree_parameters, tree_biases=tree_biases,\r\n tree_depth=tree_depth, random_seed=random_seed, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"tree_depth\", _op.get_attr(\"tree_depth\"), \"random_seed\",\r\n _op.get_attr(\"random_seed\"))\r\n _execute.record_gradient(\r\n \"StochasticHardRoutingFunction\", _inputs_flat, _attrs, _result, name)\r\n _result = _StochasticHardRoutingFunctionOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"StochasticHardRoutingFunction\", name, _ctx._post_execution_callbacks,\r\n input_data, tree_parameters, tree_biases, \"tree_depth\", tree_depth,\r\n \"random_seed\", random_seed)\r\n _result = _StochasticHardRoutingFunctionOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return stochastic_hard_routing_function_eager_fallback(\r\n input_data, tree_parameters, tree_biases, tree_depth=tree_depth,\r\n random_seed=random_seed, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef stochastic_hard_routing_function_eager_fallback(input_data, tree_parameters, tree_biases, tree_depth, random_seed, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function stochastic_hard_routing_function\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n tree_depth = _execute.make_int(tree_depth, \"tree_depth\")\r\n random_seed = _execute.make_int(random_seed, \"random_seed\")\r\n input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)\r\n tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)\r\n tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)\r\n _inputs_flat = [input_data, tree_parameters, tree_biases]\r\n _attrs = (\"tree_depth\", tree_depth, \"random_seed\", random_seed)\r\n _result = _execute.execute(b\"StochasticHardRoutingFunction\", 
2,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"StochasticHardRoutingFunction\", _inputs_flat, _attrs, _result, name)\r\n _result = _StochasticHardRoutingFunctionOutput._make(_result)\r\n return _result\r\n\r\n_ops.RegisterShape(\"StochasticHardRoutingFunction\")(None)\r\n\r\n\r\n_stochastic_hard_routing_gradient_outputs = [\"routing_gradient\",\r\n \"data_gradient\",\r\n \"parameter_gradient\",\r\n \"bias_gradient\"]\r\n_StochasticHardRoutingGradientOutput = _collections.namedtuple(\r\n \"StochasticHardRoutingGradient\",\r\n _stochastic_hard_routing_gradient_outputs)\r\n\r\n\r\n@tf_export('stochastic_hard_routing_gradient')\r\ndef stochastic_hard_routing_gradient(input_data, tree_parameters, tree_biases, path_probability, path, tree_depth, name=None):\r\n r\"\"\" Computes the derivative of the routing loss with respect to each decision\r\n\r\n node.\r\n\r\n tree_depth: The depth of the decision tree.\r\n\r\n input_data: The training batch's features as a 2-d tensor; `input_data[i][j]`\r\n gives the j-th feature of the i-th input\r\n tree_parameters: `tree_parameters[i]` gives the weight of\r\n the logistic regression model that translates from node features to\r\n probabilities.\r\n tree_biases: `tree_biases[i]` gives the bias of the logistic\r\n regression model that translates from node features to\r\n probabilities.\r\n path_probability: `path_probability[i]` gives the probability of reaching each\r\n node in `path[i]`.\r\n path: `path[i][j]` gives the jth node in the path taken by the ith data\r\n instance.\r\n\r\n routing_gradient: `routing_gradient` provides du / df, where u is the routing\r\n function and f is the (vector of) decision functions. A decision function\r\n f_i computes the routing decision at node i.\r\n data_gradient: `data_gradient` provides df / dx, where f is the (vector\r\n of) decision functions and x is a batch of data.\r\n parameter_gradient: `parameter_gradient` provides df / dw, where f is the\r\n (vector of) decision functions and w is the matrix of parameters that\r\n determine how instances are routed through a tree.\r\n bias_gradient: `bias_gradient` provides df / db, where f is the\r\n (vector of) decision functions and b is the vector of bias parameters that\r\n determine how instances are routed through a tree.\r\n\r\n f_i is parameterized by t_i (parameters) and b_i (bias) and takes data x as\r\n input. 
This op is called in training_ops.py to compute du / df, and we use\r\n that to compute\r\n\r\n du / dx = du / df * df / dx,\r\n du / dt = du / df * df / dt, and\r\n du / db = du / df * df / db.\r\n\r\n Args:\r\n input_data: A `Tensor` of type `float32`.\r\n tree_parameters: A `Tensor` of type `float32`.\r\n tree_biases: A `Tensor` of type `float32`.\r\n path_probability: A `Tensor` of type `float32`.\r\n path: A `Tensor` of type `int32`.\r\n tree_depth: An `int`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (routing_gradient, data_gradient, parameter_gradient, bias_gradient).\r\n\r\n routing_gradient: A `Tensor` of type `float32`.\r\n data_gradient: A `Tensor` of type `float32`.\r\n parameter_gradient: A `Tensor` of type `float32`.\r\n bias_gradient: A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n tree_depth = _execute.make_int(tree_depth, \"tree_depth\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StochasticHardRoutingGradient\", input_data=input_data,\r\n tree_parameters=tree_parameters, tree_biases=tree_biases,\r\n path_probability=path_probability, path=path, tree_depth=tree_depth,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"tree_depth\", _op.get_attr(\"tree_depth\"))\r\n _execute.record_gradient(\r\n \"StochasticHardRoutingGradient\", _inputs_flat, _attrs, _result, name)\r\n _result = _StochasticHardRoutingGradientOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"StochasticHardRoutingGradient\", name, _ctx._post_execution_callbacks,\r\n input_data, tree_parameters, tree_biases, path_probability, path,\r\n \"tree_depth\", tree_depth)\r\n _result = _StochasticHardRoutingGradientOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return stochastic_hard_routing_gradient_eager_fallback(\r\n input_data, tree_parameters, tree_biases, path_probability, path,\r\n tree_depth=tree_depth, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef stochastic_hard_routing_gradient_eager_fallback(input_data, tree_parameters, tree_biases, path_probability, path, tree_depth, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function stochastic_hard_routing_gradient\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n tree_depth = _execute.make_int(tree_depth, \"tree_depth\")\r\n input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)\r\n tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)\r\n tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)\r\n path_probability = _ops.convert_to_tensor(path_probability, _dtypes.float32)\r\n path = _ops.convert_to_tensor(path, _dtypes.int32)\r\n _inputs_flat = [input_data, tree_parameters, tree_biases, path_probability, path]\r\n _attrs = (\"tree_depth\", tree_depth)\r\n _result = _execute.execute(b\"StochasticHardRoutingGradient\", 4,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"StochasticHardRoutingGradient\", _inputs_flat, _attrs, _result, name)\r\n _result = 
_StochasticHardRoutingGradientOutput._make(_result)\r\n return _result\r\n\r\n_ops.RegisterShape(\"StochasticHardRoutingGradient\")(None)\r\n\r\n\r\n@tf_export('unpack_path')\r\ndef unpack_path(path, path_values, name=None):\r\n r\"\"\" Takes a batch of paths through a tree and a batch of values along those paths\r\n\r\n and returns a batch_size by num_nodes encoding of the path values.\r\n\r\n path: `path[i][j]` gives the jth node in the path taken by the ith data\r\n instance.\r\n path_values: `path_values[i][j]` gives the value associated with node j in the\r\n path defined by the ith instance\r\n\r\n unpacked_paths: `unpacked_paths[i][path[i][k]]` is path_values[i][k] for k in\r\n [0, tree_depth). All other elements of unpacked_paths are zero.\r\n\r\n Args:\r\n path: A `Tensor` of type `int32`.\r\n path_values: A `Tensor` of type `float32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"UnpackPath\", path=path, path_values=path_values, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = None\r\n _execute.record_gradient(\r\n \"UnpackPath\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"UnpackPath\",\r\n name, _ctx._post_execution_callbacks, path, path_values)\r\n return _result\r\n except _core._FallbackException:\r\n return unpack_path_eager_fallback(\r\n path, path_values, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef unpack_path_eager_fallback(path, path_values, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unpack_path\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n path = _ops.convert_to_tensor(path, _dtypes.int32)\r\n path_values = _ops.convert_to_tensor(path_values, _dtypes.float32)\r\n _inputs_flat = [path, path_values]\r\n _attrs = None\r\n _result = _execute.execute(b\"UnpackPath\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"UnpackPath\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n_ops.RegisterShape(\"UnpackPath\")(None)\r\n\r\ndef _InitOpDefLibrary(op_list_proto_bytes):\r\n op_list = _op_def_pb2.OpList()\r\n op_list.ParseFromString(op_list_proto_bytes)\r\n _op_def_registry.register_op_list(op_list)\r\n op_def_lib = _op_def_library.OpDefLibrary()\r\n op_def_lib.add_op_list(op_list)\r\n return op_def_lib\r\n# op {\r\n# name: \"HardRoutingFunction\"\r\n# input_arg {\r\n# name: \"input_data\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_parameters\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_biases\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"path_probability\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"path\"\r\n# type: DT_INT32\r\n# }\r\n# attr {\r\n# name: \"max_nodes\"\r\n# type: \"int\"\r\n# }\r\n# attr {\r\n# name: \"tree_depth\"\r\n# type: \"int\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"KFeatureGradient\"\r\n# input_arg {\r\n# name: 
\"input_data\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_parameters\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_biases\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"routes\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"routing_gradient\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"data_gradient\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"weight_gradient\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"layer_num\"\r\n# type: \"int\"\r\n# }\r\n# attr {\r\n# name: \"random_seed\"\r\n# type: \"int\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"KFeatureRoutingFunction\"\r\n# input_arg {\r\n# name: \"input_data\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_parameters\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_biases\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"probabilities\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"layer_num\"\r\n# type: \"int\"\r\n# }\r\n# attr {\r\n# name: \"max_nodes\"\r\n# type: \"int\"\r\n# }\r\n# attr {\r\n# name: \"num_features_per_node\"\r\n# type: \"int\"\r\n# }\r\n# attr {\r\n# name: \"random_seed\"\r\n# type: \"int\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"RoutingFunction\"\r\n# input_arg {\r\n# name: \"input_data\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_parameters\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_biases\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"probabilities\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"max_nodes\"\r\n# type: \"int\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"RoutingGradient\"\r\n# input_arg {\r\n# name: \"input_data\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_parameters\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_biases\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"routes\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"routing_gradient\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"max_nodes\"\r\n# type: \"int\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"StochasticHardRoutingFunction\"\r\n# input_arg {\r\n# name: \"input_data\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_parameters\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_biases\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"path_probability\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"path\"\r\n# type: DT_INT32\r\n# }\r\n# attr {\r\n# name: \"tree_depth\"\r\n# type: \"int\"\r\n# }\r\n# attr {\r\n# name: \"random_seed\"\r\n# type: \"int\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"StochasticHardRoutingGradient\"\r\n# input_arg {\r\n# name: \"input_data\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_parameters\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"tree_biases\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"path_probability\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"path\"\r\n# type: DT_INT32\r\n# }\r\n# output_arg {\r\n# name: \"routing_gradient\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"data_gradient\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"parameter_gradient\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"bias_gradient\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"tree_depth\"\r\n# type: \"int\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"UnpackPath\"\r\n# input_arg {\r\n# name: \"path\"\r\n# type: DT_INT32\r\n# }\r\n# input_arg 
{\r\n# name: \"path_values\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"unpacked_path\"\r\n# type: DT_FLOAT\r\n# }\r\n# }\r\n_op_def_lib = _InitOpDefLibrary(b\"\\n\\220\\001\\n\\023HardRoutingFunction\\022\\016\\n\\ninput_data\\030\\001\\022\\023\\n\\017tree_parameters\\030\\001\\022\\017\\n\\013tree_biases\\030\\001\\032\\024\\n\\020path_probability\\030\\001\\032\\010\\n\\004path\\030\\003\\\"\\020\\n\\tmax_nodes\\022\\003int\\\"\\021\\n\\ntree_depth\\022\\003int\\n\\270\\001\\n\\020KFeatureGradient\\022\\016\\n\\ninput_data\\030\\001\\022\\023\\n\\017tree_parameters\\030\\001\\022\\017\\n\\013tree_biases\\030\\001\\022\\n\\n\\006routes\\030\\001\\032\\024\\n\\020routing_gradient\\030\\001\\032\\021\\n\\rdata_gradient\\030\\001\\032\\023\\n\\017weight_gradient\\030\\001\\\"\\020\\n\\tlayer_num\\022\\003int\\\"\\022\\n\\013random_seed\\022\\003int\\n\\270\\001\\n\\027KFeatureRoutingFunction\\022\\016\\n\\ninput_data\\030\\001\\022\\023\\n\\017tree_parameters\\030\\001\\022\\017\\n\\013tree_biases\\030\\001\\032\\021\\n\\rprobabilities\\030\\001\\\"\\020\\n\\tlayer_num\\022\\003int\\\"\\020\\n\\tmax_nodes\\022\\003int\\\"\\034\\n\\025num_features_per_node\\022\\003int\\\"\\022\\n\\013random_seed\\022\\003int\\nl\\n\\017RoutingFunction\\022\\016\\n\\ninput_data\\030\\001\\022\\023\\n\\017tree_parameters\\030\\001\\022\\017\\n\\013tree_biases\\030\\001\\032\\021\\n\\rprobabilities\\030\\001\\\"\\020\\n\\tmax_nodes\\022\\003int\\n{\\n\\017RoutingGradient\\022\\016\\n\\ninput_data\\030\\001\\022\\023\\n\\017tree_parameters\\030\\001\\022\\017\\n\\013tree_biases\\030\\001\\022\\n\\n\\006routes\\030\\001\\032\\024\\n\\020routing_gradient\\030\\001\\\"\\020\\n\\tmax_nodes\\022\\003int\\n\\234\\001\\n\\035StochasticHardRoutingFunction\\022\\016\\n\\ninput_data\\030\\001\\022\\023\\n\\017tree_parameters\\030\\001\\022\\017\\n\\013tree_biases\\030\\001\\032\\024\\n\\020path_probability\\030\\001\\032\\010\\n\\004path\\030\\003\\\"\\021\\n\\ntree_depth\\022\\003int\\\"\\022\\n\\013random_seed\\022\\003int\\n\\334\\001\\n\\035StochasticHardRoutingGradient\\022\\016\\n\\ninput_data\\030\\001\\022\\023\\n\\017tree_parameters\\030\\001\\022\\017\\n\\013tree_biases\\030\\001\\022\\024\\n\\020path_probability\\030\\001\\022\\010\\n\\004path\\030\\003\\032\\024\\n\\020routing_gradient\\030\\001\\032\\021\\n\\rdata_gradient\\030\\001\\032\\026\\n\\022parameter_gradient\\030\\001\\032\\021\\n\\rbias_gradient\\030\\001\\\"\\021\\n\\ntree_depth\\022\\003int\\n:\\n\\nUnpackPath\\022\\010\\n\\004path\\030\\003\\022\\017\\n\\013path_values\\030\\001\\032\\021\\n\\runpacked_path\\030\\001\")\r\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Aliases for logit_fn builders used by canned (core) tf.Estimator's.\r\n\r\nA logit_fn is an abstraction within model_fn that factors out the logit\r\nconstruction logic. Its output can be fed into Heads or otherwise composed. It\r\nshould follow the following signature:\r\n\r\nArgs:\r\n`features`: This is the first item returned from the `input_fn` passed to\r\n `train`, `evaluate`, and `predict`. This should be a single\r\n `Tensor` or `dict` of same, and is the only required argument.\r\n`mode`: Optional. Specifies if this training, evaluation or prediction. See\r\n `ModeKeys`.\r\n`params`: Optional `dict` of hyperparameters. Will receive what is passed to\r\n Estimator in `params` parameter. This allows configuration of\r\n Estimators from hyperparameter tuning.\r\n`config`: Optional configuration object. Will receive what is passed to\r\n Estimator in `config` parameter, or the default `config`. Allows\r\n updating things in your model_fn based on configuration such as\r\n `num_ps_replicas`, or `model_dir`.\r\n\r\nReturns:\r\n A Tensor representing the logits.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport six\r\n\r\nfrom tensorflow.python.estimator.canned import dnn as dnn_core\r\nfrom tensorflow.python.estimator.canned import linear as linear_core\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.util import function_utils\r\n\r\n# pylint: disable=protected-access\r\ndnn_logit_fn_builder = dnn_core._dnn_logit_fn_builder\r\nlinear_logit_fn_builder = linear_core._linear_logit_fn_builder\r\n# pylint: enable=protected-access\r\n\r\n\r\ndef call_logit_fn(logit_fn, features, mode, params, config):\r\n \"\"\"Calls logit_fn.\r\n\r\n A utility function that calls the provided logit_fn with the relevant subset\r\n of provided arguments. 
Similar to tf.estimator._call_model_fn().\r\n\r\n Args:\r\n logit_fn: A logit_fn as defined above.\r\n features: The features dict.\r\n mode: TRAIN / EVAL / PREDICT ModeKeys.\r\n params: The hyperparameter dict.\r\n config: The configuration object.\r\n\r\n Returns:\r\n A logit Tensor, the output of logit_fn.\r\n\r\n Raises:\r\n ValueError: if logit_fn does not return a Tensor or a dictionary mapping\r\n strings to Tensors.\r\n \"\"\"\r\n logit_fn_args = function_utils.fn_args(logit_fn)\r\n kwargs = {}\r\n if 'mode' in logit_fn_args:\r\n kwargs['mode'] = mode\r\n if 'params' in logit_fn_args:\r\n kwargs['params'] = params\r\n if 'config' in logit_fn_args:\r\n kwargs['config'] = config\r\n logit_fn_results = logit_fn(features=features, **kwargs)\r\n\r\n result_is_valid_dictionary = (\r\n isinstance(logit_fn_results, dict) and\r\n all([(isinstance(k, six.string_types) and isinstance(v, ops.Tensor))\r\n for k, v in six.iteritems(logit_fn_results)]))\r\n result_is_tensor = isinstance(logit_fn_results, ops.Tensor)\r\n\r\n if not (result_is_valid_dictionary or result_is_tensor):\r\n raise ValueError('logit_fn should return a Tensor or a dictionary mapping '\r\n 'strings to Tensors. logit_fn returned: %s' %\r\n logit_fn_results)\r\n\r\n return logit_fn_results\r\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"TensorFlow collective Ops.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.framework import device\r\nfrom tensorflow.python.ops import gen_collective_ops\r\n\r\n\r\ndef all_reduce(t, group_size, group_key, instance_key, merge_op, final_op,\r\n subdiv_offsets=(0,)):\r\n \"\"\"Reduces tensors collectively, across devices.\r\n\r\n Args:\r\n t: the tensor to be reduced.\r\n group_size: the total number of tensors to be collectively reduced.\r\n Each must reside on a different device.\r\n group_key: an integer identifying the group of devices.\r\n instance_key: an integer identifying the participating group of Ops.\r\n merge_op: string naming the binary Op to be applied to compute each\r\n partial reduction.\r\n final_op: string naming the unary Op to be applied to each fully\r\n reduced value. Can be 'Id' for no operation.\r\n subdiv_offsets: a list of integer offsets into the tensor at which each\r\n independent subdivision should begin. Use [0] if no subdivision should\r\n be done.\r\n\r\n Returns:\r\n An Op implementing the distributed reduction.\r\n\r\n Raises:\r\n ValueError: if any of the input parameter constraints are not met.\r\n \"\"\"\r\n if not device.canonical_name(t.device):\r\n raise ValueError('Device assignment required for collective ops')\r\n if group_size <= 1:\r\n raise ValueError('Parameter group_size to add_reduce must be at least 2.')\r\n return gen_collective_ops.collective_reduce(t,\r\n group_size=group_size,\r\n group_key=group_key,\r\n instance_key=instance_key,\r\n merge_op=merge_op,\r\n final_op=final_op,\r\n subdiv_offsets=subdiv_offsets)\r\n\r\n\r\ndef broadcast_send(t, shape, dtype, group_size, group_key, instance_key):\r\n \"\"\"Broadcasts one tensor to a group of others, across devices.\r\n\r\n Args:\r\n t: the tensor to be sent.\r\n shape: the shape of the tensor being sent, which must agree with t.\r\n dtype: the type of the tensor being sent, which must agree with t.\r\n group_size: one plus the number of receiving tensors, i.e. the total\r\n number of devices participating. Each tensor must reside on a\r\n different device.\r\n group_key: an integer identifying the group of devices.\r\n instance_key: an integer identifying the participating group of Ops.\r\n\r\n Returns:\r\n An Op implementing the distributed broadcast send.\r\n\r\n Raises:\r\n ValueError: if any of the input parameter constraints are not met.\r\n\r\n Note that the shape and dtype arguments appear redundant since they\r\n should be obtainable from t. The are two reasons for including\r\n them. 
First, the shape and type of tensors passed via broadcast must\r\n be known ahead of time in their most specific form so that the receive\r\n side can allocate memory for the operation and shape/type inference can\r\n carry forward from there. Including the same declarations on the\r\n send side clarifies a commitment already made. Secondly, having nearly\r\n identical use syntax for send and receive sides may simplify tool-driven\r\n generation of broadcast.\r\n \"\"\"\r\n if not device.canonical_name(t.device):\r\n raise ValueError('Device assignment required for collective ops')\r\n if group_size <= 1:\r\n raise ValueError(\r\n 'Parameter group_size to broadcast_send must be at least 2.')\r\n if t.shape != shape:\r\n raise ValueError(\r\n 'Shape of broadcast_send tensor not equal to declared shape')\r\n if t.dtype != dtype:\r\n raise ValueError(\r\n 'Type of broadcast_send tensor not equal to declared type')\r\n return gen_collective_ops.collective_bcast_send(t,\r\n shape=shape,\r\n group_size=group_size,\r\n group_key=group_key,\r\n instance_key=instance_key)\r\n\r\n\r\ndef broadcast_recv(shape, dtype, group_size, group_key, instance_key):\r\n \"\"\"Receives a broadcasted tensor, across devices.\r\n\r\n Args:\r\n shape: Shape of the tensor to be received.\r\n dtype: Type of the tensor to be received.\r\n group_size: one plus the number of receiving tensors, i.e. the total\r\n number of devices participating. Each tensor must reside on a\r\n different device.\r\n group_key: an integer identifying the group of devices.\r\n instance_key: an integer identifying the participating group of Ops.\r\n\r\n Returns:\r\n An Op implementing the broadcast receive.\r\n\r\n Raises:\r\n ValueError: if any of the input parameter constraints are not met.\r\n \"\"\"\r\n if group_size <= 1:\r\n raise ValueError(\r\n 'Parameter group_size to broadcast_recv must be at least 2.')\r\n return gen_collective_ops.collective_bcast_recv(shape=shape,\r\n T=dtype,\r\n group_size=group_size,\r\n group_key=group_key,\r\n instance_key=instance_key)\r\n",
"\"\"\"Utilities for saving/loading Checkpointable objects.\"\"\"\r\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport abc\r\nimport collections\r\nimport os\r\nimport weakref\r\n\r\nfrom tensorflow.core.protobuf import checkpointable_object_graph_pb2\r\nfrom tensorflow.python import pywrap_tensorflow\r\nfrom tensorflow.python.client import session as session_lib\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import errors_impl\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_io_ops as io_ops\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.ops import variables\r\nfrom tensorflow.python.training import checkpoint_management\r\nfrom tensorflow.python.training import optimizer as optimizer_lib\r\nfrom tensorflow.python.training import saveable_object as saveable_object_lib\r\nfrom tensorflow.python.training import saver as saver_lib\r\nfrom tensorflow.python.training.checkpointable import base\r\nfrom tensorflow.python.training.checkpointable import data_structures\r\nfrom tensorflow.python.training.checkpointable import tracking\r\nfrom tensorflow.python.util import deprecation\r\nfrom tensorflow.python.util import tf_contextlib\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\n_ESCAPE_CHAR = \".\" # For avoiding conflicts with user-specified names.\r\n\r\n# Keyword for identifying that the next bit of a checkpoint variable name is a\r\n# slot name. Checkpoint names for slot variables look like:\r\n#\r\n# <path to variable>/<_OPTIMIZER_SLOTS_NAME>/<path to optimizer>/<slot name>\r\n#\r\n# Where <path to variable> is a full path from the checkpoint root to the\r\n# variable being slotted for.\r\n_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + \"OPTIMIZER_SLOT\"\r\n# Keyword for separating the path to an object from the name of an\r\n# attribute in checkpoint names. 
Used like:\r\n# <path to variable>/<_OBJECT_ATTRIBUTES_NAME>/<name of attribute>\r\n_OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + \"ATTRIBUTES\"\r\n\r\n\r\nclass _CheckpointRestoreCoordinator(object):\r\n \"\"\"Holds the status of an object-based checkpoint load.\"\"\"\r\n\r\n def __init__(self, object_graph_proto, save_path, save_path_tensor,\r\n restore_op_cache, saveable_object_cache):\r\n \"\"\"Specify the checkpoint being loaded.\r\n\r\n Args:\r\n object_graph_proto: The CheckpointableObjectGraph protocol buffer\r\n associated with this checkpoint.\r\n save_path: A string, the path to the checkpoint, as returned by\r\n `tf.train.latest_checkpoint`.\r\n save_path_tensor: A string `Tensor` which contains or will be fed the save\r\n path.\r\n restore_op_cache: A dictionary shared between\r\n `_CheckpointRestoreCoordinator`s for the same Python objects, used to\r\n look up restore ops by name to avoid re-creating them across multiple\r\n `restore()` calls.\r\n saveable_object_cache: A mapping of checkpointable objects -> attribute\r\n names -> list(`SaveableObject`s), used when `SaveableObjects` must be\r\n referenced every restore (e.g. for Python state); otherwise they would\r\n create their own ops every restore.\r\n \"\"\"\r\n self.builder = saver_lib.BulkSaverBuilder()\r\n self.object_graph_proto = object_graph_proto\r\n self.restore_uid = ops.uid()\r\n # Maps from objects to lists of attributes which were in the checkpoint but\r\n # not loaded into any object, for error checking.\r\n self.unused_attributes = weakref.WeakKeyDictionary()\r\n # Dictionary mapping from an id in the protocol buffer flat array to\r\n # Checkpointable Python objects. This mapping may be deferred if a\r\n # checkpoint is restored before all dependencies have been tracked. Uses\r\n # weak references so that partial restorations don't create reference cycles\r\n # (as objects with deferred dependencies will generally have references to\r\n # this object).\r\n self.object_by_proto_id = weakref.WeakValueDictionary()\r\n # A set of all Python objects we've seen as dependencies, even if we didn't\r\n # use them (for example because of inconsistent references when\r\n # loading). Used to make status assertions fail when loading checkpoints\r\n # that don't quite match.\r\n self.all_python_objects = _ObjectIdentityWeakSet()\r\n self.save_path_tensor = save_path_tensor\r\n self.save_path_string = save_path\r\n self.dtype_map = pywrap_tensorflow.NewCheckpointReader(\r\n save_path).get_variable_to_dtype_map()\r\n # A NewCheckpointReader for the most recent checkpoint, for streaming Python\r\n # state restoration.\r\n # When graph building, contains a list of ops to run to restore objects from\r\n # this checkpoint.\r\n self.restore_ops = []\r\n self.restore_ops_by_name = restore_op_cache\r\n self.saveable_object_cache = saveable_object_cache\r\n self.new_restore_ops_callback = None\r\n # A mapping from optimizer proto ids to lists of slot variables to be\r\n # restored when the optimizer is tracked. Only includes slot variables whose\r\n # regular variables have already been created, and only for optimizer\r\n # objects which have not yet been created/tracked.\r\n self.deferred_slot_restorations = {}\r\n # A mapping from variable proto ids to lists of slot variables to be\r\n # restored when the variable is created/tracked. 
These get shifted over to\r\n # deferred_slot_restorations if the optimizer hasn't been created when that\r\n # happens.\r\n self.slot_restorations = {}\r\n for node_index, node in enumerate(self.object_graph_proto.nodes):\r\n for slot_reference in node.slot_variables:\r\n # `node` refers to an `Optimizer`, since only these have slot variables.\r\n self.slot_restorations.setdefault(\r\n slot_reference.original_variable_node_id, []).append(\r\n base._SlotVariableRestoration( # pylint: disable=protected-access\r\n optimizer_id=node_index,\r\n slot_variable_id=slot_reference.slot_variable_node_id,\r\n slot_name=slot_reference.slot_name))\r\n\r\n def new_restore_ops(self, new_ops):\r\n self.restore_ops.extend(new_ops)\r\n if self.new_restore_ops_callback:\r\n self.new_restore_ops_callback(new_ops) # pylint: disable=not-callable\r\n\r\n\r\nclass _NameBasedRestoreCoordinator(object):\r\n \"\"\"Keeps the status of a name-based checkpoint restore.\"\"\"\r\n\r\n def __init__(self, save_path, dtype_map=None):\r\n self.save_path = save_path\r\n self.dtype_map = dtype_map\r\n self.unused_attributes = weakref.WeakKeyDictionary()\r\n self.restore_uid = ops.uid()\r\n\r\n def globally_named_object_attributes(self, checkpointable):\r\n \"\"\"Create globally named SaveableObjects from attributes.\r\n\r\n If an object's attribute has no global name specified (default construction\r\n for the SaveableObject factory), records the failure in\r\n `self.unused_attributes` (which can then be used to make status assertions\r\n fail; see `NameBasedSaverStatus`).\r\n\r\n Args:\r\n checkpointable: An object to save.\r\n\r\n Yields:\r\n SaveableObjects for `checkpointable`'s attributes.\r\n \"\"\"\r\n for attribute_name, saveable_factory in (\r\n checkpointable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access\r\n if callable(saveable_factory):\r\n try:\r\n # This saveable object factory does not have a default name= argument,\r\n # which means there's no way to save/restore it using a name-based\r\n # checkpoint. 
Ignore the error now and make sure assert_consumed()\r\n # fails.\r\n saveable = saveable_factory()\r\n except TypeError:\r\n self.unused_attributes.setdefault(checkpointable, []).append(\r\n attribute_name)\r\n continue\r\n else:\r\n saveable = saveable_factory\r\n names_to_saveables = saver_lib.BaseSaverBuilder.OpListToDict(\r\n [saveable],\r\n convert_variable_to_tensor=False)\r\n for name, op in names_to_saveables.items():\r\n for saveable_object in saver_lib.BaseSaverBuilder.SaveableObjectsForOp(\r\n op=op, name=name):\r\n yield saveable_object\r\n\r\n def eager_restore(self, checkpointable):\r\n \"\"\"Runs restore ops for `checkpointable`'s attributes.\"\"\"\r\n # When graph building, we don't add any restore ops to the graph until\r\n # run_restore_ops/initialize_or_restore on the status object for name-based\r\n # checkpoints.\r\n assert context.executing_eagerly()\r\n for saveable in self.globally_named_object_attributes(\r\n checkpointable):\r\n restored_tensors = []\r\n tensor_missing = False\r\n for spec in saveable.specs:\r\n if spec.name in self.dtype_map:\r\n with ops.device(\"cpu:0\"):\r\n restored, = io_ops.restore_v2(\r\n prefix=self.save_path,\r\n tensor_names=[spec.name],\r\n shape_and_slices=[\"\"],\r\n dtypes=[self.dtype_map[spec.name]],\r\n name=\"%s_checkpoint_read\" % (spec.name,))\r\n restored_tensors.append(array_ops.identity(restored))\r\n else:\r\n tensor_missing = True\r\n\r\n if not tensor_missing:\r\n # Ignores values missing from the checkpoint, as with object-based\r\n # restore. Status assertions can be used to check exact matches,\r\n # although it's unlikely to ever happen for name-based checkpoints.\r\n saveable.restore(restored_tensors=restored_tensors,\r\n restored_shapes=None)\r\n\r\n\r\n# TODO(allenl): If this ends up in a public API, consider adding LINT.IfChange\r\n# or consolidating the implementation with get_variable.\r\ndef _default_getter(name, shape, dtype, initializer=None,\r\n partition_info=None, **kwargs):\r\n \"\"\"A pared-down version of get_variable which does not reuse variables.\"\"\"\r\n dtype = dtypes.as_dtype(dtype)\r\n shape_object = tensor_shape.as_shape(shape)\r\n with ops.init_scope():\r\n if initializer is None:\r\n initializer, initializing_from_value = (\r\n variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access\r\n name=name, shape=shape_object, dtype=dtype))\r\n else:\r\n initializing_from_value = not callable(initializer)\r\n # Same logic as get_variable\r\n variable_dtype = dtype.base_dtype\r\n if initializing_from_value:\r\n if shape is not None:\r\n raise ValueError(\"If initializer is a constant, do not specify shape.\")\r\n initial_value = initializer\r\n else:\r\n # Instantiate initializer if provided initializer is a type object.\r\n if isinstance(initializer, type(init_ops.Initializer)):\r\n initializer = initializer(dtype=dtype)\r\n def initial_value():\r\n return initializer(\r\n shape_object.as_list(), dtype=dtype, partition_info=partition_info)\r\n return variables.VariableV1(\r\n initial_value=initial_value,\r\n name=name,\r\n dtype=variable_dtype,\r\n use_resource=True,\r\n **kwargs\r\n )\r\n\r\n\r\ndef add_variable(checkpointable, name, shape=None, dtype=dtypes.float32,\r\n initializer=None):\r\n \"\"\"Add a variable to a Checkpointable with no scope influence.\"\"\"\r\n return checkpointable._add_variable_with_custom_getter( # pylint: disable=protected-access\r\n name=name, shape=shape, dtype=dtype,\r\n initializer=initializer, 
getter=_default_getter)\r\n\r\n\r\ndef object_metadata(save_path):\r\n \"\"\"Retrieves information about the objects in a checkpoint.\r\n\r\n Example usage:\r\n\r\n ```python\r\n object_graph = tf.contrib.checkpoint.object_metadata(\r\n tf.train.latest_checkpoint(checkpoint_directory))\r\n ckpt_variable_names = set()\r\n for node in object_graph.nodes:\r\n for attribute in node.attributes:\r\n ckpt_variable_names.add(attribute.full_name)\r\n ```\r\n\r\n Args:\r\n save_path: The path to the checkpoint, as returned by `save` or\r\n `tf.train.latest_checkpoint`.\r\n Returns:\r\n A parsed `tf.contrib.checkpoint.CheckpointableObjectGraph` protocol buffer.\r\n Raises:\r\n ValueError: If an object graph was not found in the checkpoint.\r\n \"\"\"\r\n reader = pywrap_tensorflow.NewCheckpointReader(save_path)\r\n try:\r\n object_graph_string = reader.get_tensor(\r\n base.OBJECT_GRAPH_PROTO_KEY)\r\n except errors_impl.NotFoundError:\r\n raise ValueError(\r\n ('The specified checkpoint \"%s\" does not appear to be object-based (it '\r\n 'is missing the key \"%s\"). Likely it was created with a name-based '\r\n 'saver and does not contain an object dependency graph.') % (\r\n save_path, base.OBJECT_GRAPH_PROTO_KEY))\r\n object_graph_proto = (\r\n checkpointable_object_graph_pb2.CheckpointableObjectGraph())\r\n object_graph_proto.ParseFromString(object_graph_string)\r\n return object_graph_proto\r\n\r\n\r\nclass _ObjectIdentityWrapper(object):\r\n \"\"\"Wraps an object, mapping __eq__ on wrapper to \"is\" on wrapped.\r\n\r\n Since __eq__ is based on object identity, it's safe to also define __hash__\r\n based on object ids. This lets us add unhashable types like checkpointable\r\n _ListWrapper objects to object-identity collections.\r\n \"\"\"\r\n\r\n def __init__(self, wrapped):\r\n self._wrapped = wrapped\r\n\r\n @property\r\n def unwrapped(self):\r\n return self._wrapped\r\n\r\n def __eq__(self, other):\r\n if isinstance(other, _ObjectIdentityWrapper):\r\n return self._wrapped is other._wrapped # pylint: disable=protected-access\r\n return self._wrapped is other\r\n\r\n def __hash__(self):\r\n # Wrapper id() is also fine for weakrefs. 
In fact, we rely on\r\n # id(weakref.ref(a)) == id(weakref.ref(a)) and weakref.ref(a) is\r\n # weakref.ref(a) in _WeakObjectIdentityWrapper.\r\n return id(self._wrapped)\r\n\r\n\r\nclass _WeakObjectIdentityWrapper(_ObjectIdentityWrapper):\r\n\r\n def __init__(self, wrapped):\r\n super(_WeakObjectIdentityWrapper, self).__init__(weakref.ref(wrapped))\r\n\r\n @property\r\n def unwrapped(self):\r\n return self._wrapped()\r\n\r\n\r\nclass _ObjectIdentityDictionary(collections.MutableMapping):\r\n \"\"\"A mutable mapping data structure which compares using \"is\".\r\n\r\n This is necessary because we have checkpointable objects (_ListWrapper) which\r\n have behavior identical to built-in Python lists (including being unhashable\r\n and comparing based on the equality of their contents by default).\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self._storage = {}\r\n\r\n def _wrap_key(self, key):\r\n return _ObjectIdentityWrapper(key)\r\n\r\n def __getitem__(self, key):\r\n return self._storage[self._wrap_key(key)]\r\n\r\n def __setitem__(self, key, value):\r\n self._storage[self._wrap_key(key)] = value\r\n\r\n def __delitem__(self, key):\r\n del self._storage[self._wrap_key(key)]\r\n\r\n def __len__(self):\r\n return len(self._storage)\r\n\r\n def __iter__(self):\r\n for key in self._storage:\r\n yield key.unwrapped\r\n\r\n\r\nclass _ObjectIdentityWeakKeyDictionary(_ObjectIdentityDictionary):\r\n \"\"\"Like weakref.WeakKeyDictionary, but compares objects with \"is\".\"\"\"\r\n\r\n def _wrap_key(self, key):\r\n return _WeakObjectIdentityWrapper(key)\r\n\r\n def __len__(self):\r\n # Iterate, discarding old weak refs\r\n return len(list(self._storage))\r\n\r\n def __iter__(self):\r\n keys = self._storage.keys()\r\n for key in keys:\r\n unwrapped = key.unwrapped\r\n if unwrapped is None:\r\n del self[key]\r\n else:\r\n yield unwrapped\r\n\r\n\r\nclass _ObjectIdentitySet(collections.MutableSet):\r\n \"\"\"Like the built-in set, but compares objects with \"is\".\"\"\"\r\n\r\n def __init__(self, *args):\r\n self._storage = set([self._wrap_key(obj) for obj in list(*args)])\r\n\r\n def _wrap_key(self, key):\r\n return _ObjectIdentityWrapper(key)\r\n\r\n def __contains__(self, key):\r\n return self._wrap_key(key) in self._storage\r\n\r\n def discard(self, key):\r\n self._storage.discard(self._wrap_key(key))\r\n\r\n def add(self, key):\r\n self._storage.add(self._wrap_key(key))\r\n\r\n def __len__(self):\r\n return len(self._storage)\r\n\r\n def __iter__(self):\r\n keys = list(self._storage)\r\n for key in keys:\r\n yield key.unwrapped\r\n\r\n\r\nclass _ObjectIdentityWeakSet(_ObjectIdentitySet):\r\n \"\"\"Like weakref.WeakSet, but compares objects with \"is\".\"\"\"\r\n\r\n def _wrap_key(self, key):\r\n return _WeakObjectIdentityWrapper(key)\r\n\r\n def __len__(self):\r\n # Iterate, discarding old weak refs\r\n return len([_ for _ in self])\r\n\r\n def __iter__(self):\r\n keys = list(self._storage)\r\n for key in keys:\r\n unwrapped = key.unwrapped\r\n if unwrapped is None:\r\n self.discard(key)\r\n else:\r\n yield unwrapped\r\n\r\n\r\ndef _breadth_first_checkpointable_traversal(root_checkpointable):\r\n \"\"\"Find shortest paths to all variables owned by dependencies of root.\"\"\"\r\n bfs_sorted = []\r\n to_visit = collections.deque([root_checkpointable])\r\n path_to_root = _ObjectIdentityDictionary()\r\n path_to_root[root_checkpointable] = ()\r\n while to_visit:\r\n current_checkpointable = to_visit.popleft()\r\n if isinstance(current_checkpointable, tracking.NotCheckpointable):\r\n raise 
NotImplementedError(\r\n (\"The object %s does not support object-based saving. File a feature \"\r\n \"request if this limitation bothers you. In the meantime, you can \"\r\n \"remove the dependency on this object and save everything else.\")\r\n % (current_checkpointable,))\r\n current_checkpointable._maybe_initialize_checkpointable() # pylint: disable=protected-access\r\n bfs_sorted.append(current_checkpointable)\r\n for child_checkpointable in (\r\n current_checkpointable._checkpoint_dependencies): # pylint: disable=protected-access\r\n if child_checkpointable.ref not in path_to_root:\r\n path_to_root[child_checkpointable.ref] = (\r\n path_to_root[current_checkpointable] + (child_checkpointable,))\r\n to_visit.append(child_checkpointable.ref)\r\n return bfs_sorted, path_to_root\r\n\r\n\r\ndef _escape_local_name(name):\r\n # We need to support slashes in local names for compatibility, since this\r\n # naming scheme is being patched in to things like Layer.add_variable where\r\n # slashes were previously accepted. We also want to use slashes to indicate\r\n # edges traversed to reach the variable, so we escape forward slashes in\r\n # names.\r\n return (name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR + _ESCAPE_CHAR)\r\n .replace(r\"/\", _ESCAPE_CHAR + \"S\"))\r\n\r\n\r\ndef _object_prefix_from_path(path_to_root):\r\n return \"/\".join(\r\n (_escape_local_name(checkpointable.name)\r\n for checkpointable in path_to_root))\r\n\r\n\r\ndef _slot_variable_naming_for_optimizer(optimizer_path):\r\n \"\"\"Make a function for naming slot variables in an optimizer.\"\"\"\r\n # Name slot variables:\r\n #\r\n # <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>\r\n #\r\n # where <variable name> is exactly the checkpoint name used for the original\r\n # variable, including the path from the checkpoint root and the local name in\r\n # the object which owns it. Note that we only save slot variables if the\r\n # variable it's slotting for is also being saved.\r\n\r\n optimizer_identifier = \"/%s/%s/\" % (_OPTIMIZER_SLOTS_NAME, optimizer_path)\r\n\r\n def _name_slot_variable(variable_path, slot_name):\r\n \"\"\"With an optimizer specified, name a slot variable.\"\"\"\r\n return (variable_path\r\n + optimizer_identifier\r\n + _escape_local_name(slot_name))\r\n\r\n return _name_slot_variable\r\n\r\n\r\ndef _serialize_slot_variables(checkpointable_objects, node_ids, object_names):\r\n \"\"\"Gather and name slot variables.\"\"\"\r\n non_slot_objects = list(checkpointable_objects)\r\n slot_variables = _ObjectIdentityDictionary()\r\n for checkpointable in non_slot_objects:\r\n if isinstance(checkpointable, optimizer_lib.Optimizer):\r\n naming_scheme = _slot_variable_naming_for_optimizer(\r\n optimizer_path=object_names[checkpointable])\r\n slot_names = checkpointable.get_slot_names()\r\n for slot_name in slot_names:\r\n for original_variable_node_id, original_variable in enumerate(\r\n non_slot_objects):\r\n try:\r\n slot_variable = checkpointable.get_slot(\r\n original_variable, slot_name)\r\n except AttributeError:\r\n slot_variable = None\r\n if slot_variable is None:\r\n continue\r\n slot_variable._maybe_initialize_checkpointable() # pylint: disable=protected-access\r\n if slot_variable._checkpoint_dependencies: # pylint: disable=protected-access\r\n # TODO(allenl): Gather dependencies of slot variables.\r\n raise NotImplementedError(\r\n \"Currently only variables with no dependencies can be saved as \"\r\n \"slot variables. 
File a feature request if this limitation \"\r\n \"bothers you.\")\r\n if slot_variable in node_ids:\r\n raise NotImplementedError(\r\n \"A slot variable was re-used as a dependency of a \"\r\n \"Checkpointable object. This is not currently allowed. File a \"\r\n \"feature request if this limitation bothers you.\")\r\n checkpoint_name = naming_scheme(\r\n variable_path=object_names[original_variable],\r\n slot_name=slot_name)\r\n object_names[slot_variable] = checkpoint_name\r\n slot_variable_node_id = len(checkpointable_objects)\r\n node_ids[slot_variable] = slot_variable_node_id\r\n checkpointable_objects.append(slot_variable)\r\n slot_variable_proto = (\r\n checkpointable_object_graph_pb2.CheckpointableObjectGraph\r\n .CheckpointableObject.SlotVariableReference(\r\n slot_name=slot_name,\r\n original_variable_node_id=original_variable_node_id,\r\n slot_variable_node_id=slot_variable_node_id))\r\n slot_variables.setdefault(checkpointable, []).append(\r\n slot_variable_proto)\r\n return slot_variables\r\n\r\n\r\ndef _serialize_checkpointables(\r\n checkpointable_objects, node_ids, object_names, slot_variables,\r\n saveables_cache):\r\n \"\"\"Name non-slot `Checkpointable`s and add them to `object_graph_proto`.\"\"\"\r\n object_graph_proto = (\r\n checkpointable_object_graph_pb2.CheckpointableObjectGraph())\r\n named_saveables = []\r\n if saveables_cache is None:\r\n # No SaveableObject caching. Either we're executing eagerly, or building a\r\n # static save which is specialized to the current Python state.\r\n feed_additions = None\r\n else:\r\n # If we are caching SaveableObjects, we need to build up a feed_dict with\r\n # functions computing volatile Python state to be saved with the checkpoint.\r\n feed_additions = {}\r\n for checkpoint_id, checkpointable in enumerate(checkpointable_objects):\r\n assert node_ids[checkpointable] == checkpoint_id\r\n object_proto = object_graph_proto.nodes.add()\r\n object_proto.slot_variables.extend(slot_variables.get(checkpointable, ()))\r\n object_name = object_names[checkpointable]\r\n if saveables_cache is not None:\r\n cached_attributes = saveables_cache.setdefault(checkpointable, {})\r\n else:\r\n cached_attributes = None\r\n for name, saveable_factory in (\r\n checkpointable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access\r\n attribute = object_proto.attributes.add()\r\n attribute.name = name\r\n attribute.checkpoint_key = \"%s/%s/%s\" % (\r\n object_name, _OBJECT_ATTRIBUTES_NAME, _escape_local_name(name))\r\n if cached_attributes is None:\r\n saveables = None\r\n else:\r\n saveables = cached_attributes.get(name, None)\r\n if saveables is not None:\r\n for saveable in saveables:\r\n if attribute.checkpoint_key not in saveable.name:\r\n # The checkpoint key for this SaveableObject is different. We need\r\n # to re-create it.\r\n saveables = None\r\n del cached_attributes[name]\r\n break\r\n if saveables is None:\r\n if callable(saveable_factory):\r\n maybe_saveable = saveable_factory(name=attribute.checkpoint_key)\r\n else:\r\n maybe_saveable = saveable_factory\r\n if isinstance(maybe_saveable, saveable_object_lib.SaveableObject):\r\n saveables = (maybe_saveable,)\r\n else:\r\n # Figure out the name-based Saver's name for this variable. 
If it's\r\n # already a SaveableObject we'd just get the checkpoint key back, so\r\n # we leave full_name blank.\r\n saver_dict = saver_lib.BaseSaverBuilder.OpListToDict(\r\n [maybe_saveable], convert_variable_to_tensor=False)\r\n full_name, = saver_dict.keys()\r\n saveables = tuple(saver_lib.BaseSaverBuilder.SaveableObjectsForOp(\r\n op=maybe_saveable, name=attribute.checkpoint_key))\r\n for saveable in saveables:\r\n saveable.full_name = full_name\r\n for saveable in saveables:\r\n if attribute.checkpoint_key not in saveable.name:\r\n raise AssertionError(\r\n (\"The object %s produced a SaveableObject with name '%s' for \"\r\n \"attribute '%s'. Expected a name containing '%s'.\")\r\n % (checkpointable, name, saveable.name,\r\n attribute.checkpoint_key))\r\n if cached_attributes is not None:\r\n cached_attributes[name] = saveables\r\n\r\n for saveable in saveables:\r\n if hasattr(saveable, \"full_name\"):\r\n attribute.full_name = saveable.full_name\r\n if isinstance(saveable, base.PythonStateSaveable):\r\n if feed_additions is None:\r\n assert saveables_cache is None\r\n # If we're not caching saveables, then we're either executing\r\n # eagerly or building a static save/restore (e.g. for a\r\n # SavedModel). In either case, we should embed the current Python\r\n # state in the graph rather than relying on a feed dict.\r\n saveable = saveable.freeze()\r\n else:\r\n saveable_feed_dict = saveable.feed_dict_additions()\r\n for new_feed_key in saveable_feed_dict.keys():\r\n if new_feed_key in feed_additions:\r\n raise AssertionError(\r\n (\"The object %s tried to feed a value for the Tensor %s \"\r\n \"when saving, but another object is already feeding a \"\r\n \"value.\")\r\n % (checkpointable, new_feed_key))\r\n feed_additions.update(saveable_feed_dict)\r\n named_saveables.append(saveable)\r\n\r\n for child in checkpointable._checkpoint_dependencies: # pylint: disable=protected-access\r\n child_proto = object_proto.children.add()\r\n child_proto.node_id = node_ids[child.ref]\r\n child_proto.local_name = child.name\r\n\r\n return named_saveables, object_graph_proto, feed_additions\r\n\r\n\r\ndef _serialize_object_graph(root_checkpointable, saveables_cache):\r\n \"\"\"Determine checkpoint keys for variables and build a serialized graph.\r\n\r\n Non-slot variables are keyed based on a shortest path from the root saveable\r\n to the object which owns the variable (i.e. 
the one which called\r\n `Checkpointable._add_variable` to create it).\r\n\r\n Slot variables are keyed based on a shortest path to the variable being\r\n slotted for, a shortest path to their optimizer, and the slot name.\r\n\r\n Args:\r\n root_checkpointable: A `Checkpointable` object whose variables (including\r\n the variables of dependencies, recursively) should be saved.\r\n saveables_cache: A dictionary mapping `Checkpointable` objects -> attribute\r\n names -> SaveableObjects, used to avoid re-creating SaveableObjects when\r\n graph building.\r\n\r\n Returns:\r\n A tuple of (named_variables, object_graph_proto, feed_additions):\r\n named_variables: A dictionary mapping names to variable objects.\r\n object_graph_proto: A CheckpointableObjectGraph protocol buffer containing\r\n the serialized object graph and variable references.\r\n feed_additions: A dictionary mapping from Tensors to values which should\r\n be fed when saving.\r\n\r\n Raises:\r\n ValueError: If there are invalid characters in an optimizer's slot names.\r\n \"\"\"\r\n checkpointable_objects, path_to_root = (\r\n _breadth_first_checkpointable_traversal(root_checkpointable))\r\n object_names = _ObjectIdentityDictionary()\r\n for obj, path in path_to_root.items():\r\n object_names[obj] = _object_prefix_from_path(path)\r\n node_ids = _ObjectIdentityDictionary()\r\n for node_id, node in enumerate(checkpointable_objects):\r\n node_ids[node] = node_id\r\n slot_variables = _serialize_slot_variables(\r\n checkpointable_objects=checkpointable_objects,\r\n node_ids=node_ids,\r\n object_names=object_names)\r\n return _serialize_checkpointables(\r\n checkpointable_objects=checkpointable_objects,\r\n node_ids=node_ids,\r\n object_names=object_names,\r\n slot_variables=slot_variables,\r\n saveables_cache=saveables_cache)\r\n\r\n\r\ndef named_saveables(root_checkpointable):\r\n \"\"\"Gather list of all SaveableObjects in the Checkpointable object.\"\"\"\r\n return _serialize_object_graph(root_checkpointable, None)[0]\r\n\r\n\r\ndef list_objects(root_checkpointable):\r\n \"\"\"Traverse the object graph and list all accessible objects.\r\n\r\n Looks for `Checkpointable` objects which are dependencies of\r\n `root_checkpointable`. Includes slot variables only if the variable they are\r\n slotting for and the optimizer are dependencies of `root_checkpointable`\r\n (i.e. if they would be saved with a checkpoint).\r\n\r\n Args:\r\n root_checkpointable: A `Checkpointable` object whose dependencies should be\r\n flattened.\r\n Returns:\r\n A flat list of objects.\r\n \"\"\"\r\n # TODO(allenl): Extract out gathering logic so the naming logic doesn't have\r\n # to run.\r\n checkpointable_objects, path_to_root = (\r\n _breadth_first_checkpointable_traversal(root_checkpointable))\r\n object_names = _ObjectIdentityDictionary()\r\n for obj, path in path_to_root.items():\r\n object_names[obj] = _object_prefix_from_path(path)\r\n node_ids = _ObjectIdentityDictionary()\r\n for node_id, node in enumerate(checkpointable_objects):\r\n node_ids[node] = node_id\r\n _serialize_slot_variables(\r\n checkpointable_objects=checkpointable_objects,\r\n node_ids=node_ids,\r\n object_names=object_names)\r\n return checkpointable_objects\r\n\r\n\r\ndef gather_initializers(root_checkpointable):\r\n \"\"\"Traverse the object graph and find initialization ops.\r\n\r\n Looks for `Checkpointable` objects which are dependencies of\r\n `root_checkpointable` and which have an `initializer` property. 
Includes\r\n initializers for slot variables only if the variable they are slotting for and\r\n the optimizer are dependencies of `root_checkpointable` (i.e. if they would be\r\n saved with a checkpoint).\r\n\r\n Args:\r\n root_checkpointable: A `Checkpointable` object to gather initializers for.\r\n Returns:\r\n A list of initialization ops.\r\n \"\"\"\r\n checkpointable_objects = list_objects(root_checkpointable)\r\n return [c.initializer for c in checkpointable_objects\r\n if hasattr(c, \"initializer\") and c.initializer is not None]\r\n\r\n\r\n@tf_contextlib.contextmanager\r\ndef capture_dependencies(template):\r\n \"\"\"Capture variables created within this scope as `Template` dependencies.\r\n\r\n Requires that `template.variable_scope` is active.\r\n\r\n This scope is intended as a compatibility measure, allowing a checkpointable\r\n object to add dependencies on variables created in a block of code which is\r\n not aware of object-based saving (and instead uses variable names\r\n heavily). This is how `Template` objects add dependencies on variables and\r\n sub-`Template`s. Where possible, use `tf.make_template` directly.\r\n\r\n Args:\r\n template: The `Template` object to register dependencies with.\r\n\r\n Yields:\r\n None (when used as a context manager).\r\n \"\"\"\r\n name_prefix = template.variable_scope.name\r\n\r\n def _checkpointable_custom_creator(next_creator, name, initial_value,\r\n checkpointable_parent=None, **kwargs):\r\n \"\"\"A variable creation hook which adds Checkpointable dependencies.\r\n\r\n Set for example during a `Template`'s first wrapped function\r\n execution. Ensures that (a) `template` depends on any checkpointable\r\n objects using their own `capture_dependencies` scope inside this scope which\r\n create variables, and (b) that any variables not in a more deeply nested\r\n scope are added as dependencies directly.\r\n\r\n The `checkpointable_parent` argument is passed between custom creators but\r\n ignored when the variable object itself is created. This argument indicates\r\n (if not `None`) that a more deeply nested scope has already added the\r\n variable as a dependency, and that parent scopes should add a dependency on\r\n that object rather than on the variable directly.\r\n\r\n Args:\r\n next_creator: See `variable_scope.variable_creator_scope`; the next\r\n creator in the chain.\r\n name: The (full, scope-influenced) name of the variable. The `name_prefix`\r\n itself is stripped for the purposes of object-based dependency tracking,\r\n but scopes opened within this scope are respected.\r\n initial_value: See `variable_scope.variable_creator_scope`. 
Taken\r\n explicitly so the argument can be re-named and used with\r\n `Checkpointable._add_variable_with_custom_getter`.\r\n checkpointable_parent: If not None, a more deeply nested checkpointable\r\n object and its name prefix which were passed to `capture_dependencies`\r\n to add a dependency on (rather than depending on the variable directly).\r\n **kwargs: Passed through to the next creator.\r\n\r\n Returns:\r\n The output of `next_creator`: the fetched/created variable object.\r\n \"\"\"\r\n def _call_next_creator_renaming_initializer(initializer, **inner_kwargs):\r\n inner_kwargs.pop(\"name\") # Ignored; this is the scope-stripped name which\r\n # we don't want to propagate.\r\n return next_creator(\r\n initial_value=initializer,\r\n name=name,\r\n **inner_kwargs)\r\n if name is not None and name.startswith(name_prefix):\r\n scope_stripped_name = name[len(name_prefix) + 1:]\r\n if not checkpointable_parent:\r\n return template._add_variable_with_custom_getter( # pylint: disable=protected-access\r\n initializer=initial_value,\r\n name=scope_stripped_name,\r\n getter=_call_next_creator_renaming_initializer,\r\n # Disable error checking for Checkpointable. Exceptions are instead\r\n # raised if necessary when the object-based saver tries to\r\n # save/restore the object.\r\n overwrite=True,\r\n checkpointable_parent=(template, name_prefix),\r\n **kwargs)\r\n else:\r\n parent_object, parent_name_prefix = checkpointable_parent\r\n template._track_checkpointable( # pylint: disable=protected-access\r\n parent_object,\r\n name=parent_name_prefix[len(name_prefix) + 1:],\r\n overwrite=True)\r\n return next_creator(\r\n name=name, initial_value=initial_value,\r\n checkpointable_parent=(template, name_prefix), **kwargs)\r\n\r\n with variable_scope.variable_creator_scope(_checkpointable_custom_creator):\r\n yield\r\n\r\n\r\nclass _LoadStatus(object):\r\n \"\"\"Abstract base for load status callbacks.\"\"\"\r\n\r\n @abc.abstractmethod\r\n def assert_consumed(self):\r\n \"\"\"Raises an exception unless a non-trivial restoration has completed.\"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def assert_existing_objects_matched(self):\r\n \"\"\"Raises an exception unless existing Python objects have been matched.\"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def run_restore_ops(self, session=None):\r\n \"\"\"Runs restore ops from the checkpoint. Requires a valid checkpoint.\"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def initialize_or_restore(self, session=None):\r\n \"\"\"Runs restore ops from the checkpoint, or initializes variables.\"\"\"\r\n pass\r\n\r\n\r\ndef streaming_restore(status, session=None):\r\n \"\"\"When graph building, runs restore ops as soon as they come in.\r\n\r\n Args:\r\n status: A _LoadStatus objects from an object-based saver's\r\n restore(). Streaming restore from name-based checkpoints is not currently\r\n supported.\r\n session: A session to run new restore ops in.\r\n \"\"\"\r\n if context.executing_eagerly():\r\n # Streaming restore is the default/only behavior when executing eagerly.\r\n return\r\n if session is None:\r\n session = ops.get_default_session()\r\n if isinstance(status, NameBasedSaverStatus):\r\n raise NotImplementedError(\r\n \"Streaming restore not supported from name-based checkpoints. 
File a \"\r\n \"feature request if this limitation bothers you.\")\r\n status.run_restore_ops(session=session)\r\n # pylint: disable=protected-access\r\n status._checkpoint.new_restore_ops_callback = (\r\n lambda ops: session.run(ops, feed_dict=status._feed_dict))\r\n # pylint: enable=protected-access\r\n\r\n\r\nclass CheckpointLoadStatus(_LoadStatus):\r\n \"\"\"Checks the status of checkpoint loading and manages restore ops.\r\n\r\n Returned from `Saver.restore`. Since `restore` may defer the loading of values\r\n in the checkpoint which don't yet have corresponding Python objects,\r\n `CheckpointLoadStatus` provides a callback to verify that checkpoint loading\r\n is complete (`assert_consumed`).\r\n\r\n When graph building, `restore` does not run restore ops itself since their\r\n creation may be deferred. The `run_restore_ops` method must be called once all\r\n Python objects with values to restore have been created and added to the\r\n dependency graph (this does not necessarily have to be the whole checkpoint;\r\n calling `run_restore_ops` while `assert_consumed` fails is supported and will\r\n partially restore the checkpoint).\r\n\r\n See `Saver.restore` for usage examples.\r\n \"\"\"\r\n\r\n def __init__(self, checkpoint, feed_dict, root_checkpointable):\r\n self._checkpoint = checkpoint\r\n self._feed_dict = feed_dict\r\n self._root_checkpointable = root_checkpointable\r\n\r\n def assert_consumed(self):\r\n \"\"\"Asserts that all objects in the checkpoint have been created/matched.\r\n\r\n Returns:\r\n `self` for chaining.\r\n Raises:\r\n AssertionError: If there are any Python objects in the dependency graph\r\n which have not been restored from this checkpoint or a later `restore`,\r\n or if there are any checkpointed values which have not been matched to\r\n Python objects.\r\n \"\"\"\r\n self.assert_existing_objects_matched()\r\n for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):\r\n checkpointable = self._checkpoint.object_by_proto_id.get(node_id, None)\r\n if checkpointable is None:\r\n raise AssertionError(\"Unresolved object in checkpoint: %s\" % (node,))\r\n if self._checkpoint.slot_restorations:\r\n # Sanity check; this collection should be clear if everything has been\r\n # restored.\r\n raise AssertionError(\"Unresolved slot restorations: %s\" % (\r\n self._checkpoint.slot_restorations,))\r\n if self._checkpoint.unused_attributes:\r\n raise AssertionError(\r\n (\"Unused attributes in these objects (the attributes exist in the \"\r\n \"checkpoint but not in the objects): %s\") % (\r\n self._checkpoint.unused_attributes.items(),))\r\n return self\r\n\r\n def assert_existing_objects_matched(self):\r\n \"\"\"Asserts that checkpointable Python objects have been matched.\r\n\r\n Note that this is a weaker assertion than `assert_consumed`. 
It will only\r\n fail for existing Python objects which are (transitive) dependencies of the\r\n root object and which do not have an entry in the checkpoint.\r\n\r\n It will not fail, for example, if a `tf.keras.Layer` object has not yet been\r\n built and so has not created any `tf.Variable` objects.\r\n\r\n Returns:\r\n `self` for chaining.\r\n\r\n Raises:\r\n AssertionError: If a Python object exists in the transitive dependencies\r\n of the root object but does not have a value in the checkpoint.\r\n \"\"\"\r\n for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):\r\n checkpointable = self._checkpoint.object_by_proto_id.get(node_id, None)\r\n if (checkpointable is not None\r\n and checkpointable._update_uid < self._checkpoint.restore_uid): # pylint: disable=protected-access\r\n raise AssertionError(\r\n \"Object not assigned a value from checkpoint: %s\" % (node,))\r\n for checkpointable_object in list_objects(self._root_checkpointable):\r\n self._checkpoint.all_python_objects.add(checkpointable_object)\r\n unused_python_objects = (\r\n _ObjectIdentitySet(self._checkpoint.all_python_objects)\r\n - _ObjectIdentitySet(self._checkpoint.object_by_proto_id.values()))\r\n if unused_python_objects:\r\n raise AssertionError(\r\n (\"Some Python objects were not bound to checkpointed values, likely \"\r\n \"due to changes in the Python program: %s\")\r\n % (list(unused_python_objects),))\r\n return self\r\n\r\n def run_restore_ops(self, session=None):\r\n \"\"\"Run operations to restore objects in the dependency graph.\"\"\"\r\n if context.executing_eagerly():\r\n return # Run eagerly\r\n if session is None:\r\n session = ops.get_default_session()\r\n session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)\r\n\r\n def initialize_or_restore(self, session=None):\r\n \"\"\"Run operations to initialize or restore objects in the dependency graph.\r\n\r\n Any objects in the dependency graph which have initializers but are not in\r\n the checkpoint will have those initializers run, unless those variables are\r\n being restored by a later call to `tf.train.Checkpoint.restore()`.\r\n\r\n This method has a sibling in `InitializationOnlyStatus` which instead\r\n initializes variables. That type is returned if no checkpoint is specified\r\n in `Saver.restore`.\r\n\r\n Args:\r\n session: The session to run init/restore ops in. If `None`, uses the\r\n default session.\r\n \"\"\"\r\n if context.executing_eagerly():\r\n return # Initialization and restoration ops are run eagerly\r\n if session is None:\r\n session = ops.get_default_session()\r\n all_objects = list_objects(self._root_checkpointable)\r\n already_initialized_objects = _ObjectIdentitySet(\r\n self._checkpoint.object_by_proto_id.values())\r\n initializers_for_non_restored_variables = [\r\n c.initializer for c in all_objects\r\n if hasattr(c, \"initializer\")\r\n and c not in already_initialized_objects\r\n and (getattr(c, \"_update_uid\", self._checkpoint.restore_uid - 1)\r\n < self._checkpoint.restore_uid)]\r\n self.run_restore_ops(session=session)\r\n session.run(initializers_for_non_restored_variables)\r\n\r\n\r\nclass InitializationOnlyStatus(_LoadStatus):\r\n \"\"\"Returned from `Saver.restore` when no checkpoint has been specified.\r\n\r\n Objects of this type have the same `assert_consumed` method as\r\n `CheckpointLoadStatus`, but it always fails. 
However,\r\n `initialize_or_restore` works on objects of both types, and will\r\n initialize variables in `InitializationOnlyStatus` objects or restore them\r\n otherwise.\r\n \"\"\"\r\n\r\n def __init__(self, root_checkpointable, restore_uid):\r\n self._restore_uid = restore_uid\r\n self._root_checkpointable = root_checkpointable\r\n\r\n def assert_consumed(self):\r\n \"\"\"Assertion for consistency with `CheckpointLoadStatus`. Always fails.\"\"\"\r\n raise AssertionError(\r\n \"No checkpoint specified (save_path=None); nothing is being restored.\")\r\n\r\n def assert_existing_objects_matched(self):\r\n \"\"\"Assertion for consistency with `CheckpointLoadStatus`. Always fails.\"\"\"\r\n raise AssertionError(\r\n \"No checkpoint specified (save_path=None); nothing is being restored.\")\r\n\r\n def run_restore_ops(self, session=None):\r\n \"\"\"For consistency with `CheckpointLoadStatus`.\r\n\r\n Use `initialize_or_restore` for initializing if no checkpoint was passed\r\n to `Saver.restore` and restoring otherwise.\r\n\r\n Args:\r\n session: Not used.\r\n \"\"\"\r\n raise AssertionError(\r\n \"No checkpoint specified, so no restore ops are available \"\r\n \"(save_path=None to Saver.restore).\")\r\n\r\n def initialize_or_restore(self, session=None):\r\n \"\"\"Runs initialization ops for variables.\r\n\r\n Objects which would be saved by `Saver.save` will be initialized, unless\r\n those variables are being restored by a later call to\r\n `tf.train.Checkpoint.restore()`.\r\n\r\n This method does nothing when executing eagerly (initializers get run\r\n eagerly).\r\n\r\n Args:\r\n session: The session to run initialization ops in. If `None`, uses the\r\n default session.\r\n \"\"\"\r\n if context.executing_eagerly():\r\n return # run eagerly\r\n if session is None:\r\n session = ops.get_default_session()\r\n checkpointable_objects = list_objects(self._root_checkpointable)\r\n initializers = [\r\n c.initializer for c in checkpointable_objects\r\n if hasattr(c, \"initializer\") and c.initializer is not None\r\n and (getattr(c, \"_update_uid\", self._restore_uid - 1)\r\n < self._restore_uid)]\r\n session.run(initializers)\r\n\r\n\r\n_DEPRECATED_RESTORE_INSTRUCTIONS = (\r\n \"Restoring a name-based tf.train.Saver checkpoint using the object-based \"\r\n \"restore API. This mode uses global names to match variables, and so is \"\r\n \"somewhat fragile. It also adds new restore ops to the graph each time it \"\r\n \"is called when graph building. 
Prefer re-encoding training checkpoints in \"\r\n \"the object-based format: run save() on the object-based saver (the same \"\r\n \"one this message is coming from) and use that checkpoint in the future.\")\r\n\r\n\r\nclass NameBasedSaverStatus(_LoadStatus):\r\n \"\"\"Status for loading a name-based training checkpoint.\"\"\"\r\n\r\n # Ideally this deprecation decorator would be on the class, but that\r\n # interferes with isinstance checks.\r\n @deprecation.deprecated(\r\n date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)\r\n def __init__(self, checkpoint, root_checkpointable):\r\n self._checkpoint = checkpoint\r\n self._root_checkpointable = root_checkpointable\r\n\r\n def assert_consumed(self):\r\n \"\"\"Raises an exception if any variables/objects are unmatched.\"\"\"\r\n unused_attributes = dict(self._checkpoint.unused_attributes)\r\n if unused_attributes:\r\n raise AssertionError(\r\n \"Some objects had attributes which were not restored: %s\"\r\n % (unused_attributes,))\r\n for checkpointable in list_objects(self._root_checkpointable):\r\n # pylint: disable=protected-access\r\n checkpointable._maybe_initialize_checkpointable()\r\n if checkpointable._update_uid < self._checkpoint.restore_uid:\r\n raise AssertionError(\"Object not restored: %s\" % (checkpointable,))\r\n # pylint: enable=protected-access\r\n return self\r\n\r\n def assert_existing_objects_matched(self):\r\n \"\"\"Raises an exception if currently created objects are unmatched.\"\"\"\r\n # For name-based checkpoints there's no object information in the\r\n # checkpoint, so there's no distinction between\r\n # assert_existing_objects_matched and assert_consumed (and both are less\r\n # useful since we don't touch Python objects or Python state).\r\n return self.assert_consumed()\r\n\r\n def _gather_saveable_objects(self):\r\n \"\"\"Walk the object graph, using global names for SaveableObjects.\"\"\"\r\n objects = list_objects(self._root_checkpointable)\r\n saveable_objects = []\r\n for checkpointable in objects:\r\n # pylint: disable=protected-access\r\n checkpointable._maybe_initialize_checkpointable()\r\n if checkpointable._update_uid < self._checkpoint.restore_uid:\r\n checkpointable._update_uid = self._checkpoint.restore_uid\r\n else:\r\n continue\r\n # pylint: enable=protected-access\r\n saveable_objects.extend(\r\n self._checkpoint.globally_named_object_attributes(\r\n checkpointable))\r\n return saveable_objects\r\n\r\n def run_restore_ops(self, session=None):\r\n \"\"\"Load the name-based training checkpoint using a new `tf.train.Saver`.\"\"\"\r\n if context.executing_eagerly():\r\n return # Nothing to do, variables are restored on creation.\r\n if session is None:\r\n session = ops.get_default_session()\r\n with ops.device(\"/cpu:0\"):\r\n saveables = self._gather_saveable_objects()\r\n saver_lib.Saver(saveables).restore(\r\n sess=session, save_path=self._checkpoint.save_path)\r\n\r\n def initialize_or_restore(self, session=None):\r\n \"\"\"Alias for `run_restore_ops`.\"\"\"\r\n self.run_restore_ops(session=session)\r\n\r\n\r\nclass _SessionWithFeedDictAdditions(session_lib.SessionInterface):\r\n \"\"\"Pretends to be a session, inserts extra feeds on run().\"\"\"\r\n\r\n def __init__(self, session, feed_additions):\r\n self._wrapped_session = session\r\n self._feed_additions = feed_additions\r\n\r\n def run(self, fetches, feed_dict=None, **kwargs):\r\n if feed_dict is None:\r\n feed_dict = {}\r\n else:\r\n feed_dict = feed_dict.copy()\r\n feed_dict.update(self._feed_additions)\r\n return 
self._wrapped_session.run(\r\n fetches=fetches, feed_dict=feed_dict, **kwargs)\r\n\r\n\r\ndef _copy_saver_with_new_var_list(old_saver, new_var_list):\r\n \"\"\"Copy a `tf.train.Saver`'s state to a new Saver with different variables.\"\"\"\r\n new_saver = saver_lib.Saver(var_list=new_var_list, max_to_keep=None)\r\n # TODO(allenl): Move to copying functionality to Saver?\r\n # pylint: disable=protected-access\r\n new_saver._last_checkpoints = old_saver._last_checkpoints\r\n new_saver._checkpoints_to_be_deleted = old_saver._checkpoints_to_be_deleted\r\n new_saver._next_checkpoint_time = old_saver._next_checkpoint_time\r\n # pylint: enable=protected-access\r\n return new_saver\r\n\r\n\r\nclass CheckpointableSaver(object):\r\n \"\"\"Saves and restores a `Checkpointable` object and its dependencies.\r\n\r\n See `Checkpointable` for details of dependency management. `Saver` wraps\r\n `tf.train.Saver` for saving, including extra information about the graph of\r\n dependencies between Python objects. When restoring, it uses this information\r\n about the save-time dependency graph to more robustly match objects with their\r\n checkpointed values. When executing eagerly, it supports restoring variables\r\n on object creation (see `Saver.restore`).\r\n\r\n Values in a checkpoint are mapped to `Checkpointable` Python objects\r\n (`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the\r\n checkpoint was written. To avoid breaking existing checkpoints when modifying\r\n a class, dependency names (the names of attributes to which `Checkpointable`\r\n objects are assigned) may not change. These names are local to objects, in\r\n contrast to the `Variable.name`-based save/restore from `tf.train.Saver`, and\r\n so allow additional program transformations.\r\n \"\"\"\r\n\r\n def __init__(self, root_checkpointable):\r\n \"\"\"Configure saving.\r\n\r\n Args:\r\n root_checkpointable: The root of the object graph to save/restore. This\r\n object and all of its dependencies are saved in the checkpoint. 
When\r\n restoring, objects are matched and restored starting from this root.\r\n \"\"\"\r\n # Allow passing in a weak reference to avoid reference cycles when\r\n # `Checkpointable` objects save themselves.\r\n self._root_checkpointable_ref = root_checkpointable\r\n # The file prefix placeholder is created lazily when graph building (and not\r\n # at all when executing eagerly) to avoid creating ops in the constructor\r\n # (when they may never be necessary).\r\n self._file_prefix_placeholder = None\r\n\r\n # Op caching for save\r\n self._object_graph_feed_tensor = None\r\n self._last_save_object_graph = None\r\n self._last_save_saver = None\r\n\r\n # Op caching for restore, shared between _CheckpointRestoreCoordinators\r\n self._restore_op_cache = {}\r\n\r\n if context.executing_eagerly():\r\n # SaveableObjects are always recreated when executing eagerly.\r\n self._saveable_object_cache = None\r\n else:\r\n # Maps Checkpointable objects -> attribute names -> list(SaveableObjects),\r\n # to avoid re-creating SaveableObjects when graph building.\r\n self._saveable_object_cache = _ObjectIdentityWeakKeyDictionary()\r\n\r\n @property\r\n def _root_checkpointable(self):\r\n if isinstance(self._root_checkpointable_ref, weakref.ref):\r\n derefed = self._root_checkpointable_ref()\r\n assert derefed is not None\r\n return derefed\r\n else:\r\n return self._root_checkpointable_ref\r\n\r\n def _gather_saveables(\r\n self, object_graph_tensor=None, saveable_object_cache=None):\r\n \"\"\"Wraps _serialize_object_graph to include the object graph proto.\"\"\"\r\n assert ((object_graph_tensor is None and saveable_object_cache is None)\r\n or (object_graph_tensor is not None\r\n and saveable_object_cache is not None))\r\n (named_saveable_objects, graph_proto,\r\n feed_additions) = _serialize_object_graph(\r\n self._root_checkpointable,\r\n saveables_cache=saveable_object_cache)\r\n if object_graph_tensor is None:\r\n with ops.device(\"/cpu:0\"):\r\n object_graph_tensor = constant_op.constant(\r\n graph_proto.SerializeToString(), dtype=dtypes.string)\r\n else:\r\n feed_additions.update(\r\n {object_graph_tensor: graph_proto.SerializeToString()})\r\n assert base.OBJECT_GRAPH_PROTO_KEY not in named_saveable_objects\r\n named_saveable_objects.append(\r\n base.NoRestoreSaveable(\r\n tensor=object_graph_tensor,\r\n name=base.OBJECT_GRAPH_PROTO_KEY))\r\n return named_saveable_objects, graph_proto, feed_additions\r\n\r\n def freeze(self):\r\n \"\"\"Creates a `tf.train.Saver` with the current object graph frozen.\"\"\"\r\n named_saveable_objects, _, _ = self._gather_saveables(\r\n object_graph_tensor=None, saveable_object_cache=None)\r\n return saver_lib.Saver(\r\n var_list=named_saveable_objects, max_to_keep=None)\r\n\r\n def _prepare_save(self,\r\n object_graph_tensor=None,\r\n saveable_object_cache=None):\r\n \"\"\"Create or retrieve save ops.\r\n\r\n When graph building, `saveable_object_cache` will typically be non-`None`,\r\n meaning that existing `SaveableObject`s are re-used across calls to\r\n `_prepare_save` even if the object graph has grown. This avoids\r\n unnecessarily re-creating save ops.\r\n\r\n Args:\r\n object_graph_tensor: A `Tensor` to which the current object graph will be\r\n fed.\r\n saveable_object_cache: A dictionary; if specified, used to cache\r\n `SaveableObject`s.\r\n\r\n Returns:\r\n A two-element tuple with a `tf.train.Saver` and a feed_dict of `Tensor`s\r\n to feed when running save ops. 
The feed dict contains the current object\r\n graph and any Python state to be saved in the checkpoint.\r\n \"\"\"\r\n (named_saveable_objects, graph_proto,\r\n feed_additions) = self._gather_saveables(\r\n object_graph_tensor=object_graph_tensor,\r\n saveable_object_cache=saveable_object_cache)\r\n if (self._last_save_object_graph != graph_proto\r\n # When executing eagerly, we need to re-create SaveableObjects each time\r\n # save() is called so they pick up new Tensors passed to their\r\n # constructors. That means the Saver needs to be copied with a new\r\n # var_list.\r\n or context.executing_eagerly()):\r\n if self._last_save_object_graph is not None:\r\n self._last_save_saver = _copy_saver_with_new_var_list(\r\n old_saver=self._last_save_saver,\r\n new_var_list=named_saveable_objects)\r\n else:\r\n self._last_save_saver = saver_lib.Saver(\r\n var_list=named_saveable_objects, max_to_keep=None)\r\n self._last_save_object_graph = graph_proto\r\n return self._last_save_saver, feed_additions\r\n\r\n def save(self, file_prefix, checkpoint_number=None, session=None):\r\n \"\"\"Save a training checkpoint.\r\n\r\n The saved checkpoint includes variables created by this object and any\r\n Checkpointable objects it depends on at the time `Saver.save()` is called.\r\n\r\n Args:\r\n file_prefix: A prefix to use for the checkpoint filenames\r\n (/path/to/directory/and_a_prefix). Names are generated based on this\r\n prefix and `checkpoint_number`, if provided.\r\n checkpoint_number: An integer variable or Tensor, used to number\r\n checkpoints. Typically this value is saved along with other variables in\r\n training checkpoints, which will happen automatically if it was created\r\n by `root_checkpointable` or one of its dependencies (via\r\n `Checkpointable._add_variable`).\r\n session: The session to evaluate variables in. Ignored when executing\r\n eagerly. If not provided when graph building, the default session is\r\n used.\r\n\r\n Returns:\r\n The full path to the checkpoint.\r\n \"\"\"\r\n feed_additions = {}\r\n graph_building = not context.executing_eagerly()\r\n if graph_building:\r\n if self._object_graph_feed_tensor is None:\r\n with ops.device(\"/cpu:0\"):\r\n self._object_graph_feed_tensor = constant_op.constant(\r\n \"\", dtype=dtypes.string)\r\n object_graph_tensor = self._object_graph_feed_tensor\r\n else:\r\n object_graph_tensor = None\r\n\r\n saver, new_feed_additions = self._prepare_save(\r\n object_graph_tensor=object_graph_tensor,\r\n saveable_object_cache=self._saveable_object_cache)\r\n if new_feed_additions:\r\n feed_additions.update(new_feed_additions)\r\n if not graph_building:\r\n session = None\r\n elif session is None:\r\n session = ops.get_default_session()\r\n\r\n with ops.device(\"/cpu:0\"):\r\n save_path = saver.save(\r\n sess=_SessionWithFeedDictAdditions(\r\n session=session, feed_additions=feed_additions),\r\n save_path=file_prefix,\r\n write_meta_graph=False,\r\n write_state=False,\r\n global_step=checkpoint_number)\r\n return save_path\r\n\r\n def restore(self, save_path):\r\n \"\"\"Restore a training checkpoint.\r\n\r\n Restores `root_checkpointable` and any objects that it tracks\r\n (transitive). Either assigns values immediately if variables to restore have\r\n been created already, or defers restoration until the variables are\r\n created. 
Dependencies added to the `root_checkpointable` passed to the\r\n constructor after this call will be matched if they have a corresponding\r\n object in the checkpoint.\r\n\r\n When building a graph, restorations are added to the graph but not run.\r\n\r\n To disallow deferred loading, assert immediately that all checkpointed\r\n variables have been matched to variable objects:\r\n\r\n ```python\r\n saver = Saver(root)\r\n saver.restore(path).assert_consumed()\r\n ```\r\n\r\n An exception will be raised unless every object was matched and its\r\n variables already exist.\r\n\r\n When graph building, `assert_consumed()` indicates that all of the restore\r\n ops which will be created for this checkpoint have been created. They can be\r\n run via the `run_restore_ops()` function of the status object:\r\n\r\n ```python\r\n saver.restore(path).assert_consumed().run_restore_ops()\r\n ```\r\n\r\n If the checkpoint has not been consumed completely, then the list of restore\r\n ops will grow as more objects are added to the dependency graph.\r\n\r\n Name-based `tf.train.Saver` checkpoints can be loaded using this\r\n method. There is no deferred loading, and names are used to match\r\n variables. No restore ops are created/run until `run_restore_ops()` or\r\n `initialize_or_restore()` are called on the returned status object, even\r\n when executing eagerly. Re-encode name-based checkpoints using this\r\n object-based `Saver.save` as soon as possible.\r\n\r\n Args:\r\n save_path: The path to the checkpoint, as returned by `save` or\r\n `tf.train.latest_checkpoint`. If None (as when there is no latest\r\n checkpoint for `tf.train.latest_checkpoint` to return), returns an\r\n object which may run initializers for objects in the dependency\r\n graph. If the checkpoint was written by the name-based `tf.train.Saver`,\r\n names are used to match variables.\r\n\r\n Returns:\r\n A load status object, which can be used to make assertions about the\r\n status of checkpoint restoration and run initialization/restore ops\r\n (of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if\r\n `save_path` is `None`).\r\n\r\n If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`\r\n object is returned which runs restore ops from a name-based saver.\r\n \"\"\"\r\n if save_path is None:\r\n return InitializationOnlyStatus(self._root_checkpointable, ops.uid())\r\n reader = pywrap_tensorflow.NewCheckpointReader(save_path)\r\n graph_building = not context.executing_eagerly()\r\n if graph_building:\r\n dtype_map = None\r\n else:\r\n dtype_map = reader.get_variable_to_dtype_map()\r\n try:\r\n object_graph_string = reader.get_tensor(\r\n base.OBJECT_GRAPH_PROTO_KEY)\r\n except errors_impl.NotFoundError:\r\n # The object graph proto does not exist in this checkpoint. 
Try the\r\n # name-based compatibility mode.\r\n restore_coordinator = _NameBasedRestoreCoordinator(\r\n save_path=save_path, dtype_map=dtype_map)\r\n if not graph_building:\r\n for existing_checkpointable in list_objects(self._root_checkpointable):\r\n # pylint: disable=protected-access\r\n existing_checkpointable._maybe_initialize_checkpointable()\r\n existing_checkpointable._name_based_restores.add(restore_coordinator)\r\n existing_checkpointable._name_based_attribute_restore(\r\n restore_coordinator)\r\n # pylint: enable=protected-access\r\n return NameBasedSaverStatus(\r\n restore_coordinator, root_checkpointable=self._root_checkpointable)\r\n\r\n if graph_building:\r\n if self._file_prefix_placeholder is None:\r\n with ops.device(\"/cpu:0\"):\r\n self._file_prefix_placeholder = constant_op.constant(\"model\")\r\n file_prefix_tensor = self._file_prefix_placeholder\r\n file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}\r\n else:\r\n with ops.device(\"/cpu:0\"):\r\n file_prefix_tensor = constant_op.constant(save_path)\r\n file_prefix_feed_dict = None\r\n object_graph_proto = (\r\n checkpointable_object_graph_pb2.CheckpointableObjectGraph())\r\n object_graph_proto.ParseFromString(object_graph_string)\r\n checkpoint = _CheckpointRestoreCoordinator(\r\n object_graph_proto=object_graph_proto,\r\n save_path=save_path,\r\n save_path_tensor=file_prefix_tensor,\r\n restore_op_cache=self._restore_op_cache,\r\n saveable_object_cache=self._saveable_object_cache)\r\n base._CheckpointPosition( # pylint: disable=protected-access\r\n checkpoint=checkpoint, proto_id=0).restore(self._root_checkpointable)\r\n load_status = CheckpointLoadStatus(\r\n checkpoint,\r\n root_checkpointable=self._root_checkpointable,\r\n feed_dict=file_prefix_feed_dict)\r\n return load_status\r\n\r\n\r\ndef frozen_saver(root_checkpointable):\r\n \"\"\"Creates a static `tf.train.Saver` from a checkpointable object.\r\n\r\n The returned `Saver` saves object-based checkpoints, but these checkpoints\r\n will no longer reflect structural changes to the object graph, only changes to\r\n the values of `Variable`s added as dependencies of the root object before\r\n `freeze` was called.\r\n\r\n `restore` works on the returned `Saver`, but requires that the object graph of\r\n the checkpoint being loaded exactly matches the object graph when `freeze` was\r\n called. This is in contrast the object-based restore performed by\r\n `tf.train.Checkpoint` which attempts a fuzzy matching between a checkpoint's\r\n object graph and the current Python object graph.\r\n\r\n Args:\r\n root_checkpointable: A checkpointable object to save.\r\n\r\n Returns:\r\n A `tf.train.Saver` which saves object-based checkpoints for the object graph\r\n frozen at the time `frozen_saver` was called.\r\n \"\"\"\r\n return CheckpointableSaver(root_checkpointable).freeze()\r\n\r\n\r\n@tf_export(\"train.Checkpoint\")\r\nclass Checkpoint(tracking.Checkpointable):\r\n \"\"\"Groups checkpointable objects, saving and restoring them.\r\n\r\n `Checkpoint`'s constructor accepts keyword arguments whose values are types\r\n that contain checkpointable state, such as `tf.train.Optimizer`\r\n implementations, `tf.Variable`, `tf.keras.Layer` implementations, or\r\n `tf.keras.Model` implementations. 
It saves these values with a checkpoint, and\r\n maintains a `save_counter` for numbering checkpoints.\r\n\r\n Example usage when graph building:\r\n\r\n ```python\r\n import tensorflow as tf\r\n import os\r\n\r\n checkpoint_directory = \"/tmp/training_checkpoints\"\r\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\r\n\r\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\r\n status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))\r\n train_op = optimizer.minimize( ... )\r\n status.assert_consumed() # Optional sanity checks.\r\n with tf.Session() as session:\r\n # Use the Session to restore variables, or initialize them if\r\n # tf.train.latest_checkpoint returned None.\r\n status.initialize_or_restore(session)\r\n for _ in range(num_training_steps):\r\n session.run(train_op)\r\n checkpoint.save(file_prefix=checkpoint_prefix)\r\n ```\r\n\r\n Example usage with eager execution enabled:\r\n\r\n ```python\r\n import tensorflow as tf\r\n import os\r\n\r\n tf.enable_eager_execution()\r\n\r\n checkpoint_directory = \"/tmp/training_checkpoints\"\r\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\r\n\r\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\r\n status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))\r\n for _ in range(num_training_steps):\r\n optimizer.minimize( ... ) # Variables will be restored on creation.\r\n status.assert_consumed() # Optional sanity checks.\r\n checkpoint.save(file_prefix=checkpoint_prefix)\r\n ```\r\n\r\n `Checkpoint.save` and `Checkpoint.restore` write and read object-based\r\n checkpoints, in contrast to `tf.train.Saver` which writes and reads\r\n `variable.name` based checkpoints. Object-based checkpointing saves a graph of\r\n dependencies between Python objects (`Layer`s, `Optimizer`s, `Variable`s,\r\n etc.) with named edges, and this graph is used to match variables when\r\n restoring a checkpoint. It can be more robust to changes in the Python\r\n program, and helps to support restore-on-create for variables when executing\r\n eagerly. Prefer `tf.train.Checkpoint` over `tf.train.Saver` for new code.\r\n\r\n `Checkpoint` objects have dependencies on the objects passed as keyword\r\n arguments to their constructors, and each dependency is given a name that is\r\n identical to the name of the keyword argument for which it was created.\r\n TensorFlow classes like `Layer`s and `Optimizer`s will automatically add\r\n dependencies on their variables (e.g. \"kernel\" and \"bias\" for\r\n `tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing\r\n dependencies easy in user-defined classes, since `Model` hooks into attribute\r\n assignment. For example:\r\n\r\n ```python\r\n class Regress(tf.keras.Model):\r\n\r\n def __init__(self):\r\n super(Regress, self).__init__()\r\n self.input_transform = tf.keras.layers.Dense(10)\r\n # ...\r\n\r\n def call(self, inputs):\r\n x = self.input_transform(inputs)\r\n # ...\r\n ```\r\n\r\n This `Model` has a dependency named \"input_transform\" on its `Dense` layer,\r\n which in turn depends on its variables. As a result, saving an instance of\r\n `Regress` using `tf.train.Checkpoint` will also save all the variables created\r\n by the `Dense` layer.\r\n\r\n Attributes:\r\n save_counter: Incremented when `save()` is called. 
Used to number\r\n checkpoints.\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n \"\"\"Group objects into a training checkpoint.\r\n\r\n Args:\r\n **kwargs: Keyword arguments are set as attributes of this object, and are\r\n saved with the checkpoint. Values must be checkpointable objects.\r\n Raises:\r\n ValueError: If objects in `kwargs` are not checkpointable.\r\n \"\"\"\r\n super(Checkpoint, self).__init__()\r\n for k, v in sorted(kwargs.items(), key=lambda item: item[0]):\r\n if not isinstance(v, base.CheckpointableBase):\r\n raise ValueError(\r\n (\"`Checkpoint` was expecting a checkpointable object (an object \"\r\n \"derived from `CheckpointableBase`), got %s. If you believe this \"\r\n \"object should be checkpointable (i.e. it is part of the \"\r\n \"TensorFlow Python API and manages state), please open an issue.\")\r\n % (v,))\r\n setattr(self, k, v)\r\n self._save_counter = None # Created lazily for restore-on-create.\r\n self._save_assign_op = None\r\n self._saver = CheckpointableSaver(weakref.ref(self))\r\n\r\n def _maybe_create_save_counter(self):\r\n \"\"\"Create a save counter if it does not yet exist.\"\"\"\r\n if self._save_counter is None:\r\n # Initialized to 0 and incremented before saving.\r\n with ops.device(\"/cpu:0\"):\r\n # add_variable creates a dependency named \"save_counter\"; NoDependency\r\n # prevents creating a second dependency named \"_save_counter\".\r\n self._save_counter = data_structures.NoDependency(\r\n add_variable(self, name=\"save_counter\", initializer=0,\r\n dtype=dtypes.int64))\r\n\r\n def write(self, file_prefix, session=None):\r\n \"\"\"Writes a training checkpoint.\r\n\r\n The checkpoint includes variables created by this object and any\r\n checkpointable objects it depends on at the time `Checkpoint.write()` is\r\n called.\r\n\r\n `write` does not number checkpoints, increment `save_counter`, or update the\r\n metadata used by `tf.train.latest_checkpoint`. It is primarily intended for\r\n use by higher level checkpoint management utilities. `save` provides a very\r\n basic implementation of these features.\r\n\r\n Args:\r\n file_prefix: A prefix to use for the checkpoint filenames\r\n (/path/to/directory/and_a_prefix).\r\n session: The session to evaluate variables in. Ignored when executing\r\n eagerly. If not provided when graph building, the default session is\r\n used.\r\n\r\n Returns:\r\n The full path to the checkpoint (i.e. `file_prefix`).\r\n \"\"\"\r\n return self._saver.save(\r\n file_prefix=file_prefix,\r\n session=session)\r\n\r\n @property\r\n def save_counter(self):\r\n \"\"\"An integer variable which starts at zero and is incremented on save.\r\n\r\n Used to number checkpoints.\r\n\r\n Returns:\r\n The save counter variable.\r\n \"\"\"\r\n self._maybe_create_save_counter()\r\n return self._save_counter\r\n\r\n def save(self, file_prefix, session=None):\r\n \"\"\"Saves a training checkpoint and provides basic checkpoint management.\r\n\r\n The saved checkpoint includes variables created by this object and any\r\n checkpointable objects it depends on at the time `Checkpoint.save()` is\r\n called.\r\n\r\n `save` is a basic convenience wrapper around the `write` method,\r\n sequentially numbering checkpoints using `save_counter` and updating the\r\n metadata used by `tf.train.latest_checkpoint`. 
More advanced checkpoint\r\n management, for example garbage collection and custom numbering, may be\r\n provided by other utilities which also wrap `write`\r\n (`tf.contrib.checkpoint.CheckpointManager` for example).\r\n\r\n Args:\r\n file_prefix: A prefix to use for the checkpoint filenames\r\n (/path/to/directory/and_a_prefix). Names are generated based on this\r\n prefix and `Checkpoint.save_counter`.\r\n session: The session to evaluate variables in. Ignored when executing\r\n eagerly. If not provided when graph building, the default session is\r\n used.\r\n\r\n Returns:\r\n The full path to the checkpoint.\r\n \"\"\"\r\n graph_building = not context.executing_eagerly()\r\n if graph_building:\r\n if session is None:\r\n session = ops.get_default_session()\r\n if self._save_counter is None:\r\n # When graph building, if this is a new save counter variable then it\r\n # needs to be initialized before assign_add. This is only an issue if\r\n # restore() has not been called first.\r\n session.run(self.save_counter.initializer)\r\n if not graph_building or self._save_assign_op is None:\r\n with ops.colocate_with(self.save_counter):\r\n assign_op = self.save_counter.assign_add(1, read_value=True)\r\n if graph_building:\r\n self._save_assign_op = data_structures.NoDependency(assign_op)\r\n if graph_building:\r\n checkpoint_number = session.run(self._save_assign_op)\r\n else:\r\n checkpoint_number = assign_op.numpy()\r\n file_path = self.write(\"%s-%d\" % (file_prefix, checkpoint_number),\r\n session=session)\r\n checkpoint_management.update_checkpoint_state(\r\n save_dir=os.path.dirname(file_prefix),\r\n model_checkpoint_path=file_path,\r\n all_model_checkpoint_paths=[file_path])\r\n return file_path\r\n\r\n def restore(self, save_path):\r\n \"\"\"Restore a training checkpoint.\r\n\r\n Restores this `Checkpoint` and any objects it depends on.\r\n\r\n When executing eagerly, either assigns values immediately if variables to\r\n restore have been created already, or defers restoration until the variables\r\n are created. Dependencies added after this call will be matched if they have\r\n a corresponding object in the checkpoint (the restore request will queue in\r\n any checkpointable object waiting for the expected dependency to be added).\r\n\r\n When graph building, restoration ops are added to the graph but not run\r\n immediately.\r\n\r\n To ensure that loading is complete and no more assignments will take place,\r\n use the `assert_consumed()` method of the status object returned by\r\n `restore`:\r\n\r\n ```python\r\n checkpoint = tf.train.Checkpoint( ... )\r\n checkpoint.restore(path).assert_consumed()\r\n ```\r\n\r\n An exception will be raised if any Python objects in the dependency graph\r\n were not found in the checkpoint, or if any checkpointed values do not have\r\n a matching Python object.\r\n\r\n When graph building, `assert_consumed()` indicates that all of the restore\r\n ops that will be created for this checkpoint have been created. They can be\r\n run via the `run_restore_ops()` method of the status object:\r\n\r\n ```python\r\n checkpoint.restore(path).assert_consumed().run_restore_ops()\r\n ```\r\n\r\n If the checkpoint has not been consumed completely, then the list of restore\r\n ops will grow as more objects are added to the dependency graph.\r\n\r\n Name-based `tf.train.Saver` checkpoints can be loaded using this\r\n method. Names are used to match variables. 
No restore ops are created/run\r\n until `run_restore_ops()` or `initialize_or_restore()` are called on the\r\n returned status object when graph building, but there is restore-on-creation\r\n when executing eagerly. Re-encode name-based checkpoints using\r\n `tf.train.Checkpoint.save` as soon as possible.\r\n\r\n Args:\r\n save_path: The path to the checkpoint, as returned by `save` or\r\n `tf.train.latest_checkpoint`. If None (as when there is no latest\r\n checkpoint for `tf.train.latest_checkpoint` to return), returns an\r\n object which may run initializers for objects in the dependency\r\n graph. If the checkpoint was written by the name-based `tf.train.Saver`,\r\n names are used to match variables.\r\n\r\n Returns:\r\n A load status object, which can be used to make assertions about the\r\n status of a checkpoint restoration and run initialization/restore ops.\r\n\r\n The returned status object has the following methods:\r\n - `assert_consumed()`:\r\n Raises an exception if any variables/objects are unmatched: either\r\n checkpointed values which don't have a matching Python object or\r\n Python objects in the dependency graph with no values in the\r\n checkpoint. This method returns the status object, and so may be\r\n chained with `initialize_or_restore` or `run_restore_ops`.\r\n - `assert_existing_objects_matched()`:\r\n Raises an exception if any existing Python objects in the dependency\r\n graph are unmatched. Unlike `assert_consumed`, this assertion will\r\n pass if values in the checkpoint have no corresponding Python\r\n objects. For example a `tf.keras.Layer` object which has not yet been\r\n built, and so has not created any variables, will pass this assertion\r\n but fail `assert_consumed`. Useful when loading part of a larger\r\n checkpoint into a new Python program, e.g. a training checkpoint with\r\n a `tf.train.Optimizer` was saved but only the state required for\r\n inference is being loaded. This method returns the status object, and\r\n so may be chained with `initialize_or_restore` or `run_restore_ops`.\r\n - `initialize_or_restore(session=None)`:\r\n When graph building, runs variable initializers if `save_path` is\r\n `None`, but otherwise runs restore operations. If no `session` is\r\n explicitly specified, the default session is used. No effect when\r\n executing eagerly (variables are initialized or restored eagerly).\r\n - `run_restore_ops(session=None)`:\r\n When graph building, runs restore operations. If no `session` is\r\n explicitly specified, the default session is used. No effect when\r\n executing eagerly (restore operations are run eagerly). May only be\r\n called when `save_path` is not `None`.\r\n \"\"\"\r\n status = self._saver.restore(save_path=save_path)\r\n # Create the save counter now so it gets initialized with other variables\r\n # when graph building. Creating it earlier would lead to double\r\n # initialization when executing eagerly.\r\n self._maybe_create_save_counter()\r\n return status\r\n",
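The object-based save/restore flow implemented by `Checkpoint` and `CheckpointableSaver` above can be exercised end to end with a short sketch. This is an added illustration, not part of the archived file: it assumes a TF 1.x build in which `tf.enable_eager_execution`, `tf.train.Checkpoint`, and a checkpointable `tf.keras.layers.Dense` are all available, and the checkpoint location is an arbitrary temporary directory.

```python
# Hedged sketch of object-based checkpointing, mirroring the eager-execution
# example in the Checkpoint docstring above. Paths and layer sizes are
# illustrative assumptions.
import os
import tempfile

import tensorflow as tf

tf.enable_eager_execution()

checkpoint_prefix = os.path.join(tempfile.mkdtemp(), "ckpt")

# Build a layer so its "kernel" and "bias" dependencies exist before saving.
dense = tf.keras.layers.Dense(4)
dense(tf.zeros([1, 3]))

root = tf.train.Checkpoint(dense=dense)   # keyword name becomes the edge name
save_path = root.save(file_prefix=checkpoint_prefix)

# Restore into a freshly constructed (and built) layer, then check that every
# checkpointed value was matched to a Python object.
restored = tf.keras.layers.Dense(4)
restored(tf.zeros([1, 3]))
status = tf.train.Checkpoint(dense=restored).restore(save_path)
status.assert_consumed()
```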
"# This file is part of h5py, a Python interface to the HDF5 library.\n#\n# http://www.h5py.org\n#\n# Copyright 2008-2013 Andrew Collette and contributors\n#\n# License: Standard 3-clause BSD; see \"license.txt\" for full license terms\n# and contributor agreement.\n\nfrom __future__ import absolute_import\n\nimport sys\n\nimport numpy as np\nfrom six import PY2, text_type\n\nimport h5py\nfrom h5py import h5t\n\nfrom ..common import TestCase, ut\n\n\nclass TestCompound(ut.TestCase):\n\n \"\"\"\n Feature: Compound types can be created from Python dtypes\n \"\"\"\n\n def test_ref(self):\n \"\"\" Reference types are correctly stored in compound types (issue 144)\n \"\"\"\n ref = h5py.special_dtype(ref=h5py.Reference)\n dt = np.dtype([('a', ref), ('b', '<f4')])\n tid = h5t.py_create(dt, logical=True)\n t1, t2 = tid.get_member_type(0), tid.get_member_type(1)\n self.assertEqual(t1, h5t.STD_REF_OBJ)\n self.assertEqual(t2, h5t.IEEE_F32LE)\n self.assertEqual(tid.get_member_offset(0), 0)\n self.assertEqual(tid.get_member_offset(1), h5t.STD_REF_OBJ.get_size())\n\n def test_out_of_order_offsets(self):\n size = 20\n type_dict = {\n 'names': ['f1', 'f2', 'f3'],\n 'formats': ['<f4', '<i4', '<f8'],\n 'offsets': [0, 16, 8]\n }\n\n expected_dtype = np.dtype(type_dict)\n\n tid = h5t.create(h5t.COMPOUND, size)\n for name, offset, dt in zip(\n type_dict[\"names\"], type_dict[\"offsets\"], type_dict[\"formats\"]\n ):\n tid.insert(\n name.encode(\"utf8\") if isinstance(name, text_type) else name,\n offset,\n h5t.py_create(dt)\n )\n\n self.assertEqual(tid.dtype, expected_dtype)\n self.assertEqual(tid.dtype.itemsize, size)\n\n\nclass TestTypeFloatID(TestCase):\n \"\"\"Test TypeFloatID.\"\"\"\n\n def test_custom_float_promotion(self):\n \"\"\"Custom floats are correctly promoted to standard floats on read.\"\"\"\n if h5t.MACHINE == 'ppc64le':\n return\n\n test_filename = self.mktemp()\n dataset = 'DS1'\n dataset2 = 'DS2'\n dataset3 = 'DS3'\n dataset4 = 'DS4'\n dataset5 = 'DS5'\n\n # Strings are handled very differently between python2 and python3.\n if not PY2:\n test_filename = test_filename.encode()\n dataset = dataset.encode()\n dataset2 = dataset2.encode()\n dataset3 = dataset3.encode()\n dataset4 = dataset4.encode()\n dataset5 = dataset5.encode()\n\n DIM0 = 4\n DIM1 = 7\n\n wdata = np.array([[-1.50066626e-09, 1.40062184e-09, 1.81216819e-10,\n 4.01087163e-10, 4.27917257e-10, -7.04858394e-11,\n 5.74800652e-10],\n [-1.50066626e-09, 4.86579665e-10, 3.42879503e-10,\n 5.12045517e-10, 5.10226528e-10, 2.24190444e-10,\n 3.93356459e-10],\n [-1.50066626e-09, 5.24778443e-10, 8.19454726e-10,\n 1.28966349e-09, 1.68483894e-10, 5.71276360e-11,\n -1.08684617e-10],\n [-1.50066626e-09, -1.08343556e-10, -1.58934199e-10,\n 8.52196536e-10, 6.18456397e-10, 6.16637408e-10,\n 1.31694833e-09]], dtype=np.float32)\n\n wdata2 = np.array([[-1.50066626e-09, 5.63886715e-10, -8.74251782e-11,\n 1.32558853e-10, 1.59161573e-10, 2.29420039e-10,\n -7.24185156e-11],\n [-1.50066626e-09, 1.87810656e-10, 7.74889486e-10,\n 3.95630195e-10, 9.42236511e-10, 8.38554115e-10,\n -8.71978045e-11],\n [-1.50066626e-09, 6.20275387e-10, 7.34871719e-10,\n 6.64840627e-10, 2.64662958e-10, 1.05319486e-09,\n 1.68256520e-10],\n [-1.50066626e-09, 1.67347025e-10, 5.12045517e-10,\n 3.36513040e-10, 1.02545528e-10, 1.28784450e-09,\n 4.06089384e-10]], dtype=np.float32)\n\n # Create a new file using the default properties.\n fid = h5py.h5f.create(test_filename)\n # Create the dataspace. 
No maximum size parameter needed.\n dims = (DIM0, DIM1)\n space = h5py.h5s.create_simple(dims)\n\n # create a custom type with larger bias\n mytype = h5t.IEEE_F16LE\n mytype = h5t.IEEE_F16LE.copy()\n mytype.set_fields(14, 9, 5, 0, 9)\n mytype.set_size(2)\n mytype.set_ebias(53)\n mytype.lock()\n\n dset = h5py.h5d.create(fid, dataset, mytype, space)\n dset.write(h5py.h5s.ALL, h5py.h5s.ALL, wdata)\n\n del dset\n\n # create a custom type with larger exponent\n mytype2 = h5t.IEEE_F16LE\n mytype2 = h5t.IEEE_F16LE.copy()\n mytype2.set_fields(15, 9, 6, 0, 9)\n mytype2.set_size(2)\n mytype2.set_ebias(53)\n mytype2.lock()\n\n dset = h5py.h5d.create(fid, dataset2, mytype2, space)\n dset.write(h5py.h5s.ALL, h5py.h5s.ALL, wdata2)\n\n del dset\n\n # create a custom type which reimplements 16-bit floats\n mytype3 = h5t.IEEE_F16LE\n mytype3 = h5t.IEEE_F16LE.copy()\n mytype3.set_fields(15, 10, 5, 0, 10)\n mytype3.set_size(2)\n mytype3.set_ebias(15)\n mytype3.lock()\n\n dset = h5py.h5d.create(fid, dataset3, mytype3, space)\n dset.write(h5py.h5s.ALL, h5py.h5s.ALL, wdata2)\n\n del dset\n\n # create a custom type with larger bias\n mytype4 = h5t.IEEE_F16LE\n mytype4 = h5t.IEEE_F16LE.copy()\n mytype4.set_fields(15, 10, 5, 0, 10)\n mytype4.set_size(2)\n mytype4.set_ebias(258)\n mytype4.lock()\n\n dset = h5py.h5d.create(fid, dataset4, mytype4, space)\n dset.write(h5py.h5s.ALL, h5py.h5s.ALL, wdata2)\n\n del dset\n\n # create a dataset with long doubles\n dset = h5py.h5d.create(fid, dataset5, h5t.NATIVE_LDOUBLE, space)\n dset.write(h5py.h5s.ALL, h5py.h5s.ALL, wdata2)\n\n # Explicitly close and release resources.\n del space\n del dset\n del fid\n\n f = h5py.File(test_filename, 'r')\n\n # ebias promotion to float32\n values = f[dataset][:]\n self.assertTrue(np.all(values == wdata))\n self.assertEqual(values.dtype, np.dtype('<f4'))\n\n # esize promotion to float32\n values = f[dataset2][:]\n self.assertTrue(np.all(values == wdata2))\n self.assertEqual(values.dtype, np.dtype('<f4'))\n\n # regular half floats\n dset = f[dataset3]\n try:\n self.assertEqual(dset.dtype, np.dtype('<f2'))\n except AttributeError:\n self.assertEqual(dset.dtype, np.dtype('<f4'))\n\n # ebias promotion to float64\n dset = f[dataset4]\n self.assertEqual(dset.dtype, np.dtype('<f8'))\n\n # long double floats\n\n dset = f[dataset5]\n self.assertEqual(dset.dtype, np.longdouble)\n",
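As a companion to the compound-type tests above, the following added sketch (not part of the archived file) shows how a NumPy compound dtype maps onto an HDF5 COMPOUND type through `h5t.py_create`, and how the same dtype is used with the high-level API. The field names and the output filename are illustrative.

```python
# Hedged sketch: compound dtypes with h5py's low-level and high-level APIs.
import numpy as np

import h5py
from h5py import h5t

dt = np.dtype([('x', '<f4'), ('n', '<i8')])

# Low-level: py_create produces a COMPOUND TypeID for a compound dtype.
tid = h5t.py_create(dt, logical=True)
assert tid.get_class() == h5t.COMPOUND
assert tid.get_nmembers() == 2

# High-level: the same dtype round-trips through a dataset.
with h5py.File('compound_demo.h5', 'w') as f:       # illustrative filename
    data = np.zeros(5, dtype=dt)
    data['x'] = np.arange(5, dtype='<f4')
    f.create_dataset('table', data=data)

with h5py.File('compound_demo.h5', 'r') as f:
    assert set(f['table'].dtype.names) == {'x', 'n'}
```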
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Gradients for operators defined in sparse_ops.py.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import sparse_tensor\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import gen_sparse_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import sparse_ops\r\n\r\n\r\n# TODO(b/31222613): This op may be differentiable, and there may be\r\n# latent bugs here.\r\nops.NotDifferentiable(\"SparseAddGrad\")\r\nops.NotDifferentiable(\"SparseConcat\")\r\nops.NotDifferentiable(\"SparseToDense\")\r\n\r\n\r\[email protected](\"SparseReorder\")\r\ndef _SparseReorderGrad(op, unused_output_indices_grad, output_values_grad):\r\n \"\"\"Gradients for the SparseReorder op.\r\n\r\n Args:\r\n op: the SparseReorder op\r\n unused_output_indices_grad: the incoming gradients of the output indices\r\n output_values_grad: the incoming gradients of the output values\r\n\r\n Returns:\r\n Gradient for each of the 3 input tensors:\r\n (input_indices, input_values, input_shape)\r\n The gradients for input_indices and input_shape is None.\r\n \"\"\"\r\n input_indices = op.inputs[0]\r\n input_shape = op.inputs[2]\r\n\r\n num_entries = array_ops.shape(input_indices)[0]\r\n entry_indices = math_ops.range(num_entries)\r\n sp_unordered = sparse_tensor.SparseTensor(\r\n input_indices, entry_indices, input_shape)\r\n sp_ordered = sparse_ops.sparse_reorder(sp_unordered)\r\n inverted_permutation = array_ops.invert_permutation(sp_ordered.values)\r\n\r\n return (None,\r\n array_ops.gather(output_values_grad, inverted_permutation),\r\n None)\r\n\r\n\r\[email protected](\"SparseAdd\")\r\ndef _SparseAddGrad(op, *grads):\r\n \"\"\"The backward operator for the SparseAdd op.\r\n\r\n The SparseAdd op calculates A + B, where A, B, and the sum are all represented\r\n as `SparseTensor` objects. This op takes in the upstream gradient w.r.t.\r\n non-empty values of the sum, and outputs the gradients w.r.t. 
the non-empty\r\n values of A and B.\r\n\r\n Args:\r\n op: the SparseAdd op\r\n *grads: the incoming gradients, one element per output of `op`\r\n\r\n Returns:\r\n Gradient for each of the 6 input tensors of SparseAdd:\r\n (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)\r\n The gradients for the indices, shapes, and the threshold are None.\r\n \"\"\"\r\n val_grad = grads[1]\r\n a_indices = op.inputs[0]\r\n b_indices = op.inputs[3]\r\n sum_indices = op.outputs[0]\r\n # NOTE: we do not need to take `thresh` into account, since it simply affects\r\n # the non-zero elements of the sum, and we will peek into `sum_indices` in the\r\n # gradient op.\r\n\r\n a_val_grad, b_val_grad = gen_sparse_ops.sparse_add_grad(\r\n val_grad, a_indices, b_indices, sum_indices)\r\n a_val_grad.set_shape(op.inputs[1].get_shape())\r\n b_val_grad.set_shape(op.inputs[4].get_shape())\r\n # (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)\r\n return (None, a_val_grad, None, None, b_val_grad, None, None)\r\n\r\n\r\[email protected](\"SparseTensorDenseAdd\")\r\ndef _SparseTensorDenseAddGrad(op, out_grad):\r\n sp_indices = op.inputs[0]\r\n # (sparse_indices, sparse_values, sparse_shape, dense)\r\n return (None, array_ops.gather_nd(out_grad, sp_indices), None, out_grad)\r\n\r\n\r\[email protected](\"SparseReduceSum\")\r\ndef _SparseReduceSumGrad(op, out_grad):\r\n \"\"\"Similar to gradient for the Sum Op (i.e. tf.reduce_sum()).\"\"\"\r\n sp_indices = op.inputs[0]\r\n sp_shape = op.inputs[2]\r\n output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])\r\n out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)\r\n scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)\r\n # (sparse_indices, sparse_values, sparse_shape, reduction_axes)\r\n return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),\r\n None, None)\r\n\r\n\r\[email protected](\"SparseSlice\")\r\ndef _SparseSliceGrad(op, *grads):\r\n \"\"\"The backward operator for the SparseSlice op.\r\n\r\n This op takes in the upstream gradient w.r.t. 
non-empty values of\r\n the sliced `SparseTensor`, and outputs the gradients w.r.t.\r\n the non-empty values of input `SparseTensor`.\r\n\r\n Args:\r\n op: the SparseSlice op\r\n *grads: the incoming gradients, one element per output of `op`\r\n\r\n Returns:\r\n Gradient for each of the 5 input tensors of SparseSlice:\r\n (indices, values, shape, start, size)\r\n The gradients for the indices, shape, start and the size are None.\r\n \"\"\"\r\n backprop_val_grad = grads[1]\r\n input_indices = op.inputs[0]\r\n input_start = op.inputs[3]\r\n output_indices = op.outputs[0]\r\n\r\n val_grad = gen_sparse_ops.sparse_slice_grad(\r\n backprop_val_grad, input_indices, input_start, output_indices)\r\n val_grad.set_shape(op.inputs[1].get_shape())\r\n # (indices, values, shape, start, size)\r\n return (None, val_grad, None, None, None)\r\n\r\n\r\[email protected](\"SparseTensorDenseMatMul\")\r\ndef _SparseTensorDenseMatMulGrad(op, grad):\r\n \"\"\"Gradients for the dense tensor in the SparseTensorDenseMatMul op.\r\n\r\n If either input is complex, no gradient is provided.\r\n\r\n Args:\r\n op: the SparseTensorDenseMatMul op\r\n grad: the incoming gradient\r\n\r\n Returns:\r\n Gradient for each of the 4 input tensors:\r\n (sparse_indices, sparse_values, sparse_shape, dense_tensor)\r\n The gradients for indices and shape are None.\r\n\r\n Raises:\r\n TypeError: When the two operands don't have the same type.\r\n \"\"\"\r\n a_indices, a_values, a_shape = op.inputs[:3]\r\n b = op.inputs[3]\r\n adj_a = op.get_attr(\"adjoint_a\")\r\n adj_b = op.get_attr(\"adjoint_b\")\r\n\r\n a_type = a_values.dtype.base_dtype\r\n b_type = b.dtype.base_dtype\r\n if a_type != b_type:\r\n raise TypeError(\"SparseTensorDenseMatMul op received operands with \"\r\n \"different types: \", a_type, \" and \", b_type)\r\n if a_type in (ops.dtypes.complex64, ops.dtypes.complex128):\r\n raise NotImplementedError(\"SparseTensorDenseMatMul op does not support \"\r\n \"complex gradients.\")\r\n\r\n # gradient w.r.t. dense\r\n b_grad = gen_sparse_ops.sparse_tensor_dense_mat_mul(\r\n a_indices, a_values, a_shape, grad, adjoint_a=not adj_a)\r\n if adj_b:\r\n b_grad = array_ops.transpose(b_grad)\r\n\r\n # gradient w.r.t. sparse values\r\n rows = a_indices[:, 0]\r\n cols = a_indices[:, 1]\r\n\r\n # TODO(zongheng, ebrevdo): add conjugates in the right places when complex\r\n # values are allowed.\r\n # TODO(zongheng): these gather calls could potentially duplicate rows/cols in\r\n # memory. If there is a need, we should look into implementing this more\r\n # intelligently to avoid duplicating data.\r\n parts_a = array_ops.gather(grad, rows if not adj_a else cols)\r\n parts_b = array_ops.gather(b if not adj_b else array_ops.transpose(b),\r\n cols if not adj_a else rows)\r\n a_values_grad = math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1)\r\n\r\n # gradients w.r.t. 
(a_indices, a_values, a_shape, b)\r\n return (None, a_values_grad, None, b_grad)\r\n\r\n\r\[email protected](\"SparseDenseCwiseAdd\")\r\ndef _SparseDenseCwiseAddGrad(unused_op, unused_grad):\r\n raise NotImplementedError(\"Gradient for SparseDenseCwiseAdd is currently not\"\r\n \" implemented yet.\")\r\n\r\n\r\ndef _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):\r\n \"\"\"Common code for SparseDenseCwise{Mul,Div} gradients.\"\"\"\r\n x_indices = op.inputs[0]\r\n x_shape = op.inputs[2]\r\n y = op.inputs[3]\r\n\r\n y_shape = math_ops.to_int64(array_ops.shape(y))\r\n num_added_dims = array_ops.expand_dims(\r\n array_ops.size(x_shape) - array_ops.size(y_shape), 0)\r\n augmented_y_shape = array_ops.concat(\r\n [array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)\r\n\r\n scaling = x_shape // augmented_y_shape\r\n scaled_indices = x_indices // scaling\r\n scaled_indices = array_ops.slice(scaled_indices,\r\n array_ops.concat([[0], num_added_dims], 0),\r\n [-1, -1])\r\n dense_vals = array_ops.gather_nd(y, scaled_indices)\r\n\r\n if is_mul:\r\n dx = grad * dense_vals\r\n dy_val = grad * op.inputs[1]\r\n else:\r\n dx = grad / dense_vals\r\n dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))\r\n # indices can repeat after scaling, so we can't use sparse_to_dense().\r\n dy = sparse_ops.sparse_add(\r\n array_ops.zeros_like(y),\r\n sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))\r\n\r\n # (sp_indices, sp_vals, sp_shape, dense)\r\n return (None, dx, None, dy)\r\n\r\n\r\[email protected](\"SparseDenseCwiseMul\")\r\ndef _SparseDenseCwiseMulGrad(op, grad):\r\n \"\"\"Gradients for SparseDenseCwiseMul.\"\"\"\r\n return _SparseDenseCwiseMulOrDivGrad(op, grad, True)\r\n\r\n\r\[email protected](\"SparseDenseCwiseDiv\")\r\ndef _SparseDenseCwiseDivGrad(op, grad):\r\n \"\"\"Gradients for SparseDenseCwiseDiv.\"\"\"\r\n return _SparseDenseCwiseMulOrDivGrad(op, grad, False)\r\n\r\n\r\[email protected](\"SparseSoftmax\")\r\ndef _SparseSoftmaxGrad(op, grad):\r\n \"\"\"Gradients for SparseSoftmax.\r\n\r\n The calculation is the same as SoftmaxGrad:\r\n\r\n grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax\r\n\r\n where we now only operate on the non-zero values present in the SparseTensors.\r\n\r\n Args:\r\n op: the SparseSoftmax op.\r\n grad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values.\r\n\r\n Returns:\r\n Gradients w.r.t. 
the input (sp_indices, sp_values, sp_shape).\r\n \"\"\"\r\n indices, shape = op.inputs[0], op.inputs[2]\r\n out_vals = op.outputs[0]\r\n sp_output = sparse_tensor.SparseTensor(indices, out_vals, shape)\r\n sp_grad = sparse_tensor.SparseTensor(indices, grad, shape)\r\n sp_product = sparse_tensor.SparseTensor(\r\n indices, sp_output.values * sp_grad.values, shape)\r\n\r\n # [..., B, 1], dense.\r\n sum_reduced = -sparse_ops.sparse_reduce_sum(sp_product, [-1], keep_dims=True)\r\n # sparse [..., B, C] + dense [..., B, 1] with broadcast; outputs sparse.\r\n sp_sum = sparse_ops.sparse_dense_cwise_add(sp_grad, sum_reduced)\r\n\r\n grad_x = sp_sum.values * sp_output.values\r\n return [None, grad_x, None]\r\n\r\n\r\[email protected](\"SparseSparseMaximum\")\r\ndef _SparseSparseMaximumGrad(unused_op, unused_grad):\r\n raise NotImplementedError(\"Gradient for SparseSparseMaximum is currently not\"\r\n \" implemented yet.\")\r\n\r\n\r\[email protected](\"SparseSparseMinimum\")\r\ndef _SparseSparseMinimumGrad(unused_op, unused_grad):\r\n raise NotImplementedError(\"Gradient for SparseSparseMinimum is currently not\"\r\n \" implemented yet.\")\r\n\r\n\r\[email protected](\"SparseFillEmptyRows\")\r\ndef _SparseFillEmptyRowsGrad(op, unused_grad_output_indices, output_grad_values,\r\n unused_grad_empty_row_indicator,\r\n unused_grad_reverse_index_map):\r\n \"\"\"Gradients for SparseFillEmptyRows.\"\"\"\r\n reverse_index_map = op.outputs[3]\r\n\r\n d_values, d_default_value = gen_sparse_ops.sparse_fill_empty_rows_grad(\r\n reverse_index_map=reverse_index_map, grad_values=output_grad_values)\r\n\r\n # d_indices, d_values, d_dense_shape, d_default_value.\r\n return [None, d_values, None, d_default_value]\r\n",
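The gradient functions registered above are picked up automatically by `tf.gradients`. The following added sketch (not part of the archived file) differentiates `tf.sparse_tensor_dense_matmul` with respect to the sparse values and the dense operand; the tensor contents are illustrative and a TF 1.x graph-mode session is assumed.

```python
# Hedged sketch: exercising _SparseTensorDenseMatMulGrad via tf.gradients.
import tensorflow as tf

indices = tf.constant([[0, 0], [1, 2]], dtype=tf.int64)
values = tf.constant([3.0, 4.0])
dense_shape = tf.constant([2, 3], dtype=tf.int64)
sp = tf.SparseTensor(indices=indices, values=values, dense_shape=dense_shape)

b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])   # shape [3, 2]
product = tf.sparse_tensor_dense_matmul(sp, b)           # shape [2, 2]
loss = tf.reduce_sum(product)

# The registered gradient provides values for the sparse values and the dense
# operand; gradients w.r.t. the indices and shape are None by design.
grad_values, grad_b = tf.gradients(loss, [values, b])

with tf.Session() as sess:
    print(sess.run([grad_values, grad_b]))
```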
"# This file is part of h5py, a Python interface to the HDF5 library.\n#\n# http://www.h5py.org\n#\n# Copyright 2008-2013 Andrew Collette and contributors\n#\n# License: Standard 3-clause BSD; see \"license.txt\" for full license terms\n# and contributor agreement.\n\n\"\"\"\n Common high-level operations test\n\n Tests features common to all high-level objects, like the .name property.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport six\n\nfrom h5py import File\nfrom .common import ut, TestCase, unicode_filenames\n\nimport numpy as np\nimport os\nimport tempfile\n\nclass BaseTest(TestCase):\n\n def setUp(self):\n self.f = File(self.mktemp(), 'w')\n\n def tearDown(self):\n if self.f:\n self.f.close()\n\nclass TestName(BaseTest):\n\n \"\"\"\n Feature: .name attribute returns the object name\n \"\"\"\n\n def test_anonymous(self):\n \"\"\" Anomymous objects have name None \"\"\"\n grp = self.f.create_group(None)\n self.assertIs(grp.name, None)\n\nclass TestRepr(BaseTest):\n\n \"\"\"\n repr() works correctly with Unicode names\n \"\"\"\n\n USTRING = six.unichr(0xfc) + six.unichr(0xdf)\n\n def _check_type(self, obj):\n if six.PY2:\n self.assertIsInstance(repr(obj), bytes)\n else:\n self.assertIsInstance(repr(obj), six.text_type)\n\n def test_group(self):\n \"\"\" Group repr() with unicode \"\"\"\n grp = self.f.create_group(self.USTRING)\n self._check_type(grp)\n\n def test_dataset(self):\n \"\"\" Dataset repr() with unicode \"\"\"\n dset = self.f.create_dataset(self.USTRING, (1,))\n self._check_type(dset)\n\n def test_namedtype(self):\n \"\"\" Named type repr() with unicode \"\"\"\n self.f['type'] = np.dtype('f')\n typ = self.f['type']\n self._check_type(typ)\n\n @ut.skipIf(not unicode_filenames, \"Filesystem unicode support required\")\n def test_file(self):\n \"\"\" File object repr() with unicode \"\"\"\n fname = tempfile.mktemp(self.USTRING+six.u('.hdf5'))\n try:\n with File(fname,'w') as f:\n self._check_type(f)\n finally:\n try:\n os.unlink(fname)\n except Exception:\n pass\n\n \n\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Recurrent Neural Network estimators.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport six\r\n\r\nfrom tensorflow.contrib.estimator.python.estimator import extenders\r\nfrom tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as seq_fc\r\nfrom tensorflow.python.estimator import estimator\r\nfrom tensorflow.python.estimator.canned import head as head_lib\r\nfrom tensorflow.python.estimator.canned import optimizers\r\nfrom tensorflow.python.feature_column import feature_column as feature_column_lib\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.layers import core as core_layers\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import check_ops\r\nfrom tensorflow.python.ops import init_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import partitioned_variables\r\nfrom tensorflow.python.ops import rnn\r\nfrom tensorflow.python.ops import rnn_cell\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.ops.losses import losses\r\nfrom tensorflow.python.summary import summary\r\nfrom tensorflow.python.training import optimizer as optimizer_lib\r\nfrom tensorflow.python.training import training_util\r\n\r\n\r\n# The defaults are historical artifacts of the initial implementation, but seem\r\n# reasonable choices.\r\n_DEFAULT_LEARNING_RATE = 0.05\r\n_DEFAULT_CLIP_NORM = 5.0\r\n\r\n_CELL_TYPES = {'basic_rnn': rnn_cell.BasicRNNCell,\r\n 'lstm': rnn_cell.BasicLSTMCell,\r\n 'gru': rnn_cell.GRUCell}\r\n\r\n# Indicates no value was provided by the user to a kwarg.\r\nUSE_DEFAULT = object()\r\n\r\n\r\ndef _single_rnn_cell(num_units, cell_type):\r\n cell_type = _CELL_TYPES.get(cell_type, cell_type)\r\n if not cell_type or not issubclass(cell_type, rnn_cell.RNNCell):\r\n raise ValueError('Supported cell types are {}; got {}'.format(\r\n list(_CELL_TYPES.keys()), cell_type))\r\n return cell_type(num_units=num_units)\r\n\r\n\r\ndef _make_rnn_cell_fn(num_units, cell_type='basic_rnn'):\r\n \"\"\"Convenience function to create `rnn_cell_fn` for canned RNN Estimators.\r\n\r\n Args:\r\n num_units: Iterable of integer number of hidden units per RNN layer.\r\n cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying\r\n the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and\r\n `'gru'`.\r\n\r\n Returns:\r\n A function that takes a single argument, an instance of\r\n `tf.estimator.ModeKeys`, and returns an instance derived from\r\n `tf.nn.rnn_cell.RNNCell`.\r\n\r\n Raises:\r\n ValueError: If cell_type is not supported.\r\n \"\"\"\r\n def rnn_cell_fn(mode):\r\n # Unused. 
Part of the rnn_cell_fn interface since user specified functions\r\n # may need different behavior across modes (e.g. dropout).\r\n del mode\r\n cells = [_single_rnn_cell(n, cell_type) for n in num_units]\r\n if len(cells) == 1:\r\n return cells[0]\r\n return rnn_cell.MultiRNNCell(cells)\r\n return rnn_cell_fn\r\n\r\n\r\ndef _concatenate_context_input(sequence_input, context_input):\r\n \"\"\"Replicates `context_input` across all timesteps of `sequence_input`.\r\n\r\n Expands dimension 1 of `context_input` then tiles it `sequence_length` times.\r\n This value is appended to `sequence_input` on dimension 2 and the result is\r\n returned.\r\n\r\n Args:\r\n sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,\r\n padded_length, d0]`.\r\n context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.\r\n\r\n Returns:\r\n A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,\r\n d0 + d1]`.\r\n\r\n Raises:\r\n ValueError: If `sequence_input` does not have rank 3 or `context_input` does\r\n not have rank 2.\r\n \"\"\"\r\n seq_rank_check = check_ops.assert_rank(\r\n sequence_input,\r\n 3,\r\n message='sequence_input must have rank 3',\r\n data=[array_ops.shape(sequence_input)])\r\n seq_type_check = check_ops.assert_type(\r\n sequence_input,\r\n dtypes.float32,\r\n message='sequence_input must have dtype float32; got {}.'.format(\r\n sequence_input.dtype))\r\n ctx_rank_check = check_ops.assert_rank(\r\n context_input,\r\n 2,\r\n message='context_input must have rank 2',\r\n data=[array_ops.shape(context_input)])\r\n ctx_type_check = check_ops.assert_type(\r\n context_input,\r\n dtypes.float32,\r\n message='context_input must have dtype float32; got {}.'.format(\r\n context_input.dtype))\r\n with ops.control_dependencies(\r\n [seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):\r\n padded_length = array_ops.shape(sequence_input)[1]\r\n tiled_context_input = array_ops.tile(\r\n array_ops.expand_dims(context_input, 1),\r\n array_ops.concat([[1], [padded_length], [1]], 0))\r\n return array_ops.concat([sequence_input, tiled_context_input], 2)\r\n\r\n\r\ndef _select_last_activations(activations, sequence_lengths):\r\n \"\"\"Selects the nth set of activations for each n in `sequence_length`.\r\n\r\n Returns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not\r\n `None`, then `output[i, :] = activations[i, sequence_length[i] - 1, :]`. 
If\r\n `sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.\r\n\r\n Args:\r\n activations: A `Tensor` with shape `[batch_size, padded_length, k]`.\r\n sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.\r\n Returns:\r\n A `Tensor` of shape `[batch_size, k]`.\r\n \"\"\"\r\n with ops.name_scope(\r\n 'select_last_activations', values=[activations, sequence_lengths]):\r\n activations_shape = array_ops.shape(activations)\r\n batch_size = activations_shape[0]\r\n padded_length = activations_shape[1]\r\n output_units = activations_shape[2]\r\n if sequence_lengths is None:\r\n sequence_lengths = padded_length\r\n start_indices = math_ops.to_int64(\r\n math_ops.range(batch_size) * padded_length)\r\n last_indices = start_indices + sequence_lengths - 1\r\n reshaped_activations = array_ops.reshape(\r\n activations, [batch_size * padded_length, output_units])\r\n\r\n last_activations = array_ops.gather(reshaped_activations, last_indices)\r\n last_activations.set_shape([activations.shape[0], activations.shape[2]])\r\n return last_activations\r\n\r\n\r\ndef _rnn_logit_fn_builder(output_units, rnn_cell_fn, sequence_feature_columns,\r\n context_feature_columns, input_layer_partitioner):\r\n \"\"\"Function builder for a rnn logit_fn.\r\n\r\n Args:\r\n output_units: An int indicating the dimension of the logit layer.\r\n rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and\r\n returns an object of type `tf.nn.rnn_cell.RNNCell`.\r\n sequence_feature_columns: An iterable containing the `FeatureColumn`s\r\n that represent sequential input.\r\n context_feature_columns: An iterable containing the `FeatureColumn`s\r\n that represent contextual input.\r\n input_layer_partitioner: Partitioner for input layer.\r\n\r\n Returns:\r\n A logit_fn (see below).\r\n\r\n Raises:\r\n ValueError: If output_units is not an int.\r\n \"\"\"\r\n if not isinstance(output_units, int):\r\n raise ValueError('output_units must be an int. Given type: {}'.format(\r\n type(output_units)))\r\n\r\n def rnn_logit_fn(features, mode):\r\n \"\"\"Recurrent Neural Network logit_fn.\r\n\r\n Args:\r\n features: This is the first item returned from the `input_fn`\r\n passed to `train`, `evaluate`, and `predict`. This should be a\r\n single `Tensor` or `dict` of same.\r\n mode: Optional. Specifies if this training, evaluation or prediction. 
See\r\n `ModeKeys`.\r\n\r\n Returns:\r\n A `Tensor` representing the logits.\r\n \"\"\"\r\n with variable_scope.variable_scope(\r\n 'sequence_input_layer',\r\n values=tuple(six.itervalues(features)),\r\n partitioner=input_layer_partitioner):\r\n sequence_input, sequence_length = seq_fc.sequence_input_layer(\r\n features=features, feature_columns=sequence_feature_columns)\r\n summary.histogram('sequence_length', sequence_length)\r\n\r\n if context_feature_columns:\r\n context_input = feature_column_lib.input_layer(\r\n features=features,\r\n feature_columns=context_feature_columns)\r\n sequence_input = _concatenate_context_input(sequence_input,\r\n context_input)\r\n\r\n cell = rnn_cell_fn(mode)\r\n # Ignore output state.\r\n rnn_outputs, _ = rnn.dynamic_rnn(\r\n cell=cell,\r\n inputs=sequence_input,\r\n sequence_length=sequence_length,\r\n dtype=dtypes.float32,\r\n time_major=False)\r\n last_activations = _select_last_activations(rnn_outputs, sequence_length)\r\n\r\n with variable_scope.variable_scope('logits', values=(rnn_outputs,)):\r\n logits = core_layers.dense(\r\n last_activations,\r\n units=output_units,\r\n activation=None,\r\n kernel_initializer=init_ops.glorot_uniform_initializer())\r\n return logits\r\n\r\n return rnn_logit_fn\r\n\r\n\r\ndef _rnn_model_fn(features,\r\n labels,\r\n mode,\r\n head,\r\n rnn_cell_fn,\r\n sequence_feature_columns,\r\n context_feature_columns,\r\n optimizer='Adagrad',\r\n input_layer_partitioner=None,\r\n config=None):\r\n \"\"\"Recurrent Neural Net model_fn.\r\n\r\n Args:\r\n features: dict of `Tensor` and `SparseTensor` objects returned from\r\n `input_fn`.\r\n labels: `Tensor` of shape [batch_size, 1] or [batch_size] with labels.\r\n mode: Defines whether this is training, evaluation or prediction.\r\n See `ModeKeys`.\r\n head: A `head_lib._Head` instance.\r\n rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and\r\n returns an object of type `tf.nn.rnn_cell.RNNCell`.\r\n sequence_feature_columns: Iterable containing `FeatureColumn`s that\r\n represent sequential model inputs.\r\n context_feature_columns: Iterable containing `FeatureColumn`s that\r\n represent model inputs not associated with a specific timestep.\r\n optimizer: String, `tf.Optimizer` object, or callable that creates the\r\n optimizer to use for training. If not specified, will use the Adagrad\r\n optimizer with a default learning rate of 0.05 and gradient clip norm of\r\n 5.0.\r\n input_layer_partitioner: Partitioner for input layer. Defaults\r\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\r\n config: `RunConfig` object to configure the runtime settings.\r\n\r\n Returns:\r\n An `EstimatorSpec` instance.\r\n\r\n Raises:\r\n ValueError: If mode or optimizer is invalid, or features has the wrong type.\r\n \"\"\"\r\n if not isinstance(features, dict):\r\n raise ValueError('features should be a dictionary of `Tensor`s. 
'\r\n 'Given type: {}'.format(type(features)))\r\n\r\n # If user does not provide an optimizer instance, use the optimizer specified\r\n # by the string with default learning rate and gradient clipping.\r\n if not isinstance(optimizer, optimizer_lib.Optimizer):\r\n optimizer = optimizers.get_optimizer_instance(\r\n optimizer, learning_rate=_DEFAULT_LEARNING_RATE)\r\n optimizer = extenders.clip_gradients_by_norm(optimizer, _DEFAULT_CLIP_NORM)\r\n\r\n num_ps_replicas = config.num_ps_replicas if config else 0\r\n partitioner = partitioned_variables.min_max_variable_partitioner(\r\n max_partitions=num_ps_replicas)\r\n with variable_scope.variable_scope(\r\n 'rnn',\r\n values=tuple(six.itervalues(features)),\r\n partitioner=partitioner):\r\n input_layer_partitioner = input_layer_partitioner or (\r\n partitioned_variables.min_max_variable_partitioner(\r\n max_partitions=num_ps_replicas,\r\n min_slice_size=64 << 20))\r\n\r\n logit_fn = _rnn_logit_fn_builder(\r\n output_units=head.logits_dimension,\r\n rnn_cell_fn=rnn_cell_fn,\r\n sequence_feature_columns=sequence_feature_columns,\r\n context_feature_columns=context_feature_columns,\r\n input_layer_partitioner=input_layer_partitioner)\r\n logits = logit_fn(features=features, mode=mode)\r\n\r\n def _train_op_fn(loss):\r\n \"\"\"Returns the op to optimize the loss.\"\"\"\r\n return optimizer.minimize(\r\n loss,\r\n global_step=training_util.get_global_step())\r\n\r\n return head.create_estimator_spec(\r\n features=features,\r\n mode=mode,\r\n labels=labels,\r\n train_op_fn=_train_op_fn,\r\n logits=logits)\r\n\r\n\r\ndef _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type):\r\n \"\"\"Assert arguments are valid and return rnn_cell_fn.\"\"\"\r\n if rnn_cell_fn and (num_units or cell_type != USE_DEFAULT):\r\n raise ValueError(\r\n 'num_units and cell_type must not be specified when using rnn_cell_fn'\r\n )\r\n if not rnn_cell_fn:\r\n if cell_type == USE_DEFAULT:\r\n cell_type = 'basic_rnn'\r\n rnn_cell_fn = _make_rnn_cell_fn(num_units, cell_type)\r\n return rnn_cell_fn\r\n\r\n\r\nclass RNNClassifier(estimator.Estimator):\r\n \"\"\"A classifier for TensorFlow RNN models.\r\n\r\n Trains a recurrent neural network model to classify instances into one of\r\n multiple classes.\r\n\r\n Example:\r\n\r\n ```python\r\n token_sequence = sequence_categorical_column_with_hash_bucket(...)\r\n token_emb = embedding_column(categorical_column=token_sequence, ...)\r\n\r\n estimator = RNNClassifier(\r\n sequence_feature_columns=[token_emb],\r\n num_units=[32, 16], cell_type='lstm')\r\n\r\n # Input builders\r\n def input_fn_train: # returns x, y\r\n pass\r\n estimator.train(input_fn=input_fn_train, steps=100)\r\n\r\n def input_fn_eval: # returns x, y\r\n pass\r\n metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\r\n def input_fn_predict: # returns x, None\r\n pass\r\n predictions = estimator.predict(input_fn=input_fn_predict)\r\n ```\r\n\r\n Input of `train` and `evaluate` should have following features,\r\n otherwise there will be a `KeyError`:\r\n\r\n * if `weight_column` is not `None`, a feature with\r\n `key=weight_column` whose value is a `Tensor`.\r\n * for each `column` in `sequence_feature_columns`:\r\n - a feature with `key=column.name` whose `value` is a `SparseTensor`.\r\n * for each `column` in `context_feature_columns`:\r\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\r\n whose `value` is a `SparseTensor`.\r\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\r\n with `key` the id column name, 
the second with `key` the weight column\r\n name. Both features' `value` must be a `SparseTensor`.\r\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\r\n whose `value` is a `Tensor`.\r\n\r\n Loss is calculated by using softmax cross entropy.\r\n\r\n @compatibility(eager)\r\n Estimators are not compatible with eager execution.\r\n @end_compatibility\r\n \"\"\"\r\n\r\n def __init__(self,\r\n sequence_feature_columns,\r\n context_feature_columns=None,\r\n num_units=None,\r\n cell_type=USE_DEFAULT,\r\n rnn_cell_fn=None,\r\n model_dir=None,\r\n n_classes=2,\r\n weight_column=None,\r\n label_vocabulary=None,\r\n optimizer='Adagrad',\r\n loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,\r\n input_layer_partitioner=None,\r\n config=None):\r\n \"\"\"Initializes a `RNNClassifier` instance.\r\n\r\n Args:\r\n sequence_feature_columns: An iterable containing the `FeatureColumn`s\r\n that represent sequential input. All items in the set should either be\r\n sequence columns (e.g. `sequence_numeric_column`) or constructed from\r\n one (e.g. `embedding_column` with `sequence_categorical_column_*` as\r\n input).\r\n context_feature_columns: An iterable containing the `FeatureColumn`s\r\n for contextual input. The data represented by these columns will be\r\n replicated and given to the RNN at each timestep. These columns must be\r\n instances of classes derived from `_DenseColumn` such as\r\n `numeric_column`, not the sequential variants.\r\n num_units: Iterable of integer number of hidden units per RNN layer. If\r\n set, `cell_type` must also be specified and `rnn_cell_fn` must be\r\n `None`.\r\n cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying\r\n the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and\r\n `'gru'`. If set, `num_units` must also be specified and `rnn_cell_fn`\r\n must be `None`.\r\n rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and\r\n returns an object of type `tf.nn.rnn_cell.RNNCell` that will be used to\r\n construct the RNN. If set, `num_units` and `cell_type` cannot be set.\r\n This is for advanced users who need additional customization beyond\r\n `num_units` and `cell_type`. Note that `tf.nn.rnn_cell.MultiRNNCell` is\r\n needed for stacked RNNs.\r\n model_dir: Directory to save model parameters, graph and etc. This can\r\n also be used to load checkpoints from the directory into a estimator to\r\n continue training a previously saved model.\r\n n_classes: Number of label classes. Defaults to 2, namely binary\r\n classification. Must be > 1.\r\n weight_column: A string or a `_NumericColumn` created by\r\n `tf.feature_column.numeric_column` defining feature column representing\r\n weights. It is used to down weight or boost examples during training. It\r\n will be multiplied by the loss of the example. If it is a string, it is\r\n used as a key to fetch weight tensor from the `features`. If it is a\r\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\r\n then weight_column.normalizer_fn is applied on it to get weight tensor.\r\n label_vocabulary: A list of strings represents possible label values. If\r\n given, labels must be string type and have any value in\r\n `label_vocabulary`. 
If it is not given, that means labels are\r\n already encoded as integer or float within [0, 1] for `n_classes=2` and\r\n encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .\r\n Also there will be errors if vocabulary is not provided and labels are\r\n string.\r\n optimizer: An instance of `tf.Optimizer` or string specifying optimizer\r\n type. Defaults to Adagrad optimizer.\r\n loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how\r\n to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`.\r\n input_layer_partitioner: Optional. Partitioner for input layer. Defaults\r\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\r\n config: `RunConfig` object to configure the runtime settings.\r\n\r\n Raises:\r\n ValueError: If `num_units`, `cell_type`, and `rnn_cell_fn` are not\r\n compatible.\r\n \"\"\"\r\n rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)\r\n\r\n if n_classes == 2:\r\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access\r\n weight_column=weight_column,\r\n label_vocabulary=label_vocabulary,\r\n loss_reduction=loss_reduction)\r\n else:\r\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access\r\n n_classes,\r\n weight_column=weight_column,\r\n label_vocabulary=label_vocabulary,\r\n loss_reduction=loss_reduction)\r\n\r\n def _model_fn(features, labels, mode, config):\r\n return _rnn_model_fn(\r\n features=features,\r\n labels=labels,\r\n mode=mode,\r\n head=head,\r\n rnn_cell_fn=rnn_cell_fn,\r\n sequence_feature_columns=tuple(sequence_feature_columns or []),\r\n context_feature_columns=tuple(context_feature_columns or []),\r\n optimizer=optimizer,\r\n input_layer_partitioner=input_layer_partitioner,\r\n config=config)\r\n super(RNNClassifier, self).__init__(\r\n model_fn=_model_fn, model_dir=model_dir, config=config)\r\n\r\n\r\nclass RNNEstimator(estimator.Estimator):\r\n \"\"\"An Estimator for TensorFlow RNN models with user-specified head.\r\n\r\n Example:\r\n\r\n ```python\r\n token_sequence = sequence_categorical_column_with_hash_bucket(...)\r\n token_emb = embedding_column(categorical_column=token_sequence, ...)\r\n\r\n estimator = RNNEstimator(\r\n head=tf.contrib.estimator.regression_head(),\r\n sequence_feature_columns=[token_emb],\r\n num_units=[32, 16], cell_type='lstm')\r\n\r\n # Or with custom RNN cell:\r\n def rnn_cell_fn(mode):\r\n cells = [ tf.contrib.rnn.LSTMCell(size) for size in [32, 16] ]\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n cells = [ tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=0.5)\r\n for cell in cells ]\r\n return tf.contrib.rnn.MultiRNNCell(cells)\r\n\r\n estimator = RNNEstimator(\r\n head=tf.contrib.estimator.regression_head(),\r\n sequence_feature_columns=[token_emb],\r\n rnn_cell_fn=rnn_cell_fn)\r\n\r\n # Input builders\r\n def input_fn_train: # returns x, y\r\n pass\r\n estimator.train(input_fn=input_fn_train, steps=100)\r\n\r\n def input_fn_eval: # returns x, y\r\n pass\r\n metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\r\n def input_fn_predict: # returns x, None\r\n pass\r\n predictions = estimator.predict(input_fn=input_fn_predict)\r\n ```\r\n\r\n Input of `train` and `evaluate` should have following features,\r\n otherwise there will be a `KeyError`:\r\n\r\n * if the head's `weight_column` is not `None`, a feature with\r\n `key=weight_column` whose value is a `Tensor`.\r\n * for each `column` in `sequence_feature_columns`:\r\n 
- a feature with `key=column.name` whose `value` is a `SparseTensor`.\r\n * for each `column` in `context_feature_columns`:\r\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\r\n whose `value` is a `SparseTensor`.\r\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\r\n with `key` the id column name, the second with `key` the weight column\r\n name. Both features' `value` must be a `SparseTensor`.\r\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\r\n whose `value` is a `Tensor`.\r\n\r\n Loss and predicted output are determined by the specified head.\r\n\r\n @compatibility(eager)\r\n Estimators are not compatible with eager execution.\r\n @end_compatibility\r\n \"\"\"\r\n\r\n def __init__(self,\r\n head,\r\n sequence_feature_columns,\r\n context_feature_columns=None,\r\n num_units=None,\r\n cell_type=USE_DEFAULT,\r\n rnn_cell_fn=None,\r\n model_dir=None,\r\n optimizer='Adagrad',\r\n input_layer_partitioner=None,\r\n config=None):\r\n \"\"\"Initializes a `RNNClassifier` instance.\r\n\r\n Args:\r\n head: A `_Head` instance constructed with a method such as\r\n `tf.contrib.estimator.multi_label_head`. This specifies the model's\r\n output and loss function to be optimized.\r\n sequence_feature_columns: An iterable containing the `FeatureColumn`s\r\n that represent sequential input. All items in the set should either be\r\n sequence columns (e.g. `sequence_numeric_column`) or constructed from\r\n one (e.g. `embedding_column` with `sequence_categorical_column_*` as\r\n input).\r\n context_feature_columns: An iterable containing the `FeatureColumn`s\r\n for contextual input. The data represented by these columns will be\r\n replicated and given to the RNN at each timestep. These columns must be\r\n instances of classes derived from `_DenseColumn` such as\r\n `numeric_column`, not the sequential variants.\r\n num_units: Iterable of integer number of hidden units per RNN layer. If\r\n set, `cell_type` must also be specified and `rnn_cell_fn` must be\r\n `None`.\r\n cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying\r\n the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and\r\n `'gru'`. If set, `num_units` must also be specified and `rnn_cell_fn`\r\n must be `None`.\r\n rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and\r\n returns an object of type `tf.nn.rnn_cell.RNNCell` that will be used to\r\n construct the RNN. If set, `num_units` and `cell_type` cannot be set.\r\n This is for advanced users who need additional customization beyond\r\n `num_units` and `cell_type`. Note that `tf.nn.rnn_cell.MultiRNNCell` is\r\n needed for stacked RNNs.\r\n model_dir: Directory to save model parameters, graph and etc. This can\r\n also be used to load checkpoints from the directory into a estimator to\r\n continue training a previously saved model.\r\n optimizer: An instance of `tf.Optimizer` or string specifying optimizer\r\n type. Defaults to Adagrad optimizer.\r\n input_layer_partitioner: Optional. Partitioner for input layer. 
Defaults\r\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\r\n config: `RunConfig` object to configure the runtime settings.\r\n\r\n Raises:\r\n ValueError: If `num_units`, `cell_type`, and `rnn_cell_fn` are not\r\n compatible.\r\n \"\"\"\r\n rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)\r\n\r\n def _model_fn(features, labels, mode, config):\r\n return _rnn_model_fn(\r\n features=features,\r\n labels=labels,\r\n mode=mode,\r\n head=head,\r\n rnn_cell_fn=rnn_cell_fn,\r\n sequence_feature_columns=tuple(sequence_feature_columns or []),\r\n context_feature_columns=tuple(context_feature_columns or []),\r\n optimizer=optimizer,\r\n input_layer_partitioner=input_layer_partitioner,\r\n config=config)\r\n super(RNNEstimator, self).__init__(\r\n model_fn=_model_fn, model_dir=model_dir, config=config)\r\n",
"# This file is part of h5py, a Python interface to the HDF5 library.\n#\n# http://www.h5py.org\n#\n# Copyright 2008-2013 Andrew Collette and contributors\n#\n# License: Standard 3-clause BSD; see \"license.txt\" for full license terms\n# and contributor agreement.\n\nfrom __future__ import absolute_import\n\nimport sys\nimport os\nimport shutil\nimport tempfile\nfrom contextlib import contextmanager\n\nfrom six import unichr\n\nimport numpy as np\nimport h5py\n\nif sys.version_info >= (2, 7) or sys.version_info >= (3, 2):\n import unittest as ut\nelse:\n try:\n import unittest2 as ut\n except ImportError:\n raise ImportError(\n 'unittest2 is required to run the test suite with python-%d.%d'\n % (sys.version_info[:2])\n )\n\n\n# Check if non-ascii filenames are supported\n# Evidently this is the most reliable way to check\n# See also h5py issue #263 and ipython #466\n# To test for this, run the testsuite with LC_ALL=C\ntry:\n testfile, fname = tempfile.mkstemp(unichr(0x03b7))\nexcept UnicodeError:\n UNICODE_FILENAMES = False\nelse:\n UNICODE_FILENAMES = True\n os.close(testfile)\n os.unlink(fname)\n del fname\n del testfile\n\n\nclass TestCase(ut.TestCase):\n\n \"\"\"\n Base class for unit tests.\n \"\"\"\n \n @classmethod\n def setUpClass(cls):\n cls.tempdir = tempfile.mkdtemp(prefix='h5py-test_')\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(cls.tempdir)\n\n def mktemp(self, suffix='.hdf5', prefix='', dir=None):\n if dir is None:\n dir = self.tempdir\n return tempfile.mktemp(suffix, prefix, dir=self.tempdir)\n \n def setUp(self):\n self.f = h5py.File(self.mktemp(), 'w')\n \n def tearDown(self):\n try:\n if self.f:\n self.f.close()\n except:\n pass\n\n if not hasattr(ut.TestCase, 'assertSameElements'):\n # shim until this is ported into unittest2\n def assertSameElements(self, a, b):\n for x in a:\n match = False\n for y in b:\n if x == y:\n match = True\n if not match:\n raise AssertionError(\"Item '%s' appears in a but not b\" % x)\n\n for x in b:\n match = False\n for y in a:\n if x == y:\n match = True\n if not match:\n raise AssertionError(\"Item '%s' appears in b but not a\" % x)\n\n def assertArrayEqual(self, dset, arr, message=None, precision=None):\n \"\"\" Make sure dset and arr have the same shape, dtype and contents, to\n within the given precision.\n\n Note that dset may be a NumPy array or an HDF5 dataset.\n \"\"\"\n if precision is None:\n precision = 1e-5\n if message is None:\n message = ''\n else:\n message = ' (%s)' % message\n\n if np.isscalar(dset) or np.isscalar(arr):\n self.assert_(\n np.isscalar(dset) and np.isscalar(arr),\n 'Scalar/array mismatch (\"%r\" vs \"%r\")%s' % (dset, arr, message)\n )\n self.assert_(\n dset - arr < precision,\n \"Scalars differ by more than %.3f%s\" % (precision, message)\n )\n return\n\n self.assert_(\n dset.shape == arr.shape,\n \"Shape mismatch (%s vs %s)%s\" % (dset.shape, arr.shape, message)\n )\n self.assert_(\n dset.dtype == arr.dtype,\n \"Dtype mismatch (%s vs %s)%s\" % (dset.dtype, arr.dtype, message)\n )\n \n if arr.dtype.names is not None:\n for n in arr.dtype.names:\n message = '[FIELD %s] %s' % (n, message)\n self.assertArrayEqual(dset[n], arr[n], message=message, precision=precision)\n elif arr.dtype.kind in ('i', 'f'):\n self.assert_(\n np.all(np.abs(dset[...] - arr[...]) < precision),\n \"Arrays differ by more than %.3f%s\" % (precision, message)\n )\n else:\n self.assert_(\n np.all(dset[...] 
== arr[...]),\n \"Arrays are not equal (dtype %s) %s\" % (arr.dtype.str, message)\n )\n\n def assertNumpyBehavior(self, dset, arr, s):\n \"\"\" Apply slicing arguments \"s\" to both dset and arr.\n \n Succeeds if the results of the slicing are identical, or the\n exception raised is of the same type for both.\n \n \"arr\" must be a Numpy array; \"dset\" may be a NumPy array or dataset.\n \"\"\"\n exc = None\n try:\n arr_result = arr[s]\n except Exception as e:\n exc = type(e)\n \n if exc is None:\n self.assertArrayEqual(dset[s], arr_result)\n else:\n with self.assertRaises(exc):\n dset[s]\n\nNUMPY_RELEASE_VERSION = tuple([int(i) for i in np.__version__.split(\".\")[0:2]])\n\n@contextmanager\ndef closed_tempfile(suffix='', text=None):\n \"\"\"\n Context manager which yields the path to a closed temporary file with the\n suffix `suffix`. The file will be deleted on exiting the context. An\n additional argument `text` can be provided to have the file contain `text`.\n \"\"\"\n with tempfile.NamedTemporaryFile(\n 'w+t', suffix=suffix, delete=False\n ) as test_file:\n file_name = test_file.name\n if text is not None:\n test_file.write(text)\n test_file.flush()\n yield file_name\n shutil.rmtree(file_name, ignore_errors=True)\n"
] | [
[
"tensorflow.python.ops.io_ops.TFRecordReader",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.io_ops.TextLineReader",
"numpy.squeeze",
"tensorflow.python.training.input.string_input_producer",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.ops.math_ops.reduce_any",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.python.ops.parsing_ops.decode_csv",
"tensorflow.python.estimator.estimator_lib.inputs.numpy_input_fn",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.tensor_array_ops.TensorArray",
"tensorflow.contrib.timeseries.python.timeseries.model_utils.canonicalize_times_or_steps_from_output",
"tensorflow.python.training.input.maybe_shuffle_batch",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.array",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.state_ops.is_variable_initialized",
"tensorflow.python.ops.array_ops.reverse",
"tensorflow.python.training.input.batch",
"tensorflow.python.training.training.limit_epochs",
"tensorflow.python.ops.parsing_ops.parse_example",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.gen_random_ops.random_uniform",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.gen_random_ops.random_uniform_int",
"tensorflow.python.util.deprecation.deprecated_endpoints",
"tensorflow.python.ops.gen_random_ops.random_poisson_v2",
"tensorflow.python.framework.random_seed.get_seed",
"tensorflow.python.framework.ops.NotDifferentiable",
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.python.ops.gen_random_ops.truncated_normal",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.gen_random_ops.parameterized_truncated_normal",
"numpy.finfo",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.gen_random_ops.random_gamma",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.gen_random_ops.multinomial",
"tensorflow.python.ops.gen_random_ops.random_standard_normal",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.gen_random_ops.random_shuffle"
],
[
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.lib.io.file_io.stat",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.lib.io.file_io.file_exists",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.lib.io.file_io.delete_file",
"tensorflow.python.lib.io.file_io.read_file_to_string",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.training.training_util.global_step",
"tensorflow.python.lib.io.file_io.get_matching_files",
"tensorflow.python.training.checkpoint_state_pb2.CheckpointState",
"tensorflow.python.eager.context.executing_eagerly"
],
[
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.ops.rnn_cell_impl.assert_like_rnncell",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.init_ops.random_uniform_initializer",
"tensorflow.python.ops.nn_ops.bias_add",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.util.tf_export.tf_export"
],
[
"numpy.f2py.main"
],
[
"tensorflow.python.eager.execute.args_to_matching_eager",
"tensorflow.core.framework.op_def_pb2.OpList",
"tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute",
"tensorflow.python.eager.execute.make_bool",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.eager.execute.record_gradient",
"tensorflow.python.framework.ops.RegisterShape",
"tensorflow.python.eager.core._status_to_exception",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.eager.execute.make_float",
"tensorflow.python.framework.op_def_library.OpDefLibrary",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.op_def_registry.register_op_list",
"tensorflow.python.eager.execute.execute"
],
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.contrib.boosted_trees.python.ops.model_ops.tree_ensemble_used_handlers",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.contrib.layers.python.layers.feature_column_ops.transform_features",
"tensorflow.contrib.boosted_trees.python.ops.training_ops.tree_ensemble_stats",
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.summary.summary.scalar",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.array_ops.shape_n",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.contrib.boosted_trees.python.ops.training_ops.grow_tree_ensemble",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.ops.math_ops.to_int32",
"tensorflow.python.ops.math_ops.logical_and",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.math_ops.to_int64",
"tensorflow.python.training.device_setter.replica_device_setter",
"tensorflow.contrib.boosted_trees.python.ops.prediction_ops.gradient_trees_prediction",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.contrib.boosted_trees.python.ops.gen_model_ops.decision_tree_ensemble_resource_handle_op",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.contrib.boosted_trees.python.ops.prediction_ops.gradient_trees_prediction_verbose",
"tensorflow.python.feature_column.feature_column.input_layer",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.contrib.boosted_trees.python.ops.model_ops.tree_ensemble_deserialize",
"tensorflow.contrib.boosted_trees.python.ops.training_ops.center_tree_ensemble_bias",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.contrib.boosted_trees.python.ops.prediction_ops.gradient_trees_partition_examples",
"tensorflow.contrib.boosted_trees.python.ops.batch_ops_utils.run_handler_scheduled_ops",
"tensorflow.python.ops.variables.VariableV1",
"tensorflow.contrib.boosted_trees.python.ops.gen_model_ops.create_tree_ensemble_variable",
"tensorflow.contrib.boosted_trees.python.ops.stats_accumulator_ops.StatsAccumulator",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.math_ops.range",
"tensorflow.contrib.boosted_trees.python.ops.model_ops.tree_ensemble_serialize",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.contrib.boosted_trees.python.ops.model_ops.tree_ensemble_stamp_token",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.trace",
"tensorflow.python.feature_column.feature_column._transform_features",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.core.framework.graph_pb2.GraphDef",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.python.framework.op_def_registry.get_registered_ops"
],
[
"tensorflow.python.training.optimizer.get_filtered_grad_fn",
"tensorflow.python.eager.backprop.implicit_grad"
],
[
"tensorflow.python.estimator.estimator.Estimator",
"tensorflow.python.estimator.canned.boosted_trees._create_classification_head_and_closed_form",
"tensorflow.python.estimator.canned.boosted_trees._bt_model_fn",
"tensorflow.python.estimator.canned.boosted_trees._create_regression_head",
"tensorflow.python.estimator.canned.boosted_trees._TreeHParams"
],
[
"tensorflow.python.autograph.pyct.inspect_utils.getmethodclass",
"tensorflow.python.util.tf_inspect.ismethod",
"tensorflow.python.autograph.pyct.anno.setanno",
"tensorflow.python.autograph.pyct.templates.replace",
"tensorflow.python.autograph.pyct.anno.hasanno",
"tensorflow.python.autograph.pyct.parser.parse_entity",
"tensorflow.python.autograph.pyct.ast_util.keywords_to_dict",
"tensorflow.python.autograph.pyct.ast_util.matches",
"tensorflow.python.autograph.pyct.parser.parse_expression",
"tensorflow.python.autograph.pyct.anno.getanno",
"tensorflow.python.autograph.pyct.inspect_utils.isbuiltin"
],
[
"tensorflow.python.pywrap_tensorflow.IsNamedtuple",
"tensorflow.python.pywrap_tensorflow.RegisterType",
"tensorflow.python.pywrap_tensorflow.AssertSameStructure"
],
[
"tensorflow.core.framework.op_def_pb2.OpList",
"tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.eager.execute.record_gradient",
"tensorflow.python.framework.ops.RegisterShape",
"tensorflow.python.eager.core._status_to_exception",
"tensorflow.python.eager.execute.make_int",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.op_def_library.OpDefLibrary",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.op_def_registry.register_op_list",
"tensorflow.python.eager.execute.execute"
],
[
"tensorflow.python.util.function_utils.fn_args"
],
[
"tensorflow.python.ops.gen_collective_ops.collective_bcast_recv",
"tensorflow.python.ops.gen_collective_ops.collective_bcast_send",
"tensorflow.python.framework.device.canonical_name",
"tensorflow.python.ops.gen_collective_ops.collective_reduce"
],
[
"tensorflow.python.training.saver.BulkSaverBuilder",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.training.saver.BaseSaverBuilder.SaveableObjectsForOp",
"tensorflow.python.ops.variable_scope._get_default_variable_store",
"tensorflow.core.protobuf.checkpointable_object_graph_pb2.CheckpointableObjectGraph",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.gen_io_ops.restore_v2",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.training.checkpointable.base._CheckpointPosition",
"tensorflow.core.protobuf.checkpointable_object_graph_pb2.CheckpointableObjectGraph.CheckpointableObject.SlotVariableReference",
"tensorflow.python.training.saver.BaseSaverBuilder.OpListToDict",
"tensorflow.python.pywrap_tensorflow.NewCheckpointReader",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.training.checkpointable.base.NoRestoreSaveable",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.ops.variables.VariableV1",
"tensorflow.python.framework.ops.uid",
"tensorflow.python.training.checkpointable.base._SlotVariableRestoration",
"tensorflow.python.training.checkpointable.data_structures.NoDependency",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.training.saver.Saver",
"tensorflow.python.framework.constant_op.constant"
],
[
"numpy.all",
"numpy.array",
"numpy.dtype"
],
[
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.gen_sparse_ops.sparse_add_grad",
"tensorflow.python.ops.array_ops.invert_permutation",
"tensorflow.python.ops.gen_sparse_ops.sparse_tensor_dense_mat_mul",
"tensorflow.python.ops.array_ops.gather_nd",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.framework.ops.NotDifferentiable",
"tensorflow.python.ops.math_ops.to_int64",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.ops.gen_sparse_ops.sparse_slice_grad",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.math_ops.reduced_shape",
"tensorflow.python.ops.gen_sparse_ops.sparse_fill_empty_rows_grad",
"tensorflow.python.ops.sparse_ops.sparse_reduce_sum",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.sparse_ops.sparse_reorder",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.sparse_ops.sparse_dense_cwise_add",
"tensorflow.python.ops.math_ops.reduce_sum"
],
[
"numpy.dtype"
],
[
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.estimator.canned.head._multi_class_head_with_softmax_cross_entropy_loss",
"tensorflow.python.summary.summary.histogram",
"tensorflow.python.training.training_util.get_global_step",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.feature_column.feature_column.input_layer",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss",
"tensorflow.contrib.feature_column.python.feature_column.sequence_feature_column.sequence_input_layer",
"tensorflow.python.ops.rnn_cell.MultiRNNCell",
"tensorflow.contrib.estimator.python.estimator.extenders.clip_gradients_by_norm",
"tensorflow.python.ops.rnn.dynamic_rnn",
"tensorflow.python.estimator.canned.optimizers.get_optimizer_instance",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.init_ops.glorot_uniform_initializer",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner",
"tensorflow.python.ops.array_ops.expand_dims"
],
[
"numpy.__version__.split",
"numpy.all",
"numpy.abs",
"numpy.isscalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"1.7",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.7",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.12",
"2.6",
"2.7",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.10",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.7",
"1.10",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ebrunet28/MultiDecoder-DPRNN | [
"36fd6c35e730379e4f676a25eac451409a01f068"
] | [
"src/data.py"
] | [
"\"\"\"\nDataset classes for variable number of speakers\nAuthor: Junzhe Zhu\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nfrom librosa import load\nfrom time import time\nimport glob\nimport os\nimport random\nimport json\nfrom tqdm import tqdm\ndef load_json(filename):\n with open(filename) as f:\n data = json.load(f)\n return data\n\ndef pad_audio(audio, len_samples=4*8000):\n if len(audio) < len_samples:\n audio = np.concatenate([audio, np.zeros(len_samples - len(audio))])\n return audio\n\nclass MixtureDataset(data.Dataset):\n def __init__(self, root, json_folders, sr=8000, seglen=4.0, minlen=2.0, debug=False): # segment and cv_maxlen not implemented\n \"\"\"\n each line of textfile comes in the form of:\n filename1, dB1, filename2, dB2, ...\n args:\n root: folder where dataset/ is located\n json_folders: folders containing json files, **/dataset/#speakers/wav8k/min/tr/**\n sr: sample rate\n seglen: length of each segment in seconds\n minlen: minimum segment length\n \"\"\"\n str_tmp = '_debug' if debug else ''\n seglen = int(seglen * sr)\n minlen = int(minlen * sr)\n self.sr = sr\n self.mixes = []\n for json_folder in json_folders:\n mixfiles, wavlens = list(zip(*load_json(os.path.join(root + str_tmp, json_folder, 'mix.json')))) # list of 20000 filenames, and 20000 lengths\n mixfiles = [os.path.join(root, mixfile.split('dataset/')[1]) for mixfile in mixfiles]\n sig_json = [load_json(file) for file in sorted(glob.glob(os.path.join(root + str_tmp, json_folder, 's*.json')))] # list C, each have 20000 filenames\n for i, spkr_json in enumerate(sig_json):\n sig_json[i] = [os.path.join(root, line[0].split('dataset/')[1]) for line in spkr_json] # list C, each have 20000 filenames\n siglists = list(zip(*sig_json)) # list of 20000, each have C filenames\n self.mixes += list(zip(mixfiles, siglists, wavlens))\n\n self.examples = []\n for i, mix in enumerate(self.mixes):\n if mix[2] < minlen:\n continue\n start = 0\n while start + minlen <= mix[2]:\n end = min(start + seglen, mix[2])\n self.examples.append({'mixfile': mix[0], 'sourcefiles': mix[1], 'start': start, 'end':end})\n start += minlen\n random.seed(0)\n self.examples = random.sample(self.examples, len(self.examples))\n\n # Count.\n example_source_files_len = [len(tmp['sourcefiles'] )for tmp in self.examples]\n unique, counts = np.unique(np.array(example_source_files_len), return_counts=True)\n self.example_weights =[]\n for tmp in example_source_files_len:\n self.example_weights.append(1./counts[tmp-2])\n self.example_weights = torch.Tensor(self.example_weights)\n def __len__(self):\n return len(self.examples)\n def __getitem__(self, idx):\n \"\"\"\n Returns:\n mixture: [T]\n sources: list of C, each [T]\n \"\"\"\n example = self.examples[idx]\n mixfile, sourcefiles, start, end = example['mixfile'], example['sourcefiles'], example['start'], example['end']\n mixture, sr = load(mixfile, sr=self.sr)\n assert sr == self.sr, 'need to resample'\n mixture = mixture[start:end]\n sources = [load(sourcefile, sr=sr)[0][start:end] for sourcefile in sourcefiles]\n return mixture, sources\n\ndef _collate_fn(batch):\n \"\"\"\n Args:\n batch: list, len(batch) = batch_size, each entry is a tuple of (mixture, sources)\n Returns:\n mixtures_list: B x T, torch.Tensor, padded mixtures\n ilens : B, torch.Tensor, length of each mixture before padding\n sources_list: list of B Tensors, each C x T, where C is (variable) number of source audios\n \"\"\"\n ilens = [] # shape of mixtures\n mixtures = [] # mixtures, same length as 
longest source in whole batch\n sources_list = [] # padded sources, same length as mixtures\n for mixture, sources in batch: # compute length to pad to\n assert len(mixture) == len(sources[0])\n assert len(mixture) <= 32000\n ilens.append(len(mixture))\n mixtures.append(pad_audio(mixture))\n sources = torch.Tensor(np.stack([pad_audio(source) for source in sources], axis=0)).float()\n sources_list.append(sources)\n mixtures = torch.Tensor(np.stack(mixtures, axis=0)).float()\n ilens = torch.Tensor(np.stack(ilens)).int()\n return mixtures, ilens, sources_list\n\nclass TestDataset(data.Dataset):\n def __init__(self, root, json_folders, sr=8000): # segment and cv_maxlen not implemented\n \"\"\"\n each line of textfile comes in the form of:\n filename1, dB1, filename2, dB2, ...\n args:\n root: folder where dataset/ is located\n json_folders: folders containing json files, **/dataset/#speakers/wav8k/min/tr/**\n sr: sample rate\n seglen: length of each segment in seconds\n minlen: minimum segment length\n \"\"\"\n self.sr = sr\n self.mixes = []\n for json_folder in json_folders:\n mixfiles, wavlens = list(zip(*load_json(os.path.join(root, json_folder, 'mix.json')))) # list of 20000 filenames, and 20000 lengths\n mixfiles = [os.path.join(root, mixfile.split('dataset/')[1]) for mixfile in mixfiles]\n sig_json = [load_json(file) for file in sorted(glob.glob(os.path.join(root, json_folder, 's*.json')))] # list C, each have 20000 filenames\n for i, spkr_json in enumerate(sig_json):\n sig_json[i] = [os.path.join(root, line[0].split('dataset/')[1]) for line in spkr_json] # list C, each have 20000 filenames\n siglists = list(zip(*sig_json)) # list of 20000, each have C filenames\n self.mixes += list(zip(mixfiles, siglists, wavlens))\n #printlist(self.mixes)\n self.examples = []\n for i, mix in enumerate(self.mixes):\n self.examples.append({'mixfile': mix[0], 'sourcefiles': mix[1], 'start': 0, 'end': mix[2]})\n random.seed(0)\n self.examples = random.sample(self.examples, len(self.examples))\n def __len__(self):\n return len(self.examples)\n def __getitem__(self, idx):\n \"\"\"\n Returns:\n mixture: [T]\n sources: list of C, each [T]\n \"\"\"\n example = self.examples[idx]\n mixfile, sourcefiles, start, end = example['mixfile'], example['sourcefiles'], example['start'], example['end']\n mixture, sr = load(mixfile, sr=self.sr)\n assert sr == self.sr, 'need to resample'\n mixture = mixture[start:end]\n sources = [load(sourcefile, sr=sr)[0][start:end] for sourcefile in sourcefiles]\n return mixture, sources\n \nif __name__ == \"__main__\":\n root = \"/ws/ifp-10_3/hasegawa/junzhez2/Baseline_Model/dataset\"\n tr_json = [\"2spkr_json/tr/\",\n \"3spkr_json/tr/\",\n \"4spkr_json/tr/\",\n \"5spkr_json/tr/\"]\n val_json = [\"2spkr_json/cv/\",\n \"3spkr_json/cv/\",\n \"4spkr_json/cv/\",\n \"5spkr_json/cv/\"]\n test_json = [\"2spkr_json/tt\",\n \"3spkr_json/tt\",\n \"4spkr_json/tt\",\n \"5spkr_json/tt\"]\n dataset = MixtureDataset(root, tr_json)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=3, collate_fn=_collate_fn)\n print(len(dataset))\n for mixtures, ilens, sources_list in tqdm(dataloader):\n start = time()\n print(mixtures.shape, ilens.shape, [len(sources) for sources in sources_list])\n print(time() - start)\n"
] | [
[
"numpy.array",
"torch.utils.data.DataLoader",
"torch.Tensor",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cimat-ris/OF-PathPred | [
"85ca275707e5988491d0a510b9d31883824411db"
] | [
"path_prediction/utils/process_file_trajnetplusplus.py"
] | [
"import os, glob, sys, logging, math\nfrom tqdm import tqdm\nimport numpy as np\nfrom .interaction_optical_flow import OpticalFlowSimulator\nfrom .obstacles import load_world_obstacle_polygons\n# Since it is used as a submodule, the trajnetplusplustools directory should be there\nsys.path.append(\"../../trajnetplusplustools\")\nfrom trajnetplusplustools import Reader\n\ndef prepare_data_trajnetplusplus(datasets_path, datasets_names,parameters,keep_neighbors=True):\n \"\"\" Prepares the train/val scenes and corresponding goals\n Parameters\n ----------\n parameters: Experiment_Parameters\n Defines the prediction experiment parameters.\n path:\n Path to the dataset (set of json files)\n\n Returns\n -------\n data : Dictionary\n Contains the different processed data as numpy nd arrays\n \"\"\"\n all_ped_traj_abs = []\n all_neigbors_traj_abs = []\n all_flows = []\n all_visible_neighbors = []\n neighbors_n_max = 0\n primary_path = []\n # Optical flow\n of_sim = OpticalFlowSimulator()\n ## Iterate over file names\n for dataset_name in datasets_names:\n reader = Reader(datasets_path + dataset_name + '.ndjson', scene_type='paths')\n ## Necessary modification of train scene to add filename\n scene = [(dataset_name, s_id, s) for s_id, s in reader.scenes()]\n logging.info(\"File \"+dataset_name+\" with {} scenes.\".format(len(scene)))\n for scene_i, (filename, scene_id, paths) in enumerate(scene):\n # Get the trajectories\n raw_traj_abs = Reader.paths_to_xy(paths)\n ped_traj_abs = raw_traj_abs[:,0,:]\n if ped_traj_abs.shape[0]<1+parameters.obs_len+parameters.pred_len:\n continue\n # Keep the full trajectory of the pedestrian of interest (start at 0)\n all_ped_traj_abs.append(ped_traj_abs)\n # Save info path scene scene_id\n primary_path.append((scene_id, paths[0],reader.scenes_by_id[scene_id]))\n # Neighbors\n neigbors_traj_abs = raw_traj_abs[1:1+parameters.obs_len,1:,:]\n neigbors_traj_abs = np.concatenate([np.ones([neigbors_traj_abs.shape[0],neigbors_traj_abs.shape[1],1]),neigbors_traj_abs],axis=2)\n if keep_neighbors:\n neighbors_n = neigbors_traj_abs.shape[1]\n if neighbors_n>neighbors_n_max:\n neighbors_n_max = neighbors_n\n all_neigbors_traj_abs.append(neigbors_traj_abs)\n # Optical flow\n flow,vis_neigh,__ = of_sim.compute_opticalflow_seq(ped_traj_abs[1:1+parameters.obs_len,:],neigbors_traj_abs[0:parameters.obs_len,:,:], None)\n all_flows.append(flow)\n all_visible_neighbors.append(vis_neigh)\n\n all_ped_traj_abs = np.array(all_ped_traj_abs, dtype=\"float32\")\n all_flows = np.array(all_flows, dtype=\"float32\")\n all_visible_neighbors= np.array(all_visible_neighbors)\n\n # Data sanity check\n logging.debug(\"Checking data consistency\")\n logging.debug(\"Nan in all_ped_traj_abs {} \".format(np.isnan(all_ped_traj_abs).any()))\n logging.debug(\"Nan in all_flows {} \".format(np.isnan(all_flows).any()))\n logging.debug(\"Inf in all_flows {} \".format(np.isinf(all_flows).any()))\n logging.debug(\"Nan in all_visible_neighbors {} \".format(np.isnan(all_visible_neighbors).any()))\n logging.debug(\"Inf in all_visible_neighbors {} \".format(np.isinf(all_visible_neighbors).any()))\n\n if keep_neighbors:\n for i in range(len(all_neigbors_traj_abs)):\n # TODO: avoid using 3 dimensions?\n tmp=np.NaN*np.ones([all_neigbors_traj_abs[i].shape[0],neighbors_n_max,3])\n tmp[:,:all_neigbors_traj_abs[i].shape[1],:]=all_neigbors_traj_abs[i]\n all_neigbors_traj_abs[i]=tmp\n all_neigbors_traj_abs= np.array(all_neigbors_traj_abs)\n logging.info(\"Total trajectories: {}\".format(all_ped_traj_abs.shape[0]))\n\n\n # By 
broadcasting, center these data\n seq_pos_centered_all = all_ped_traj_abs - all_ped_traj_abs[:,parameters.obs_len:parameters.obs_len+1,0:2]\n # Displacements\n seq_rel_all = np.zeros_like(all_ped_traj_abs)\n seq_rel_all[:,1:,:] = all_ped_traj_abs[:,1:,:]-all_ped_traj_abs[:,:-1,:]\n # All directions\n seq_theta_all = np.zeros_like(all_ped_traj_abs[:,:,0:1])\n seq_theta_all[:,:,0] = np.arctan2(seq_rel_all[:,:,1],seq_rel_all[:,:,0])\n # Cosine and sine of the orientation angle at the last observed point\n costheta = np.cos(seq_theta_all[:,parameters.obs_len:parameters.obs_len+1,0:1])\n sintheta = np.sin(seq_theta_all[:,parameters.obs_len:parameters.obs_len+1,0:1])\n seq_pos_rot_all = np.zeros_like(all_ped_traj_abs)\n seq_pos_rot_all[:,:,0:1]= costheta*(seq_pos_centered_all[:,:,0:1])+sintheta*(seq_pos_centered_all[:,:,1:2])\n seq_pos_rot_all[:,:,1:2]=-sintheta*(seq_pos_centered_all[:,:,0:1])+costheta*(seq_pos_centered_all[:,:,1:2])\n # All the displacements are estimated here.\n seq_rel_rot_all = np.zeros_like(seq_pos_rot_all)\n seq_rel_rot_all[:,1:,:] = seq_pos_rot_all[:,1:,:]-seq_pos_rot_all[:,:-1,:]\n # Save all these data as a dictionary\n data = {\n \"obs_traj\": all_ped_traj_abs[:,1:1+parameters.obs_len,:],\n \"obs_traj_rel\": seq_rel_all[:,1:1+parameters.obs_len,:],\n \"obs_traj_theta\":seq_theta_all[:,1:1+parameters.obs_len,:],\n \"obs_optical_flow\": all_flows[:,1:1+parameters.obs_len,:],\n \"obs_visible_neighbors\": all_visible_neighbors[:,1:1+parameters.obs_len,:],\n \"pred_traj\": all_ped_traj_abs[:,1+parameters.obs_len:,:],\n \"pred_traj_rel\": seq_rel_all[:,1+parameters.obs_len:,:],\n \"index\": np.array(range(len(primary_path)))\n }\n if keep_neighbors:\n data[\"obs_neighbors\"] = all_neigbors_traj_abs[:,1:parameters.obs_len+1,:]\n return data, primary_path\n"
] | [
[
"numpy.isnan",
"numpy.cos",
"numpy.sin",
"numpy.arctan2",
"numpy.ones",
"numpy.zeros_like",
"numpy.array",
"numpy.isinf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jimmy-INL/OpenPNM | [
"1546fa1ac2204443bde916f2037fac383c5069ae",
"1546fa1ac2204443bde916f2037fac383c5069ae"
] | [
"openpnm/io/Pandas.py",
"scripts/example_PNP_2D.py"
] | [
"import numpy as np\nimport scipy as sp\nfrom flatdict import FlatDict\nfrom collections import namedtuple\nfrom openpnm.io import Dict, GenericIO\nfrom openpnm.utils import sanitize_dict, logging\nlogger = logging.getLogger(__name__)\n\n\nclass Pandas(GenericIO):\n r\"\"\"\n Combines all data arrays into a Pandas DataFrame object\n\n The structure of a DataFrame is a very close match to OpenPNMs data\n storage. Each key becomes a column header in the Dataframe, and each\n pore or throat entry becomes a row.\n\n Limitations of the DataFrame are the inability to have multidimensional\n data in a single column. The methods on a DataFrame are also oriented\n towards time-series data.\n\n Nonetheless, Pandas offers many useful features such as performing\n statistical analysis on property. DataFrames also offer *many* options for\n exporting to other file formats, so if a format is not yet supported\n by OpenPNM, this could be an solution.\n\n \"\"\"\n @classmethod\n def to_dataframe(cls, network=None, phases=[], join=False, delim=' | '):\n r\"\"\"\n Convert the Network (and optionally Phase) data to Pandas DataFrames.\n\n Parameters\n ----------\n network: OpenPNM Network Object\n The network containing the data to be stored\n\n phases : list of OpenPNM Phase Objects\n The data on each supplied phase will be added to DataFrame\n\n join : boolean\n If ``False`` (default), two DataFrames are returned with *pore*\n data in one, and *throat* data in the other. If ``True`` the pore\n and throat data are combined into a single DataFrame. This can be\n problematic as it will put NaNs into all the *pore* columns which\n are shorter than the *throat* columns.\n\n Returns\n -------\n Pandas ``DataFrame`` object containing property and label data in each\n column. If ``join`` was False (default) the two DataFrames are\n returned i a named tuple, or else a single DataFrame with pore and\n throat data in the same file, despite the column length being\n different.\n\n \"\"\"\n from pandas import DataFrame\n\n project, network, phases = cls._parse_args(network=network,\n phases=phases)\n\n # Initialize pore and throat data dictionary using Dict class\n pdata = Dict.to_dict(network=network, phases=phases, element='pore',\n interleave=True, flatten=True,\n categorize_by=['object'])\n tdata = Dict.to_dict(network=network, phases=phases, element='throat',\n interleave=True, flatten=True,\n categorize_by=['object'])\n pdata = FlatDict(pdata, delimiter=delim)\n tdata = FlatDict(tdata, delimiter=delim)\n\n # Scan data and convert non-1d arrays to multiple columns\n for key in list(pdata.keys()):\n if np.shape(pdata[key]) != (network[0].Np,):\n arr = pdata.pop(key)\n tmp = np.split(arr, arr.shape[1], axis=1)\n cols = range(len(tmp))\n pdata.update({key+'['+str(i)+']': tmp[i].squeeze()\n for i in cols})\n for key in list(tdata.keys()):\n if np.shape(tdata[key]) != (network[0].Nt,):\n arr = tdata.pop(key)\n tmp = np.split(arr, arr.shape[1], axis=1)\n cols = range(len(tmp))\n tdata.update({key+'['+str(i)+']': tmp[i].squeeze()\n for i in cols})\n\n # Convert sanitized dictionaries to DataFrames\n pdata = DataFrame(sanitize_dict(pdata))\n tdata = DataFrame(sanitize_dict(tdata))\n\n # Prepare DataFrames to be returned\n if join:\n data = tdata.join(other=pdata, how='left')\n else:\n nt = namedtuple('dataframes', ('pore', 'throat'))\n data = nt(pore=pdata, throat=tdata)\n\n return data\n\n @classmethod\n def from_dataframe(cls):\n r\"\"\"\n \"\"\"\n raise NotImplementedError()\n",
"import openpnm as op\nfrom openpnm.phases import mixtures\nimport numpy as np\nws = op.Workspace()\nproj = ws.new_project()\n# ws.settings['loglevel'] = 20\n\n\nscheme = 'powerlaw'\n\n# network, geometry, phase\nnp.random.seed(0)\n\nnet = op.network.Cubic(shape=[23, 15, 1], spacing=1e-6, project=proj)\nprs = (net['pore.back'] * net['pore.right'] + net['pore.back']\n * net['pore.left'] + net['pore.front'] * net['pore.right']\n + net['pore.front'] * net['pore.left'])\nprs = net.Ps[prs]\n\nb_prs_1 = np.append(net.pores('back'), net.pores('front'))\nb_prs_2 = np.append(net.pores('left'), net.pores('right'))\nb_prs = np.append(b_prs_1, b_prs_2)\nb_thrts = net.find_neighbor_throats(b_prs)\n\nthrts_1 = net['throat.surface']\nthrts_1 = net.Ts[thrts_1]\nnp.random.seed(0)\nthrts_i = net.Ts[~net['throat.surface']]\nthrts_sample = [i for i in thrts_i if i not in b_thrts]\nL = int(0.05*len(thrts_sample))\nthrts_2 = np.random.choice(thrts_sample, size=(L,), replace=False)\nthrts_2 = np.array([])\nthrts = np.append(thrts_1, thrts_2)\n\nop.topotools.trim(network=net, pores=prs, throats=thrts)\n\nop.topotools.reduce_coordination(net, 3)\n\ngeo = op.geometry.StickAndBall(network=net, pores=net.Ps, throats=net.Ts)\n\n\nsw = mixtures.SalineWater(network=net)\n# Retrieve handles to each species for use below\nNa = sw.components['Na_' + sw.name]\nCl = sw.components['Cl_' + sw.name]\nH2O = sw.components['H2O_' + sw.name]\n\n# physics\nphys = op.physics.GenericPhysics(network=net, phase=sw, geometry=geo)\n\nflow = op.models.physics.hydraulic_conductance.hagen_poiseuille_2D\nphys.add_model(propname='throat.hydraulic_conductance',\n pore_viscosity='pore.viscosity',\n throat_viscosity='throat.viscosity',\n model=flow, regen_mode='normal')\n\ncurrent = op.models.physics.ionic_conductance.electroneutrality_2D\nphys.add_model(propname='throat.ionic_conductance',\n model=current, regen_mode='normal', ions=[Na.name, Cl.name])\n\neA_dif = op.models.physics.diffusive_conductance.ordinary_diffusion_2D\nphys.add_model(propname='throat.diffusive_conductance.' + Na.name,\n pore_diffusivity='pore.diffusivity.' + Na.name,\n throat_diffusivity='throat.diffusivity.' + Na.name,\n model=eA_dif, regen_mode='normal')\n\neB_dif = op.models.physics.diffusive_conductance.ordinary_diffusion_2D\nphys.add_model(propname='throat.diffusive_conductance.' + Cl.name,\n pore_diffusivity='pore.diffusivity.' + Cl.name,\n throat_diffusivity='throat.diffusivity.' 
+ Cl.name,\n model=eB_dif, regen_mode='normal')\n\n# algorithms\nsf = op.algorithms.StokesFlow(network=net, phase=sw)\nsf.set_value_BC(pores=net.pores('back'), values=2010)\nsf.set_value_BC(pores=net.pores('front'), values=10)\nsf.settings['rxn_tolerance'] = 1e-12\nsf.run()\nsw.update(sf.results())\n\np = op.algorithms.IonicConduction(network=net, phase=sw)\np.set_value_BC(pores=net.pores('left'), values=0.02)\np.set_value_BC(pores=net.pores('right'), values=0.01)\np.settings['rxn_tolerance'] = 1e-12\np.settings['charge_conservation'] = 'electroneutrality_2D'\n\neA = op.algorithms.NernstPlanck(network=net, phase=sw, ion=Na.name)\neA.set_value_BC(pores=net.pores('back'), values=20)\neA.set_value_BC(pores=net.pores('front'), values=10)\neA.settings['rxn_tolerance'] = 1e-12\n\neB = op.algorithms.NernstPlanck(network=net, phase=sw, ion=Cl.name)\neB.set_value_BC(pores=net.pores('back'), values=20)\neB.set_value_BC(pores=net.pores('front'), values=10)\neB.settings['rxn_tolerance'] = 1e-12\n\nad_dif_mig_Na = op.models.physics.ad_dif_mig_conductance.ad_dif_mig\nphys.add_model(propname='throat.ad_dif_mig_conductance.' + Na.name,\n model=ad_dif_mig_Na, ion=Na.name,\n s_scheme=scheme)\n\nad_dif_mig_Cl = op.models.physics.ad_dif_mig_conductance.ad_dif_mig\nphys.add_model(propname='throat.ad_dif_mig_conductance.' + Cl.name,\n pore_pressure=sf.settings['quantity'],\n model=ad_dif_mig_Cl, ion=Cl.name,\n s_scheme=scheme)\n\npnp = op.algorithms.NernstPlanckMultiphysics(network=net, phase=sw)\npnp.setup(potential_field=p.name, ions=[eA.name, eB.name])\npnp.settings['i_max_iter'] = 10\npnp.settings['i_tolerance'] = 1e-04\n\npnp.run()\n\nsw.update(sf.results())\nsw.update(p.results())\nsw.update(eA.results())\nsw.update(eB.results())\n\n# output data\n# proj.export_data(phases=[sw], filename='OUT', filetype='xdmf')\n"
] | [
[
"numpy.split",
"numpy.shape"
],
[
"numpy.append",
"numpy.array",
"numpy.random.seed",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
krmurtha/fw-heudiconv | [
"cf41f7e6eb770317ab7c0aec051b4567ab634d01",
"cf41f7e6eb770317ab7c0aec051b4567ab634d01"
] | [
"fw_heudiconv/backend_funcs/convert.py",
"fw_heudiconv/cli/tabulate.py"
] | [
"import logging\nimport re\nimport pdb\nimport operator\nimport pprint\nimport mimetypes\nimport flywheel\nimport json\nimport pandas as pd\nfrom os import path\nfrom pathvalidate import is_valid_filename\nfrom pathlib import Path\nfrom fw_heudiconv.cli.export import get_nested\n\nlogger = logging.getLogger('fw-heudiconv-curator')\n\n\ndef build_intention_path(f):\n \"\"\"Builds a string of the path to the file w.r.t. subject dir\n \"\"\"\n fname = f.info[\"BIDS\"][\"Filename\"]\n folder = f.info[\"BIDS\"][\"Folder\"]\n ses = fname.split(\"_\")[1]\n return(\"/\".join([ses, folder, fname]))\n\n\ndef none_replace(str_input):\n return str_input\n\n\ndef force_template_format(str_input):\n\n # if we get a reproin heuristic, the str format is:\n #\n # {bids_subject_session_dir}/anat/{bids_subject_session_prefix}_scout\n #\n # here we replace the {} with the sub-sess format fw-heudiconv uses\n\n str_input = re.sub(\"{bids_subject_session_dir}\", \"sub-{subject}/ses-{session}\", str_input)\n str_input = re.sub(\"{bids_subject_session_prefix}\", \"sub-{subject}_ses-{session}\", str_input)\n\n # next, we remove extra sub-sub or ses-ses\n str_input = re.sub(\"(?<!ses-){session}\", \"ses-{session}\", str_input)\n str_input = re.sub(\"(?<!sub-){subject}\", \"sub-{subject}\", str_input)\n\n return(str_input)\n\n\ndef force_label_format(str_input):\n\n str_input = re.sub(\"ses-\", \"\", str_input)\n str_input = re.sub(\"sub-\", \"\", str_input)\n\n return(str_input)\n\n\ndef apply_heuristic(client, heur, acquisition_id, dry_run=False, intended_for=[],\n metadata_extras={}, subj_replace=None, ses_replace=None, item_num=1):\n \"\"\" Apply heuristic to rename files\n\n This function applies the specified heuristic to the files given in the\n list of acquisitions.\n\n Args:\n client (Client): The flywheel sdk client\n heur (tuple): 3-tuple, the \"key\" of a seq_info dictionary, where\n the first item of the tuple is the naming convention as a string\n acquisition_ids (list): The \"value\" of a seq_info dictionary, the list\n of acquisitions to which the naming convention applies\n \"\"\"\n suffixes = {'nifti': \".nii.gz\", 'bval': \".bval\", 'bvec': \".bvec\"}\n ftypes = ['nifti', 'bval', 'bvec', 'tsv']\n template, outtype, annotation_classes = heur\n template = force_template_format(template)\n\n subj_replace = none_replace if subj_replace is None else subj_replace\n ses_replace = none_replace if ses_replace is None else ses_replace\n\n acquisition_object = client.get(acquisition_id)\n subj_label = subj_replace(force_label_format(client.get(acquisition_object.parents.subject).label))\n sess_label = ses_replace(force_label_format(client.get(acquisition_object.parents.session).label))\n\n files = [f for f in acquisition_object.files if f.type in ftypes]\n bids_keys = ['sub', 'ses', 'folder', 'name']\n\n files.sort(key=operator.itemgetter(\"name\"))\n for fnum, f in enumerate(files):\n bids_vals = template.format(subject=subj_label, session=sess_label, item=fnum+1, seqitem=item_num).split(\"/\")\n bids_dict = dict(zip(bids_keys, bids_vals))\n suffix = suffixes[f.type]\n\n if 'BIDS' not in f.info:\n f.info['BIDS'] = \"\"\n new_bids = f.info['BIDS']\n if new_bids in (\"NA\", \"\"):\n new_bids = add_empty_bids_fields(bids_dict['folder'], bids_dict['name'])\n new_bids['Filename'] = bids_dict['name']+suffix\n new_bids['Folder'] = bids_dict['folder']\n new_bids['Path'] = \"/\".join([bids_dict['sub'],\n bids_dict['ses'],\n bids_dict['folder']])\n new_bids['error_message'] = \"\"\n new_bids['valid'] = True\n\n 
infer_params_from_filename(new_bids)\n\n destination = \"\\n\" + f.name + \"\\n\\t\" + new_bids['Filename'] + \" -> \" \\\n + new_bids[\"Path\"] + \"/\" + new_bids['Filename']\n logger.debug(destination)\n\n if not dry_run:\n acquisition_object.update_file_info(f.name, {'BIDS': new_bids})\n acquisition_object = client.get(acquisition_id) # Refresh the acquisition object\n\n if intended_for and (f.name.endswith(\".nii.gz\") or f.name.endswith(\".nii\")):\n\n intendeds = [force_template_format(intend)\n for intend in intended_for]\n intendeds = [intend.format(subject=subj_label, session=sess_label)\n for intend in intendeds]\n\n logger.debug(\"%s IntendedFor: %s\", pprint.pformat(new_bids['Filename']),\n pprint.pformat(intendeds))\n if not dry_run:\n acquisition_object.update_file_info(f.name, {'IntendedFor': intendeds})\n acquisition_object = client.get(acquisition_id)\n # Check that it was applied\n file_info = acquisition_object.get_file(f.name)\n assert file_info['info']['IntendedFor'] == intendeds\n logger.debug(\"Applied!\")\n\n if metadata_extras:\n logger.debug(\"%s metadata: %s\", f.name, metadata_extras)\n if not dry_run:\n acquisition_object.update_file_info(f.name, metadata_extras)\n\n\ndef add_empty_bids_fields(folder, fname=None):\n\n if \"fmap\" in folder:\n if not fname:\n logger.debug(\"No filename given, can't set intentions for this fieldmap!\")\n IntendedFor = \"\"\n Modality = \"\"\n else:\n IntendedFor = \"[{'Folder': 'func'}]\"\n Modality = \"fieldmap\"\n new_bids = {\"Acq\": \"\",\n \"Ce\": \"\",\n \"Dir\": \"\",\n \"Echo\": \"\",\n \"error_message\": \"\",\n \"Filename\": \"\",\n \"Folder\": \"fmap\",\n \"ignore\": \"\",\n \"IntendedFor\": \"\",\n \"Mod\": \"\",\n \"Modality\": \"\",\n \"Path\": \"\",\n \"Rec\": \"\",\n \"Run\": \"\",\n \"Task\": \"\",\n \"template\": \"fieldmap_file\",\n \"valid\": False}\n\n elif \"dwi\" in folder:\n\n new_bids = {\"Acq\": \"\",\n \"Ce\": \"\",\n \"Dir\": \"\",\n \"Echo\": \"\",\n \"error_message\": \"\",\n \"Filename\": \"\",\n \"Folder\": \"\",\n \"ignore\": \"\",\n \"IntendedFor\": \"\",\n \"Mod\": \"\",\n \"Modality\": \"dwi\",\n \"Path\": \"\",\n \"Rec\": \"\",\n \"Run\": \"\",\n \"Task\": \"\",\n \"template\": \"diffusion_file\",\n \"valid\": False}\n\n elif \"func\" in folder:\n\n new_bids = {\"Acq\": \"\",\n \"Ce\": \"\",\n \"Dir\": \"\",\n \"Echo\": \"\",\n \"error_message\": \"\",\n \"Filename\": \"\",\n \"Folder\": \"\",\n \"ignore\": \"\",\n \"IntendedFor\": \"\",\n \"Mod\": \"\",\n \"Modality\": \"\",\n \"Path\": \"\",\n \"Rec\": \"\",\n \"Run\": \"\",\n \"Task\": \"\",\n \"template\": \"\",\n \"valid\": False}\n\n elif \"anat\" in folder:\n\n new_bids = {\"Acq\": \"\",\n \"Ce\": \"\",\n \"Dir\": \"\",\n \"Echo\": \"\",\n \"error_message\": \"\",\n \"Filename\": \"\",\n \"Folder\": \"anat\",\n \"ignore\": \"\",\n \"IntendedFor\": \"\",\n \"Mod\": \"\",\n \"Modality\": \"T1w\",\n \"Path\": \"\",\n \"Rec\": \"\",\n \"Run\": \"\",\n \"Task\": \"\",\n \"template\": \"anat_file\",\n \"valid\": False}\n\n else:\n\n new_bids = {\"Acq\": \"\",\n \"Ce\": \"\",\n \"Dir\": \"\",\n \"Echo\": \"\",\n \"error_message\": \"\",\n \"Filename\": \"\",\n \"Folder\": folder,\n \"ignore\": \"\",\n \"IntendedFor\": \"\",\n \"Mod\": \"\",\n \"Modality\": \"\",\n \"Path\": \"\",\n \"Rec\": \"\",\n \"Run\": \"\",\n \"Task\": \"\",\n \"template\": \"\",\n \"valid\": False}\n\n return(new_bids)\n\n\ndef infer_params_from_filename(bdict):\n\n fname = bdict['Filename']\n\n params = ['Acq', 'Ce', 'Dir', 'Echo', 'Mod', 'Rec', 'Run', 'Task']\n 
to_fill = {}\n for x in params:\n search = re.search(r'(?<={}-)[A-Za-z0-9]+(?=_)'.format(x.lower()), fname)\n to_fill[x] = search.group() if search is not None else \"\"\n\n bdict.update(to_fill)\n\n\ndef confirm_intentions(client, session, dry_run=False):\n \"\"\"Ensure that files in \"IntededFor\" will ultimately exist in the BIDS directory.\n \"\"\"\n try:\n acqs = [client.get(s.id) for s in session.acquisitions()]\n acq_files = [f for a in acqs for f in a.files if '.nii' in f.name]\n bids_filenames = [get_nested(f, 'info', 'BIDS', 'Filename') for f in acq_files]\n bids_paths = [get_nested(f, 'info', 'BIDS', 'Path') for f in acq_files]\n full_filenames = []\n for folder, filename in zip(bids_paths, bids_filenames):\n if None in (folder, filename) or '' in (filename, folder):\n continue\n full_filenames.append(folder + \"/\" + filename)\n\n bids_files = [re.sub(\"sub-[a-zA-z0-9]+/\", \"\", x) for x in full_filenames]\n\n # Go through all the acquisitions in the session\n for acq in acqs:\n for acq_file in acq.files:\n if not acq_file.type == 'nifti':\n continue\n intendeds = get_nested(acq_file.to_dict(), 'info', 'IntendedFor')\n if not intendeds:\n continue\n # If there are \"IntendedFor\" values, check that they will exist\n logger.debug(\n \"Ensuring all intentions apply for acquisition %s: %s\",\n acq.label, acq_file.name)\n\n ok_intentions = []\n bad_intentions = []\n for intendedfor in intendeds:\n if intendedfor in bids_files:\n ok_intentions.append(intendedfor)\n else:\n bad_intentions.append(intendedfor)\n\n if bad_intentions:\n logger.warning(\n \"IntendedFor values do not point to a BIDS file: %s\",\n bad_intentions)\n # pdb.set_trace()\n if not dry_run:\n acq.update_file_info(acq_file.name,\n {'IntendedFor': ok_intentions})\n\n except Exception as e:\n logger.warning(\"Trouble updating intentions for this session %s\", session.label)\n logger.warning(e)\n\n\ndef confirm_bids_namespace(project_obj, dry_run):\n\n bids_info = get_nested(project_obj, 'info', 'BIDS')\n if bids_info in (None, ''):\n\n logger.debug(\"{} has no BIDS namespace!\".format(project_obj.label))\n\n if not dry_run:\n\n logger.debug(\"Adding default BIDS namespace...\")\n\n bids = {\n 'BIDS': {'Acknowledgements': '',\n 'Authors': [],\n 'BIDSVersion': '1.0.2',\n 'DatasetDOI': '',\n 'Funding': [],\n 'HowToAcknowledge': '',\n 'License': '',\n 'Name': project_obj.label,\n 'ReferencesAndLinks': [],\n 'template': 'project'}\n }\n\n project_obj.update_info(bids)\n project_obj = project_obj.reload()\n\n return project_obj\n\n\ndef verify_attachment(name, data, dtype='text/tab-separated-values'):\n\n types = mimetypes.types_map\n\n # check for extension\n # if found, check its dtype matches\n ext = path.splitext(name)[1]\n valid_fname = is_valid_filename(name)\n\n if ext:\n\n output_dtype = types.get(ext, None)\n if dtype == output_dtype:\n valid_dtype = True\n else:\n valid_dtype = False\n else:\n # no extension, just check dtype\n valid_dtype = dtype in list(mimetypes.types_map.values())\n\n valid_data = isinstance(data, str)\n\n return valid_fname, valid_data, valid_dtype\n\n\ndef upload_attachment(\n client, target_object, level, attachment_dict,\n subject_rename=None, session_rename=None,\n folders=['anat', 'dwi', 'func', 'fmap', 'perf'],\n dry_run=True\n ):\n '''processes and uploads the attachment\n '''\n\n bids = {\n \"Filename\": None,\n \"Folder\": None,\n \"Path\": None\n }\n\n if level == 'project':\n bids.update({\n \"Filename\": attachment_dict['name'],\n \"Path\": '.'\n })\n else:\n\n # manipulate sub 
and ses labels\n subj_replace = none_replace if subject_rename is None else subject_rename\n subj_label = subj_replace(force_label_format(target_object.subject.label))\n\n ses_replace = none_replace if session_rename is None else session_rename\n sess_label = ses_replace(force_label_format(target_object.label))\n\n attachment_dict['name'] = force_template_format(attachment_dict['name'])\n attachment_dict['name'] = attachment_dict['name'].format(subject=subj_label, session=sess_label)\n\n # get the dir/folder/path\n dirs = Path(attachment_dict['name']).parts\n folder = [x for x in dirs if x in folders]\n if not folder:\n folder = None\n else:\n folder = folder[0]\n\n path = str(Path(attachment_dict['name']).parent)\n\n # get filename\n attachment_dict['name'] = str(Path(attachment_dict['name']).name)\n\n # get BIDS ready\n bids.update({\n \"Filename\": str(Path(attachment_dict['name']).name),\n \"Folder\": folder,\n \"Path\": path\n })\n logger.debug(\n \"Attachment details:\\n\\tFilename: {}\\n\\tData: {}\\n\\tMIMEType: {}\".format(\n attachment_dict['name'], attachment_dict['data'], attachment_dict['type']\n )\n )\n logger.debug(\n \"Updating BIDS: \\n\\t{}\".format(bids)\n )\n\n verify_name, verify_data, verify_type = verify_attachment(\n attachment_dict['name'], attachment_dict['data'], attachment_dict['type']\n )\n\n if not all([verify_name, verify_data, verify_type]):\n\n logger.warning(\"Attachments may not be valid for upload!\")\n logger.debug(\n \"\\tFilename valid: {}\\n\\tData valid: {}\\n\\tMIMEType valid: {}\".format(\n verify_name, verify_data, verify_type\n )\n )\n\n if not dry_run:\n file_spec = flywheel.FileSpec(\n attachment_dict['name'], attachment_dict['data'], attachment_dict['type']\n )\n target_object.upload_file(file_spec)\n target_object = target_object.reload()\n target_object.update_file_info(attachment_dict['name'], {'BIDS': bids})\n logger.info(\"Attachment uploaded!\")\n\ndef parse_validator(path):\n\n with open(path, 'r') as read_file:\n data = json.load(read_file)\n\n issues = data['issues']\n\n def parse_issue(issue_dict):\n\n return_dict = {}\n return_dict['files'] = [get_nested(x, 'file', 'relativePath') for x in issue_dict.get('files', '')]\n return_dict['type'] = issue_dict.get('key' '')\n return_dict['severity'] = issue_dict.get('severity', '')\n return_dict['description'] = issue_dict.get('reason', '')\n return_dict['code'] = issue_dict.get('code', '')\n return_dict['url'] = issue_dict.get('helpUrl', '')\n\n return(return_dict)\n\n df = pd.DataFrame()\n\n for warn in issues['warnings']:\n\n parsed = parse_issue(warn)\n parsed = pd.DataFrame(parsed)\n df = df.append(parsed, ignore_index=True)\n\n for err in issues['errors']:\n\n parsed = parse_issue(err)\n parsed = pd.DataFrame(parsed)\n df = df.append(parsed, ignore_index=True)\n\n return df\n",
"import argparse\nimport warnings\nimport logging\nimport flywheel\nimport pandas as pd\nfrom fw_heudiconv.backend_funcs.query import get_seq_info\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('fw-heudiconv-tabulator')\n\n\ndef tabulate_bids(client, project_label, path=\".\", subject_labels=None,\n session_labels=None, dry_run=False, unique=True):\n \"\"\"Writes out a tabular form of the Seq Info objects\n\n Args:\n client (Client): The flywheel sdk client\n project_label (str): The label of the project\n heuristic_path (str): The path to the heuristic file or the name of a\n known heuristic\n subject_code (str): The subject code\n session_label (str): The session label\n dry_run (bool): Print the changes, don't apply them on flywheel\n \"\"\"\n\n logger.info(\"Querying Flywheel server...\")\n project_obj = client.projects.find_first('label=\"{}\"'.format(project_label))\n assert project_obj, \"Project not found! Maybe check spelling...?\"\n\n logger.debug('Found project: %s (%s)', project_obj['label'], project_obj.id)\n sessions = client.get_project_sessions(project_obj.id)\n assert sessions, \"No sessions found!\"\n\n # filters\n if subject_labels:\n sessions = [s for s in sessions if s.subject['label'] in subject_labels]\n if session_labels:\n sessions = [s for s in sessions if s.label in session_labels]\n logger.debug('Found sessions:\\n\\t%s',\n \"\\n\\t\".join(['%s (%s)' % (ses['label'], ses.id) for ses in sessions]))\n\n # Find SeqInfos to apply the heuristic to\n seq_infos = get_seq_info(client, project_label, sessions)\n seq_info_dicts = [seq._asdict() for seq in seq_infos]\n df = pd.DataFrame.from_dict(seq_info_dicts)\n\n if unique:\n df = df.drop_duplicates(subset=['TR', 'TE', 'protocol_name', 'is_motion_corrected', 'is_derived', 'series_description'])\n df = df.drop(['total_files_till_now', 'dcm_dir_name'], 1)\n\n return df\n\n\ndef output_result(df, path, project_label, dry_run):\n\n if dry_run:\n print(df)\n else:\n df.to_csv(\"{}/{}_SeqInfo.tsv\".format(path, project_label),\n sep=\"\\t\", index=False)\n\n\ndef get_parser():\n\n parser = argparse.ArgumentParser(\n description=\"Tabulate DICOM header info from a project on Flywheel\")\n parser.add_argument(\n \"--project\",\n help=\"The project in flywheel\",\n required=True\n )\n parser.add_argument(\n \"--path\",\n help=\"Path to download .tsv file\",\n default=\".\",\n required=False\n )\n parser.add_argument(\n \"--subject\",\n help=\"The subject label(s)\",\n nargs=\"+\",\n default=None\n )\n parser.add_argument(\n \"--session\",\n help=\"The session label(s)\",\n nargs=\"+\",\n default=None\n )\n parser.add_argument(\n \"--verbose\",\n help=\"Print ongoing messages of progress\",\n action='store_true',\n default=False\n )\n parser.add_argument(\n \"--dry-run\",\n help=\"Don't apply changes\",\n action='store_true',\n default=False\n )\n unique = parser.add_mutually_exclusive_group()\n\n unique.add_argument(\n '--unique',\n dest='unique',\n action='store_true'\n )\n unique.add_argument(\n '--no-unique',\n dest='unique',\n action='store_false'\n )\n parser.add_argument(\n \"--api-key\",\n help=\"API Key\",\n action='store',\n default=None\n )\n\n return parser\n\n\ndef main():\n\n logger.info(\"{:=^70}\\n\".format(\": fw-heudiconv tabulator starting up :\"))\n\n parser = get_parser()\n args = parser.parse_args()\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n if args.api_key:\n fw = flywheel.Client(args.api_key)\n else:\n fw = flywheel.Client()\n assert fw, \"Your 
Flywheel CLI credentials aren't set!\"\n\n # Print a lot if requested\n if args.verbose or args.dry_run:\n logger.setLevel(logging.DEBUG)\n\n result = tabulate_bids(client=fw,\n project_label=args.project,\n path=args.path,\n session_labels=args.session,\n subject_labels=args.subject,\n dry_run=args.dry_run,\n unique=args.unique)\n\n output_result(result, path=args.path, project_label=args.project, dry_run=args.dry_run)\n\n logger.info(\"Done!\")\n logger.info(\"{:=^70}\".format(\": Exiting fw-heudiconv tabulator :\"))\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.DataFrame"
],
[
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
StadlerMaximilian/Detectron.pytorch | [
"b7a7c053b15da21418f53d9e97f4652d0d139523"
] | [
"tools/train_net.py"
] | [
"\"\"\" Training Script \"\"\"\n\nimport argparse\nimport distutils.util\nimport os\nimport sys\nimport pickle\nimport resource\nimport traceback\nimport logging\nfrom collections import defaultdict\n\nimport numpy as np\nimport yaml\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport cv2\ncv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in dataloader\n\nimport _init_paths # pylint: disable=unused-import\nimport nn as mynn\nimport utils.net as net_utils\nimport utils.misc as misc_utils\nfrom core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg\nfrom datasets.roidb import combined_roidb_for_training\nfrom modeling.model_builder import Generalized_RCNN\nfrom roi_data.loader import RoiDataLoader, MinibatchSampler, collate_minibatch\nfrom utils.detectron_weight_helper import load_caffe2_detectron_weights\nfrom utils.logging import log_stats\nfrom utils.timer import Timer\nfrom utils.training_stats import TrainingStats\n\n# OpenCL may be enabled by default in OpenCV3; disable it because it's not\n# thread safe and causes unwanted GPU memory allocations.\ncv2.ocl.setUseOpenCL(False)\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# RuntimeError: received 0 items of ancdata. Issue: pytorch/pytorch#973\nrlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\nresource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))\n\n\ndef parse_args():\n \"\"\"Parse input arguments\"\"\"\n parser = argparse.ArgumentParser(description='Train a X-RCNN network')\n\n parser.add_argument(\n '--dataset', dest='dataset', required=True,\n help='Dataset to use')\n parser.add_argument(\n '--cfg', dest='cfg_file', required=True,\n help='Config file for training (and optionally testing)')\n parser.add_argument(\n '--set', dest='set_cfgs',\n help='Set config keys. Key value sequence seperate by whitespace.'\n 'e.g. [key] [value] [key] [value]',\n default=[], nargs='+')\n\n parser.add_argument(\n '--disp_interval',\n help='Display training info every N iterations',\n default=100, type=int)\n parser.add_argument(\n '--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')\n\n # Optimization\n # These options has the highest prioity and can overwrite the values in config file\n # or values set by set_cfgs. `None` means do not overwrite.\n parser.add_argument(\n '--bs', dest='batch_size',\n help='Explicitly specify to overwrite the value comed from cfg_file.',\n type=int)\n parser.add_argument(\n '--nw', dest='num_workers',\n help='Explicitly specify to overwrite number of workers to load data. Defaults to 4',\n type=int)\n\n parser.add_argument(\n '--o', dest='optimizer', help='Training optimizer.',\n default=None)\n parser.add_argument(\n '--lr', help='Base learning rate.',\n default=None, type=float)\n parser.add_argument(\n '--lr_decay_gamma',\n help='Learning rate decay rate.',\n default=None, type=float)\n parser.add_argument(\n '--lr_decay_epochs',\n help='Epochs to decay the learning rate on. '\n 'Decay happens on the beginning of a epoch. '\n 'Epoch is 0-indexed.',\n default=[4, 5], nargs='+', type=int)\n\n # Epoch\n parser.add_argument(\n '--start_iter',\n help='Starting iteration for first training epoch. 0-indexed.',\n default=0, type=int)\n parser.add_argument(\n '--start_epoch',\n help='Starting epoch count. 
Epoch is 0-indexed.',\n default=0, type=int)\n parser.add_argument(\n '--epochs', dest='num_epochs',\n help='Number of epochs to train',\n default=6, type=int)\n\n # Resume training: requires same iterations per epoch\n parser.add_argument(\n '--resume',\n help='resume to training on a checkpoint',\n action='store_true')\n\n parser.add_argument(\n '--no_save', help='do not save anything', action='store_true')\n\n parser.add_argument(\n '--ckpt_num_per_epoch',\n help='number of checkpoints to save in each epoch. '\n 'Not include the one at the end of an epoch.',\n default=3, type=int)\n\n parser.add_argument(\n '--load_ckpt', help='checkpoint path to load')\n parser.add_argument(\n '--load_detectron', help='path to the detectron weight pickle file')\n\n parser.add_argument(\n '--use_tfboard', help='Use tensorflow tensorboard to log training info',\n action='store_true')\n\n return parser.parse_args()\n\n\ndef main():\n \"\"\"Main function\"\"\"\n\n args = parse_args()\n print('Called with args:')\n print(args)\n\n if not torch.cuda.is_available():\n sys.exit(\"Need a CUDA device to run the code.\")\n\n if args.cuda or cfg.NUM_GPUS > 0:\n cfg.CUDA = True\n else:\n raise ValueError(\"Need Cuda device to run !\")\n\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n ### Adaptively adjust some configs ###\n original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH\n if args.batch_size is None:\n args.batch_size = original_batch_size\n cfg.NUM_GPUS = torch.cuda.device_count()\n assert (args.batch_size % cfg.NUM_GPUS) == 0, \\\n 'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)\n cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS\n print('Batch size change from {} (in config file) to {}'.format(\n original_batch_size, args.batch_size))\n print('NUM_GPUs: %d, TRAIN.IMS_PER_BATCH: %d' % (cfg.NUM_GPUS, cfg.TRAIN.IMS_PER_BATCH))\n\n if args.num_workers is not None:\n cfg.DATA_LOADER.NUM_THREADS = args.num_workers\n print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)\n\n ### Adjust learning based on batch size change linearly\n old_base_lr = cfg.SOLVER.BASE_LR\n cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size\n print('Adjust BASE_LR linearly according to batch size change: {} --> {}'.format(\n old_base_lr, cfg.SOLVER.BASE_LR))\n\n ### Overwrite some solver settings from command line arguments\n if args.optimizer is not None:\n cfg.SOLVER.TYPE = args.optimizer\n if args.lr is not None:\n cfg.SOLVER.BASE_LR = args.lr\n if args.lr_decay_gamma is not None:\n cfg.SOLVER.GAMMA = args.lr_decay_gamma\n\n timers = defaultdict(Timer)\n\n ### Dataset ###\n timers['roidb'].tic()\n roidb, ratio_list, ratio_index = combined_roidb_for_training(\n cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)\n timers['roidb'].toc()\n train_size = len(roidb)\n logger.info('{:d} roidb entries'.format(train_size))\n logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)\n\n sampler = MinibatchSampler(ratio_list, ratio_index)\n dataset = RoiDataLoader(\n roidb,\n cfg.MODEL.NUM_CLASSES,\n training=True)\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=args.batch_size,\n sampler=sampler,\n num_workers=cfg.DATA_LOADER.NUM_THREADS,\n collate_fn=collate_minibatch)\n\n assert_and_infer_cfg()\n\n ### Model ###\n maskRCNN = Generalized_RCNN()\n\n if cfg.CUDA:\n maskRCNN.cuda()\n\n ### Optimizer ###\n bias_params = []\n nonbias_params = []\n for key, value in 
dict(maskRCNN.named_parameters()).items():\n if value.requires_grad:\n if 'bias' in key:\n bias_params.append(value)\n else:\n nonbias_params.append(value)\n params = [\n {'params': nonbias_params,\n 'lr': cfg.SOLVER.BASE_LR,\n 'weight_decay': cfg.SOLVER.WEIGHT_DECAY},\n {'params': bias_params,\n 'lr': cfg.SOLVER.BASE_LR * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),\n 'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0}\n ]\n\n if cfg.SOLVER.TYPE == \"SGD\":\n optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)\n elif cfg.SOLVER.TYPE == \"Adam\":\n optimizer = torch.optim.Adam(params)\n\n ### Load checkpoint\n if args.load_ckpt:\n load_name = args.load_ckpt\n logging.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(maskRCNN, checkpoint['model'])\n if args.resume:\n assert checkpoint['iters_per_epoch'] == train_size // args.batch_size, \\\n \"iters_per_epoch should match for resume\"\n # There is a bug in optimizer.load_state_dict on Pytorch 0.3.1.\n # However it's fixed on master.\n # optimizer.load_state_dict(checkpoint['optimizer'])\n misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])\n if checkpoint['step'] == (checkpoint['iters_per_epoch'] - 1):\n # Resume from end of an epoch\n args.start_epoch = checkpoint['epoch'] + 1\n args.start_iter = 0\n else:\n # Resume from the middle of an epoch.\n # NOTE: dataloader is not synced with previous state\n args.start_epoch = checkpoint['epoch']\n args.start_iter = checkpoint['step'] + 1\n del checkpoint\n torch.cuda.empty_cache()\n\n if args.load_detectron: #TODO resume for detectron weights (load sgd momentum values)\n logging.info(\"loading Detectron weights %s\", args.load_detectron)\n load_caffe2_detectron_weights(maskRCNN, args.load_detectron)\n\n lr = optimizer.param_groups[0]['lr'] # lr of non-bias parameters, for commmand line outputs.\n\n maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],\n minibatch=True)\n\n ### Training Setups ###\n args.run_name = misc_utils.get_run_name()\n output_dir = misc_utils.get_output_dir(args, args.run_name)\n args.cfg_filename = os.path.basename(args.cfg_file)\n\n if not args.no_save:\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n blob = {'cfg': yaml.dump(cfg), 'args': args}\n with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:\n pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)\n\n if args.use_tfboard:\n from tensorboardX import SummaryWriter\n # Set the Tensorboard logger\n tblogger = SummaryWriter(output_dir)\n\n ### Training Loop ###\n maskRCNN.train()\n\n training_stats = TrainingStats(\n args,\n args.disp_interval,\n tblogger if args.use_tfboard and not args.no_save else None)\n\n iters_per_epoch = int(train_size / args.batch_size) # drop last\n args.iters_per_epoch = iters_per_epoch\n ckpt_interval_per_epoch = iters_per_epoch // args.ckpt_num_per_epoch\n try:\n logger.info('Training starts !')\n args.step = args.start_iter\n global_step = iters_per_epoch * args.start_epoch + args.step\n for args.epoch in range(args.start_epoch, args.start_epoch + args.num_epochs):\n # ---- Start of epoch ----\n\n # adjust learning rate\n if args.lr_decay_epochs and args.epoch == args.lr_decay_epochs[0] and args.start_iter == 0:\n args.lr_decay_epochs.pop(0)\n net_utils.decay_learning_rate(optimizer, lr, cfg.SOLVER.GAMMA)\n lr *= cfg.SOLVER.GAMMA\n\n for args.step, input_data in zip(range(args.start_iter, 
iters_per_epoch), dataloader):\n\n for key in input_data:\n if key != 'roidb': # roidb is a list of ndarrays with inconsistent length\n input_data[key] = list(map(Variable, input_data[key]))\n\n training_stats.IterTic()\n net_outputs = maskRCNN(**input_data)\n training_stats.UpdateIterStats(net_outputs)\n loss = net_outputs['total_loss']\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n training_stats.IterToc()\n\n if (args.step+1) % ckpt_interval_per_epoch == 0:\n net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)\n\n if args.step % args.disp_interval == 0:\n log_training_stats(training_stats, global_step, lr)\n\n global_step += 1\n\n # ---- End of epoch ----\n # save checkpoint\n net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)\n # reset starting iter number after first epoch\n args.start_iter = 0\n\n # ---- Training ends ----\n if iters_per_epoch % args.disp_interval != 0:\n # log last stats at the end\n log_training_stats(training_stats, global_step, lr)\n\n except (RuntimeError, KeyboardInterrupt):\n logger.info('Save ckpt on exception ...')\n net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)\n logger.info('Save ckpt done.')\n stack_trace = traceback.format_exc()\n print(stack_trace)\n\n finally:\n if args.use_tfboard and not args.no_save:\n tblogger.close()\n\n\ndef log_training_stats(training_stats, global_step, lr):\n stats = training_stats.GetStats(global_step, lr)\n log_stats(stats, training_stats.misc_args)\n if training_stats.tblogger:\n training_stats.tb_log_stats(stats, global_step)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.optim.Adam",
"torch.load",
"torch.utils.data.DataLoader",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.optim.SGD",
"torch.cuda.device_count"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
inspurer/ImageProcess | [
"f826c36f3ae17bee5694c3f1748f9e5319a46fd9",
"f826c36f3ae17bee5694c3f1748f9e5319a46fd9"
] | [
"codes/3_1.py",
"codes/4_1.py"
] | [
"# -*- coding: utf-8 -*-\n# pc_type lenovo\n# create_time: 2019/11/9 15:15\n# file_name: 3_1.py\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport random\n# 设置中文字体和负号正常显示\nplt.rcParams['font.sans-serif'] = ['SimHei'] # 指定默认字体\nplt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题\nsns.set_context(\"paper\") # 背景\nsns.set_style('whitegrid') # 主题\nsns.set(font='SimHei') # 解决Seaborn中文显示问题,这一句必须放在前两后面\n\ndef sp_noise(image,prob):\n '''\n 添加椒盐噪声\n prob:噪声比例\n '''\n output = np.zeros(image.shape,np.uint8)\n thres = 1 - prob\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rdn = random.random()\n if rdn < prob:\n output[i][j] = 0\n elif rdn > thres:\n output[i][j] = 255\n else:\n output[i][j] = image[i][j]\n return output\n\ndef gauss_noise(image, mean=0, var=0.001):\n '''\n 添加高斯噪声\n mean : 均值 mean = 0 是高斯白噪声\n var : 方差 方差越大,图像越模糊\n '''\n image = np.array(image / 255, dtype=float)\n noise = np.random.normal(mean, var ** 0.5, image.shape)\n out = image + noise\n if out.min() < 0:\n low_clip = -1.\n else:\n low_clip = 0.\n # 把 out 的元素限制在 low_clip 和 1 之间\n out = np.clip(out, low_clip, 1.0)\n out = out*255\n #cv.imshow(\"gasuss\", out)\n return out\nfrom PIL import Image\n\n# 解决 opencv 不能读取 gif\ngif = cv2.VideoCapture('img/test3.gif')\nret,frame = gif.read()\nimg = Image.fromarray(frame)\n# L : 灰度图 , RGB : RGB 彩色图\nimg = img.convert('L')\nimg = np.array(img)\n\nsp_img = sp_noise(img,0.015)\n\ngs_img = gauss_noise(img,var=0.02)\n\n# 邻域平均法\ndef fspeical_average(image,kernel):\n a = len(kernel)\n kernel = kernel/(a**2)\n step = a//2\n h,w = image.shape[0],image.shape[1]\n nh,nw = h+2*step,w+2*step\n lbimg = np.zeros((nh,nw), np.float32)\n tmpimg = np.zeros((nh,nw))\n newimg = np.array(image)\n tmpimg[step:nh - step, step:nw - step] = newimg[0:h, 0:w]\n for y in range(step, nh - step):\n for x in range(step, nw - step):\n lbimg[y, x] = np.sum(kernel * tmpimg[y - step:y + step + 1, x - step:x + step + 1])\n resultimg = np.array(lbimg[step:nh - step, step:nw - step], np.uint8)\n return resultimg\n# 中值滤波法\ndef fspeical_medium(image,a):\n step = a // 2\n h, w = image.shape[0], image.shape[1]\n nh, nw = h + 2 * step, w + 2 * step\n lbimg = np.zeros((nh, nw), np.float32)\n tmpimg = np.zeros((nh, nw))\n newimg = np.array(image)\n tmpimg[step:nh - step, step:nw - step] = newimg[0:h, 0:w]\n for y in range(step, nh - step):\n for x in range(step, nw - step):\n lbimg[y, x] = np.median(tmpimg[y - step:y + step + 1, x - step:x + step + 1])\n resultimg = np.array(lbimg[step:nh - step, step:nw - step], np.uint8)\n return 
resultimg\n\nplt.figure()\nplt.subplot(2,4,1)\nplt.imshow(img,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,4,5)\nplt.imshow(img,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,4,2)\nplt.imshow(sp_img,cmap='gray')\nplt.title(\"加椒盐噪声\")\nplt.subplot(2,4,3)\nplt.imshow(fspeical_average(sp_img,kernel=np.array([[1,1,1],[1,1,1],[1,1,1]])),cmap='gray')\nplt.title(\"邻域平均法去椒盐噪声(3x3)\")\nplt.subplot(2,4,4)\nplt.imshow(fspeical_average(sp_img,kernel=np.array([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])),cmap='gray')\nplt.title(\"邻域平均法去椒盐噪声(5x5)\")\nplt.subplot(2,4,6)\nplt.imshow(gs_img,cmap='gray')\nplt.title(\"加高斯噪声\")\nplt.subplot(2,4,7)\nplt.imshow(fspeical_average(gs_img,kernel=np.array([[1,1,1],[1,1,1],[1,1,1]])),cmap='gray')\nplt.title(\"邻域平均法去高斯噪声(3x3)\")\nplt.subplot(2,4,8)\nplt.imshow(fspeical_average(gs_img,kernel=np.array([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])),cmap='gray')\nplt.title(\"邻域平均法去高斯噪声(5x5)\")\n\n\nplt.figure()\nplt.subplot(2,4,1)\nplt.imshow(img,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,4,5)\nplt.imshow(img,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,4,2)\nplt.imshow(sp_img,cmap='gray')\nplt.title(\"加椒盐噪声\")\nplt.subplot(2,4,3)\nplt.imshow(cv2.medianBlur(sp_img,3),cmap='gray')\nplt.title(\"中值滤波法去椒盐噪声(3x3)\")\nplt.subplot(2,4,4)\nplt.imshow(cv2.medianBlur(sp_img,5),cmap='gray')\nplt.title(\"中值滤波法去椒盐噪声(5x5)\")\nplt.subplot(2,4,6)\nplt.imshow(gs_img,cmap='gray')\nplt.title(\"加高斯噪声\")\n\nplt.subplot(2,4,7)\nplt.imshow(fspeical_medium(gs_img,3),cmap='gray')\nplt.title(\"中值滤波法去高斯噪声(3x3)\")\nplt.subplot(2,4,8)\nplt.imshow(fspeical_medium(gs_img,5),cmap='gray')\nplt.title(\"中值滤波法去高斯噪声(5x5)\")\n\n# for h in range(gs_img.shape[0]):\n# for w in range(gs_img.shape[1]):\n# if gs_img[h][w]<0:\n# gs_img[h][w] = -gs_img[h][w]\n\n# medianBlur 仅接收无符号整数类型元素\n# gs_img = np.uint8(gs_img)\n# print(gs_img)\n# plt.subplot(2,4,7)\n# print(sp_img,gs_img)\n# plt.imshow(cv2.medianBlur(gs_img,3),cmap='gray')\n# plt.title(\"中值滤波法去高斯噪声(3x3)\")\n# plt.subplot(2,4,8)\n# plt.imshow(cv2.medianBlur(gs_img,5),cmap='gray')\n# plt.title(\"中值滤波法去高斯噪声(5x5)\")\n\n\nplt.show()\n\n",
"# -*- coding: utf-8 -*-\n# pc_type lenovo\n# create_time: 2019/11/10 9:13\n# file_name: 4_1.py\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n# 设置中文字体和负号正常显示\nplt.rcParams['font.sans-serif'] = ['SimHei'] # 指定默认字体\nplt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题\nsns.set_context(\"paper\") # 背景\nsns.set_style('whitegrid') # 主题\nsns.set(font='SimHei') # 解决Seaborn中文显示问题,这一句必须放在前两后面\n\n\nimg1 = cv2.imread(\"img/test7.tif\",flags=0)\nimg2 = cv2.imread(\"img/test8.tif\",flags=0)\n\ndef my_grad(img):\n height,width = img.shape[0],img.shape[1]\n Grad = np.zeros(img.shape,dtype=np.int8)\n # 不对右、下边界作处理、影响不大\n for h in range(height-1):\n for w in range(width-1):\n gx = int(img[h,w+1])- int(img[h,w])\n gy = int(img[h+1,w]) -int(img[h,w])\n Grad[h][w] = abs(gx) + abs(gy)\n return Grad\n\ndef my_roberts(img):\n # Roberts算子\n kernelx = np.array([[-1, 0], [0, 1]], dtype=int)\n kernely = np.array([[0, -1], [1, 0]], dtype=int)\n x = cv2.filter2D(img, cv2.CV_16S, kernelx)\n y = cv2.filter2D(img, cv2.CV_16S, kernely)\n # 转uint8\n absX = cv2.convertScaleAbs(x)\n absY = cv2.convertScaleAbs(y)\n Roberts = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)\n return Roberts\n\ndef my_prewitt(img):\n # Prewitt算子\n kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]], dtype=int)\n kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], dtype=int)\n x = cv2.filter2D(img, cv2.CV_16S, kernelx)\n y = cv2.filter2D(img, cv2.CV_16S, kernely)\n # 转uint8\n absX = cv2.convertScaleAbs(x)\n absY = cv2.convertScaleAbs(y)\n Prewitt = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)\n return Prewitt\n\ndef my_sobel(img):\n # Sobel算子\n x = cv2.Sobel(img, cv2.CV_16S, 1, 0) # 对x求一阶导\n y = cv2.Sobel(img, cv2.CV_16S, 0, 1) # 对y求一阶导\n absX = cv2.convertScaleAbs(x)\n absY = cv2.convertScaleAbs(y)\n Sobel = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)\n return Sobel\n\nplt.figure()\nplt.subplot(2,5,1)\nplt.imshow(img1,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,5,2)\nmg1 = my_grad(img1)\nplt.imshow(mg1,cmap='gray')\nplt.title(\"梯度算子\")\nplt.subplot(2,5,3)\nmr1 = my_roberts(img1)\nplt.imshow(mr1,cmap='gray')\nplt.title(\"Roberts算子\")\nplt.subplot(2,5,4)\nmp1 = my_prewitt(img1)\nplt.imshow(mp1,cmap='gray')\nplt.title(\"Prewitt算子\")\nplt.subplot(2,5,5)\nms1 = my_sobel(img1)\nplt.imshow(ms1,cmap='gray')\nplt.title(\"Sobel算子\")\n\nplt.subplot(2,5,6)\nplt.imshow(img2,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,5,7)\nmg2 = my_grad(img2)\nplt.imshow(mg2,cmap='gray')\nplt.title(\"梯度算子\")\nplt.subplot(2,5,8)\nmr2 = my_roberts(img2)\nplt.imshow(mr2,cmap='gray')\nplt.title(\"Roberts算子\")\nplt.subplot(2,5,9)\nmp2 = my_prewitt(img2)\nplt.imshow(mp2,cmap='gray')\nplt.title(\"Prewitt算子\")\nplt.subplot(2,5,10)\nms2 = my_sobel(img2)\nplt.imshow(ms2,cmap='gray')\nplt.title(\"Sobel算子\")\n\ndef my_laplacian(img):\n dst = cv2.Laplacian(img, cv2.CV_16S, ksize=3)\n Laplacian = cv2.convertScaleAbs(dst)\n return Laplacian\n\nplt.figure()\nplt.subplot(2,2,1)\nplt.imshow(img1,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,2,2)\nml1 = my_laplacian(img1)\nplt.imshow(ml1,cmap='gray')\nplt.title(\"Laplacian算子\")\nplt.subplot(2,2,3)\nplt.imshow(img2,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,2,4)\nml2 = my_laplacian(img2)\nplt.imshow(ml2,cmap='gray')\nplt.title(\"Laplacian算子\")\n\nplt.figure()\n\ndef my_adaptivethreshold(img):\n result_img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)\n return 
result_img\n\nplt.subplot(2,5,1)\nplt.imshow(mg1,cmap='gray')\nplt.title(\"梯度算子\")\nplt.subplot(2,5,2)\nplt.imshow(mr1,cmap='gray')\nplt.title(\"Roberts算子\")\nplt.subplot(2,5,3)\nplt.imshow(mp1,cmap='gray')\nplt.title(\"Prewitt算子\")\nplt.subplot(2,5,4)\nplt.imshow(ms1,cmap='gray')\nplt.title(\"Sobel算子\")\nplt.subplot(2,5,5)\nplt.imshow(ml1,cmap='gray')\nplt.title(\"Laplacian算子\")\nplt.subplot(2,5,6)\nmg1 = np.uint8(mg1)\nplt.imshow(my_adaptivethreshold(mg1),cmap='gray')\nplt.title(\"梯度算子(自适应阈值处理)\")\nplt.subplot(2,5,7)\nplt.imshow(my_adaptivethreshold(mr1),cmap='gray')\nplt.title(\"Roberts算子(自适应阈值处理)\")\nplt.subplot(2,5,8)\nplt.imshow(my_adaptivethreshold(mp1),cmap='gray')\nplt.title(\"Prewitt算子(自适应阈值处理)\")\nplt.subplot(2,5,9)\nplt.imshow(my_adaptivethreshold(ms1),cmap='gray')\nplt.title(\"Sobel算子(自适应阈值处理)\")\nplt.subplot(2,5,10)\nplt.imshow(my_adaptivethreshold(ml1),cmap='gray')\nplt.title(\"Laplacian算子(自适应阈值处理)\")\nplt.show()\n\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.clip",
"numpy.median",
"numpy.random.normal",
"matplotlib.pyplot.subplot",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.uint8",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
koheikawata/objectdetectiontest | [
"a4cb01911fa3d0e10bd2c9aa3fd985113af10b1b"
] | [
"research/object_detection_inference_test1.py"
] | [
"import numpy as np\nimport os\nimport tensorflow as tf\nimport time\nimport json\n\nfrom PIL import Image\nfrom object_detection.utils import ops as utils_ops\nfrom object_detection.utils import label_map_util\n\nTHRESHOLD = 0.6\nLABEL_PATH = 'object_detection/test1/pascal_label_map.pbtxt'\nMODEL_PATH = 'object_detection/test1/output/frozen_inference_graph.pb'\nIMAGE_PATH = 'object_detection/test1/JPEGImages/IMG_00000.jpg'\n\nimage = Image.open(IMAGE_PATH)\n(im_width, im_height) = image.size\nimage_np = np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)\nimage_np_expanded = np.expand_dims(image_np, axis=0)\n\nwith tf.gfile.GFile(MODEL_PATH, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\ntf.import_graph_def(graph_def, name='')\nops = tf.get_default_graph().get_operations()\nall_tensor_names = {output.name for op in ops for output in op.outputs}\ntensor_dict = {}\nfor key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)\nimage_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\nstart_time = time.time()\nwith tf.Session() as sess:\n output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image_np_expanded})\nend_time = time.time()\nprint('Inference takes {:.4f} sec'.format(end_time - start_time))\n\noutput_dict['num_detections'] = int(output_dict['num_detections'][0])\noutput_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8).tolist()\noutput_dict['detection_boxes'] = output_dict['detection_boxes'][0].tolist()\noutput_dict['detection_scores'] = output_dict['detection_scores'][0].tolist()\n\ncategory_index = label_map_util.create_category_index_from_labelmap(LABEL_PATH, use_display_name=True)\n\nresult = []\nfor idx, score in enumerate(output_dict['detection_scores']):\n if score > THRESHOLD:\n result.append({\n 'class': output_dict['detection_classes'][idx],\n 'label': category_index[output_dict['detection_classes'][idx]]['name'],\n 'confidence': output_dict['detection_scores'][idx],\n 'bounding_box': output_dict['detection_boxes'][idx]\n })\n\njson.dumps(result)"
] | [
[
"tensorflow.import_graph_def",
"numpy.expand_dims",
"tensorflow.gfile.GFile",
"tensorflow.Session",
"tensorflow.get_default_graph",
"tensorflow.GraphDef"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ManeeshaPerera/forecast-framework | [
"60a22af4a97aec10c8bbea7f3f833061283382cb"
] | [
"run_combinations.py"
] | [
"from combinations.equal_weight import EqualWeight\nfrom combinations.pso_model import PSO\nfrom combinations.recursive_method import RecursiveEnsemble\nimport constants as const\nimport pandas as pd\nimport numpy as np\n\n\ndef run_combinations(horizon, forecast, forecast_test, data_train, data_out_sample):\n weights = {'weight': [], 'method': [], 'comb_method': []}\n horizon_info = const.HORIZON_INFO[horizon]\n seasonality = horizon_info['arima_params'][\n 'seasonal_freq']\n methods = forecast.columns.tolist()\n\n pso_initial_options = {'c1': [0, 10],\n 'c2': [0, 10],\n 'w': [0, 10],\n 'k': [1, 20],\n 'p': 2}\n num_pso_particles = 100\n\n # Run equal weight\n equal_weight = EqualWeight(forecast)\n equal_weight.find_weights()\n\n add_weights(weights, equal_weight.weights, methods, 'average')\n\n eq_fc = equal_weight.get_forecast(forecast)\n eq_fc_test = equal_weight.get_forecast(forecast_test)\n\n # Run PSO\n dimension = len(forecast.columns)\n pso = PSO(forecast, data_train, data_out_sample, dimension, num_pso_particles,\n horizon_info['horizon_as_int'],\n seasonality, options=pso_initial_options)\n pso.hyper_parameter_search()\n pso.find_weights()\n add_weights(weights, pso.weights, methods, 'pso- unconstrained')\n pso_fc = pso.get_forecast(forecast)\n pso_fc_test = pso.get_forecast(forecast_test)\n\n # PSO with bounds\n pso_b = PSO(forecast, data_train, data_out_sample, dimension, num_pso_particles,\n horizon_info['horizon_as_int'],\n seasonality, options=pso_initial_options, bounds=(np.array([0, 0, 0, 0, 0]), np.array([1, 1, 1, 1, 1])))\n pso_b.hyper_parameter_search()\n pso_b.find_weights()\n add_weights(weights, pso_b.weights, methods, 'pso [0,1]')\n pso_b_fc = pso_b.get_forecast(forecast)\n pso_b_fc_test = pso_b.get_forecast(forecast_test)\n\n # Add to Unity\n pso_b.weights = pso_b.weights / pso_b.weights.sum()\n add_weights(weights, pso_b.weights, methods, 'pso- convex')\n pso_b_fc_scaled = pso_b.get_forecast(forecast)\n pso_b_fc_test_scaled = pso_b.get_forecast(forecast_test)\n\n # Run recursive ensemble\n print(\"start recursive ensemble\")\n matrix = np.identity(len(forecast.columns))\n re = RecursiveEnsemble(forecast, data_train, data_out_sample, horizon_info['horizon_as_int'], matrix, seasonality,\n 0.001)\n re.find_weights()\n add_weights(weights, re.weights, methods, 're')\n re_fc = re.get_forecast(forecast)\n re_fc_test = re.get_forecast(forecast_test)\n\n train = pd.concat([pso_fc, pso_b_fc, pso_b_fc_scaled, eq_fc, re_fc], axis=1)\n train.columns = ['pso- unconstrained', 'pso [0,1]', 'pso- convex', 'average', 're']\n\n test = pd.concat([pso_fc_test, pso_b_fc_test, pso_b_fc_test_scaled, eq_fc_test, re_fc_test], axis=1)\n test.columns = ['pso- unconstrained', 'pso [0,1]', 'pso- convex', 'average', 're']\n\n return train, test, pd.DataFrame(weights)\n\n\ndef add_weights(dic, weights, methods, comb_name):\n for w in range(0, len(weights)):\n dic['weight'].append(weights[w])\n dic['method'].append(methods[w])\n dic['comb_method'].append(comb_name)\n"
] | [
[
"pandas.concat",
"numpy.array",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |