Columns: repo_name (string, lengths 8-130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence)
tdchaitanya/kornia
[ "6dd16563f66f979c7a95846ef86678894b7d54fd" ]
[ "kornia/filters/filter.py" ]
[ "from typing import Tuple, List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom kornia.filters.kernels import normalize_kernel2d\n\n\ndef compute_padding(kernel_size: Tuple[int, int]) -> List[int]:\n \"\"\"Computes padding tuple.\"\"\"\n # 4 ints: (padding_left, padding_right,padding_top,padding_bottom)\n # https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad\n assert len(kernel_size) == 2, kernel_size\n computed = [(k - 1) // 2 for k in kernel_size]\n return [computed[1], computed[1], computed[0], computed[0]]\n\n\ndef filter2D(input: torch.Tensor, kernel: torch.Tensor,\n border_type: str = 'reflect',\n normalized: bool = False) -> torch.Tensor:\n r\"\"\"Function that convolves a tensor with a kernel.\n\n The function applies a given kernel to a tensor. The kernel is applied\n independently at each depth channel of the tensor. Before applying the\n kernel, the function applies padding according to the specified mode so\n that the output remains in the same shape.\n\n Args:\n input (torch.Tensor): the input tensor with shape of\n :math:`(B, C, H, W)`.\n kernel (torch.Tensor): the kernel to be convolved with the input\n tensor. The kernel shape must be :math:`(B, kH, kW)`.\n border_type (str): the padding mode to be applied before convolving.\n The expected modes are: ``'constant'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. Default: ``'reflect'``.\n normalized (bool): If True, kernel will be L1 normalized.\n\n Return:\n torch.Tensor: the convolved tensor of same size and numbers of channels\n as the input.\n \"\"\"\n if not isinstance(input, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\"\n .format(type(input)))\n\n if not isinstance(kernel, torch.Tensor):\n raise TypeError(\"Input kernel type is not a torch.Tensor. Got {}\"\n .format(type(kernel)))\n\n if not isinstance(border_type, str):\n raise TypeError(\"Input border_type is not string. Got {}\"\n .format(type(kernel)))\n\n if not len(input.shape) == 4:\n raise ValueError(\"Invalid input shape, we expect BxCxHxW. Got: {}\"\n .format(input.shape))\n\n if not len(kernel.shape) == 3:\n raise ValueError(\"Invalid kernel shape, we expect BxHxW. Got: {}\"\n .format(kernel.shape))\n\n borders_list: List[str] = ['constant', 'reflect', 'replicate', 'circular']\n if border_type not in borders_list:\n raise ValueError(\"Invalid border_type, we expect the following: {0}.\"\n \"Got: {1}\".format(borders_list, border_type))\n\n # prepare kernel\n b, c, h, w = input.shape\n tmp_kernel: torch.Tensor = kernel.to(input.device).to(input.dtype)\n tmp_kernel = tmp_kernel.repeat(c, 1, 1, 1)\n if normalized:\n tmp_kernel = normalize_kernel2d(tmp_kernel)\n # pad the input tensor\n height, width = tmp_kernel.shape[-2:]\n padding_shape: List[int] = compute_padding((height, width))\n input_pad: torch.Tensor = F.pad(input, padding_shape, mode=border_type)\n\n # convolve the tensor with the kernel\n return F.conv2d(input_pad, tmp_kernel, padding=0, stride=1, groups=c)\n" ]
[ [ "torch.nn.functional.conv2d", "torch.nn.functional.pad" ] ]
rryan/transformers
[ "f382a8decda82062bb6911f05b646f404eacfdd4" ]
[ "examples/run_glue.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa).\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept:\n from tensorboardX import SummaryWriter\n\nfrom tqdm import tqdm, trange\n\nfrom transformers import (WEIGHTS_NAME, BertConfig,\n BertForSequenceClassification, BertTokenizer,\n RobertaConfig,\n RobertaForSequenceClassification,\n RobertaTokenizer,\n XLMConfig, XLMForSequenceClassification,\n XLMTokenizer, XLNetConfig,\n XLNetForSequenceClassification,\n XLNetTokenizer,\n DistilBertConfig,\n DistilBertForSequenceClassification,\n DistilBertTokenizer)\n\nfrom transformers import AdamW, WarmupLinearSchedule\n\nfrom transformers import glue_compute_metrics as compute_metrics\nfrom transformers import glue_output_modes as output_modes\nfrom transformers import glue_processors as processors\nfrom transformers import glue_convert_examples_to_features as convert_examples_to_features\n\nlogger = logging.getLogger(__name__)\n\nALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig, \n RobertaConfig, DistilBertConfig)), ())\n\nMODEL_CLASSES = {\n 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),\n 'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),\n 'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),\n 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),\n 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer)\n}\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef train(args, train_dataset, model, tokenizer):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule 
(linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0])\n set_seed(args) # Added here for reproductibility (even between python 2 and 3)\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if args.model_type != 'distilbert':\n inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate 
when single GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n tb_writer.add_scalar('eval_{}'.format(key), value, global_step)\n tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n torch.save(args, os.path.join(output_dir, 'training_args.bin'))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, prefix=\"\"):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\n eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == \"mnli\" else (args.output_dir,)\n\n results = {}\n for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):\n eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if args.model_type != 'distilbert':\n inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs['labels'].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n if args.output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif args.output_mode == \"regression\":\n preds = np.squeeze(preds)\n result = compute_metrics(eval_task, preds, out_label_ids)\n 
results.update(result)\n\n output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n return results\n\n\ndef load_and_cache_examples(args, task, tokenizer, evaluate=False):\n if args.local_rank not in [-1, 0] and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n processor = processors[task]()\n output_mode = output_modes[task]\n # Load data features from cache or dataset file\n cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(\n 'dev' if evaluate else 'train',\n list(filter(None, args.model_name_or_path.split('/'))).pop(),\n str(args.max_seq_length),\n str(task)))\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n else:\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n label_list = processor.get_labels()\n if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']:\n # HACK(label indices are swapped in RoBERTa pretrained model)\n label_list[1], label_list[2] = label_list[2], label_list[1] \n examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)\n features = convert_examples_to_features(examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=output_mode,\n pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,\n )\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save(features, cached_features_file)\n\n if args.local_rank == 0 and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n if output_mode == \"classification\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)\n return dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\", default=None, type=str, required=True,\n help=\"The input data dir. 
Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--model_type\", default=None, type=str, required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()))\n parser.add_argument(\"--model_name_or_path\", default=None, type=str, required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS))\n parser.add_argument(\"--task_name\", default=None, type=str, required=True,\n help=\"The name of the task to train selected in the list: \" + \", \".join(processors.keys()))\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Pretrained config name or path if not the same as model_name\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\")\n parser.add_argument(\"--cache_dir\", default=\"\", type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n parser.add_argument(\"--max_seq_length\", default=128, type=int,\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n parser.add_argument(\"--do_train\", action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--evaluate_during_training\", action='store_true',\n help=\"Rul evaluation during training at each logging step.\")\n parser.add_argument(\"--do_lower_case\", action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--per_gpu_eval_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--num_train_epochs\", default=3.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--max_steps\", default=-1, type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int,\n help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument('--logging_steps', type=int, default=50,\n help=\"Log every X updates steps.\")\n parser.add_argument('--save_steps', type=int, default=50,\n help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\"--eval_all_checkpoints\", action='store_true',\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\")\n parser.add_argument(\"--no_cuda\", action='store_true',\n help=\"Avoid using CUDA when available\")\n parser.add_argument('--overwrite_output_dir', action='store_true',\n help=\"Overwrite the content of the output directory\")\n parser.add_argument('--overwrite_cache', action='store_true',\n help=\"Overwrite the cached training and evaluation sets\")\n parser.add_argument('--seed', type=int, default=42,\n help=\"random seed for initialization\")\n\n parser.add_argument('--fp16', action='store_true',\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\")\n parser.add_argument('--fp16_opt_level', type=str, default='O1',\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\")\n parser.add_argument(\"--local_rank\", type=int, default=-1,\n help=\"For distributed training: local_rank\")\n parser.add_argument('--server_ip', type=str, default='', help=\"For distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"For distant debugging.\")\n args = parser.parse_args()\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:\n raise ValueError(\"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(args.output_dir))\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)\n\n # Set seed\n set_seed(args)\n\n # Prepare GLUE task\n args.task_name = args.task_name.lower()\n if args.task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (args.task_name))\n processor = processors[args.task_name]()\n args.output_mode = output_modes[args.task_name]\n label_list = processor.get_labels()\n num_labels = len(label_list)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)\n model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n model.to(args.device)\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n\n # Training\n if args.do_train:\n train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = model.module if 
hasattr(model, 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir)\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n model.to(args.device)\n\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else \"\"\n \n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n result = evaluate(args, model, tokenizer, prefix=prefix)\n result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())\n results.update(result)\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.cuda.manual_seed_all", "torch.no_grad", "numpy.random.seed", "torch.cuda.is_available", "torch.distributed.init_process_group", "torch.save", "torch.cuda.device_count", "torch.nn.DataParallel", "torch.utils.data.RandomSampler", "torch.device", "torch.cuda.set_device", "torch.load", "torch.distributed.get_world_size", "torch.distributed.get_rank", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.tensor", "numpy.argmax", "torch.distributed.barrier", "torch.nn.parallel.DistributedDataParallel", "torch.utils.data.TensorDataset", "torch.utils.data.distributed.DistributedSampler", "numpy.squeeze" ] ]
amanaster2/landlab
[ "ea17f8314eb12e3fc76df66c9b6ff32078caa75c", "ea17f8314eb12e3fc76df66c9b6ff32078caa75c" ]
[ "tests/components/erosion_deposition/test_general_erodep.py", "landlab/components/profiler/base_profiler.py" ]
[ "import numpy as np\nimport pytest\nfrom numpy import testing\n\nfrom landlab import RasterModelGrid\nfrom landlab.components import ErosionDeposition, FlowAccumulator\n\n\ndef test_Ff_too_high_vals():\n \"\"\"\n Test that instantiating ErosionDeposition with a F_f value > 1 throws a\n ValueError.\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n mg.add_zeros(\"topographic__elevation\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n\n # Create a D8 flow handler\n FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Instantiate the ErosionDeposition component...\n with pytest.raises(ValueError):\n ErosionDeposition(\n mg,\n K=0.01,\n F_f=2.0,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit=0.0,\n solver=\"basic\",\n )\n\n\ndef test_Ff_too_low_vals():\n \"\"\"\n Test that instantiating ErosionDeposition with a F_f value < 0 throws a\n ValueError.\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n mg.add_zeros(\"topographic__elevation\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n\n # Create a D8 flow handler\n FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Instantiate the ErosionDeposition component...\n with pytest.raises(ValueError):\n ErosionDeposition(\n mg,\n K=0.01,\n F_f=-0.5,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit=0.0,\n solver=\"basic\",\n )\n\n\ndef test_q_as_field():\n \"\"\"\n Test that passing in water discharge as a grid field results in self.q\n holding correct values\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n mg.add_zeros(\"topographic__elevation\", at=\"node\")\n q = mg.add_zeros(\"user_imposed_discharge\", at=\"node\")\n q[:] += 1.0 # add 1.0 m3/yr of water\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n\n # Create a D8 flow handler\n FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Instantiate the ErosionDeposition component...\n ed = ErosionDeposition(\n mg,\n K=0.01,\n F_f=0.0,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit=0.0,\n discharge_field=\"user_imposed_discharge\",\n solver=\"basic\",\n )\n\n # ensure that ed._q is everywhere equal to 1.0 
m3/yr.\n testing.assert_array_equal(\n np.ones(mg.number_of_nodes),\n ed._q,\n err_msg=\"E/D discharge field test failed\",\n verbose=True,\n )\n\n\ndef test_q_as_array():\n \"\"\"\n Test that passing in water discharge as an array results in self.q\n holding correct values\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n mg.add_zeros(\"topographic__elevation\", at=\"node\")\n q = np.zeros(mg.number_of_nodes)\n q[:] += 1.0 # add 1.0 m3/yr of water\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n\n # Create a D8 flow handler\n FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Instantiate the ErosionDeposition component...\n ed = ErosionDeposition(\n mg,\n K=0.01,\n F_f=0.0,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit=0.0,\n discharge_field=q,\n solver=\"basic\",\n )\n\n # ensure that ed._q is everywhere equal to 1.0 m3/yr.\n testing.assert_array_equal(\n np.ones(mg.number_of_nodes),\n ed._q,\n err_msg=\"E/D discharge array test failed\",\n verbose=True,\n )\n\n\ndef test_sediment__flux_already_created():\n \"\"\"\n Test that an existing sediment flux grid field is not changed by\n instantiating ErosionDeposition.\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n mg.add_zeros(\"topographic__elevation\", at=\"node\")\n qs = mg.add_zeros(\"sediment__flux\", at=\"node\")\n qs[:] += 1.0 # add 1.0 m3/yr of flux\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n\n # Create a D8 flow handler\n FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Instantiate the ErosionDeposition component...\n ed = ErosionDeposition(\n mg,\n K=0.01,\n F_f=0.0,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit=0.0,\n solver=\"basic\",\n )\n\n # ensure that 'sediment__flux' field is everywhere equal to 1.0 m3/yr.\n testing.assert_array_equal(\n np.ones(mg.number_of_nodes),\n ed._qs,\n err_msg=\"E/D sediment flux field test failed\",\n verbose=True,\n )\n", "# coding: utf8\n# ! /usr/env/python\n\"\"\"Base class for profile constructors.\"\"\"\n\nfrom abc import ABC, abstractmethod\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\n\nfrom landlab import Component\nfrom landlab.plot import imshow_grid\nfrom landlab.utils.return_array import return_array_at_node\n\n\ndef _recursive_max(jagged):\n \"\"\"\n Examples\n --------\n from landlab.components.profiler.base_profiler import _recursive_max\n >>> struct = [[1, 2, 3, 4],\n ... [[2, 3, 4, 5],\n ... [3, 4, 5, 6]],\n ... 
[4, 5, 6, 7]]\n >>> _recursive_max(struct)\n 7\n >>> _recursive_max([100])\n 100\n \"\"\"\n return max(_recursive_max(j) if hasattr(j, \"__iter__\") else j for j in jagged)\n\n\ndef _recursive_min(jagged):\n \"\"\"\n Examples\n --------\n from landlab.components.profiler.base_profiler import _recursive_min\n >>> struct = [[1, 2, 3, 4],\n ... [[2, 3, 4, 5],\n ... [3, 4, 5, 6]],\n ... [4, 5, 6, 7]]\n >>> _recursive_min(struct)\n 1\n >>> _recursive_min([100])\n 100\n \"\"\"\n return min(_recursive_min(j) if hasattr(j, \"__iter__\") else j for j in jagged)\n\n\nclass _BaseProfiler(ABC, Component):\n \"\"\"Base class to handle profilers.\n\n Primarily exists to handle plotting.\n \"\"\"\n\n _name = \"_BaseProfiler\"\n\n _unit_agnostic = True\n\n _info = {}\n\n def __init__(self, grid):\n super().__init__(grid)\n\n def run_one_step(self):\n \"\"\"Calculate the profile data structure and distances along it.\"\"\"\n # calculate the profile IDs data structure.\n self._create_profile_structure()\n\n @abstractmethod\n def _create_profile_structure(self):\n \"\"\"Private class for creating profile structure.\n\n Expectation is that this will be overridden to create the following\n three private attributes:\n\n self._nodes\n self._distance_along_profile\n\n are each lists of numpy arrays, one array per segment.\n\n self._colors\n\n is a list of RGBA tuples, one tuple per segment.\n\n The order of segments is expected to be consistent between each of the\n three data structures.\n \"\"\"\n ... # pragma: no cover\n\n @property\n def distance_along_profile(self):\n \"\"\"List of distances along profile for each segment.\n\n Examples\n --------\n >>> import numpy as np\n >>> from landlab import RasterModelGrid\n >>> from landlab.components import (\n ... FastscapeEroder,\n ... FlowAccumulator,\n ... ChannelProfiler\n ... )\n >>> mg = RasterModelGrid((10, 10), xy_spacing=10)\n >>> np.random.seed(42)\n >>> z = mg.add_zeros('topographic__elevation', at='node')\n >>> z[mg.core_nodes] += np.random.randn(mg.core_nodes.size)\n >>> fa = FlowAccumulator(mg)\n >>> sp = FastscapeEroder(mg, K_sp=0.0001)\n >>> dt = 1000\n >>> for i in range(200):\n ... fa.run_one_step()\n ... sp.run_one_step(dt=dt)\n ... z[mg.core_nodes] += 0.001 * dt\n >>> profiler = ChannelProfiler(mg)\n >>> profiler.run_one_step()\n >>> profiler.distance_along_profile\n [array([ 0., 10., 20., 30., 40., 50.])]\n \"\"\"\n return self._distance_along_profile\n\n @property\n def nodes(self):\n \"\"\"List of node ids for each segment.\n\n Examples\n --------\n >>> import numpy as np\n >>> from landlab import RasterModelGrid\n >>> from landlab.components import (\n ... FastscapeEroder,\n ... FlowAccumulator,\n ... ChannelProfiler\n ... )\n >>> mg = RasterModelGrid((10, 10), xy_spacing=10)\n >>> np.random.seed(42)\n >>> z = mg.add_zeros('topographic__elevation', at='node')\n >>> z[mg.core_nodes] += np.random.randn(mg.core_nodes.size)\n >>> fa = FlowAccumulator(mg)\n >>> sp = FastscapeEroder(mg, K_sp=0.0001)\n >>> dt = 1000\n >>> for i in range(200):\n ... fa.run_one_step()\n ... sp.run_one_step(dt=dt)\n ... z[mg.core_nodes] += 0.001 * dt\n >>> profiler = ChannelProfiler(mg)\n >>> profiler.run_one_step()\n >>> profiler.nodes\n [array([59, 58, 57, 56, 46, 45])]\n \"\"\"\n return self._nodes\n\n @property\n def colors(self):\n \"\"\"List of colors for each segment.\n\n Examples\n --------\n >>> import numpy as np\n >>> from landlab import RasterModelGrid\n >>> from landlab.components import (\n ... FastscapeEroder,\n ... FlowAccumulator,\n ... 
ChannelProfiler\n ... )\n >>> mg = RasterModelGrid((10, 10), xy_spacing=10)\n >>> np.random.seed(42)\n >>> z = mg.add_zeros('topographic__elevation', at='node')\n >>> z[mg.core_nodes] += np.random.randn(mg.core_nodes.size)\n >>> fa = FlowAccumulator(mg)\n >>> sp = FastscapeEroder(mg, K_sp=0.0001)\n >>> dt = 1000\n >>> for i in range(200):\n ... fa.run_one_step()\n ... sp.run_one_step(dt=dt)\n ... z[mg.core_nodes] += 0.001 * dt\n >>> profiler = ChannelProfiler(mg)\n >>> profiler.run_one_step()\n >>> np.round(profiler.colors, decimals=2)\n array([[ 0.27, 0. , 0.33, 1. ]])\n \"\"\"\n return self._colors\n\n def plot_profiles(\n self,\n field=\"topographic__elevation\",\n xlabel=\"Distance Along Profile\",\n ylabel=\"Plotted Quantity\",\n title=\"Extracted Profiles\",\n color=None,\n ):\n \"\"\"Plot distance-upstream vs at at-node or size (nnodes,) quantity.\n\n Parameters\n ----------\n field : field name or nnode array\n Array of the at-node-field to plot against distance upstream.\n Default value is the at-node field 'topographic__elevation'.\n xlabel : str, optional\n X-axis label, default is \"Distance Along Profile\".\n ylabel : str, optional\n Y-axis label, default value is \"Plotted Quantity\".\n title : str, optional\n Plot title, default value is \"Extracted Profiles\".\n color : RGBA tuple or color string\n Color to use in order to plot all profiles the same color. Default\n is None, and the colors assigned to each profile are used.\n \"\"\"\n quantity = return_array_at_node(self._grid, field)\n\n # create segments the way that line collection likes them.\n segments = []\n qmin = []\n qmax = []\n for idx, nodes in enumerate(self._nodes):\n segments.append(\n list(zip(self._distance_along_profile[idx], quantity[nodes]))\n )\n qmin.append(min(quantity[nodes]))\n qmax.append(max(quantity[nodes]))\n\n # We need to set the plot limits.\n ax = plt.gca()\n ax.set_xlim(\n _recursive_min(self._distance_along_profile),\n _recursive_max(self._distance_along_profile),\n )\n ax.set_ylim(min(qmin), max(qmax))\n\n line_segments = LineCollection(segments)\n colors = color or self._colors\n line_segments.set_color(colors)\n ax.add_collection(line_segments)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_title(title)\n\n def plot_profiles_in_map_view(\n self, field=\"topographic__elevation\", endpoints_only=False, color=None, **kwds\n ):\n \"\"\"Plot profile locations in map view.\n\n Parameters\n ----------\n field : field name or nnode array\n Array of the at-node-field to plot as the 2D map values.\n Default value is the at-node field 'topographic__elevation'.\n endpoints_only : boolean\n Boolean where False (default) indicates every node along the\n profile is plotted, or True indicating only segment endpoints are\n plotted.\n color : RGBA tuple or color string\n Color to use in order to plot all profiles the same color. 
Default\n is None, and the colors assigned to each profile are used.\n **kwds : dictionary\n Keyword arguments to pass to imshow_grid.\n \"\"\"\n # make imshow_grid background\n imshow_grid(self._grid, field, **kwds)\n ax = plt.gca()\n\n # create segments the way that line collection likes them.\n segments = []\n for idx, nodes in enumerate(self._nodes):\n if endpoints_only:\n select_nodes = [nodes[0], nodes[-1]]\n segments.append(\n list(\n zip(\n self._grid.x_of_node[select_nodes],\n self._grid.y_of_node[select_nodes],\n )\n )\n )\n\n else:\n segments.append(\n list(zip(self._grid.x_of_node[nodes], self._grid.y_of_node[nodes]))\n )\n\n line_segments = LineCollection(segments)\n colors = color or self._colors\n line_segments.set_color(colors)\n ax.add_collection(line_segments)\n" ]
[ [ "numpy.ones", "numpy.zeros" ], [ "matplotlib.pyplot.gca", "matplotlib.collections.LineCollection" ] ]
lasdasdas/tensorflow
[ "673b993983f37f332ff70cdb642305f69089337d" ]
[ "tensorflow/python/distribute/parameter_server_strategy_v2.py" ]
[ "# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Parameter server strategy V2 class.\n\nThis is currently under development and the API is subject to change.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import distribute_utils\nfrom tensorflow.python.distribute import parameter_server_strategy\nfrom tensorflow.python.distribute import sharded_variable\nfrom tensorflow.python.eager import remote\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import server_lib\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"distribute.experimental.ParameterServerStrategy\", v1=[])\nclass ParameterServerStrategyV2(distribute_lib.Strategy):\n \"\"\"An multi-worker tf.distribute strategy with parameter servers.\n\n Parameter server training refers to the distributed training architecture that\n requires two types of tasks in the cluster: workers (referred to as \"worker\"\n task) and parameter servers (referred to as \"ps\" task). The variables and\n updates to those variables are placed on ps, and most computation intensive\n operations are placed on workers.\n\n In TF2, parameter server training makes use of one coordinator, with some\n number of workers, and (usually fewer) ps. The coordinator uses a\n `tf.distribute.experimental.coordinator.ClusterCoordinator` to coordinate the\n cluster, and a `tf.distribute.experimental.ParameterServerStrategy` for\n variable distribution. The coordinator does not perform the actual training.\n Each of the workers and ps runs a `tf.distribute.Server`, which the\n coordinator connects to through the use of aforementioned two APIs.\n\n For the training to work, the coordinator sends requests to workers for the\n `tf.function`s to be executed on remote workers. Upon receiving requests from\n the coordinator, a worker executes the `tf.function` by reading the variables\n from parameter servers, executing the ops, and updating the variables on the\n parameter servers. Each of the worker only processes the requests from the\n coordinator, and communicates with parameter servers, without direct\n interactions with any of the other workers in the cluster.\n\n As a result, failures of some workers do not prevent the cluster from\n continuing the work, and this allows the cluster to train with instances that\n can be occasionally unavailable (e.g. 
preemptible or spot instances). The\n coordinator and parameter servers though, must be available at all times for\n the cluster to make progress.\n\n Note that the coordinator is not one of the training worker. Instead, its\n responsibility includes placing variables on ps, remotely executing\n `tf.function`s on workers, and saving checkpoints. Parameter server training\n thus consists of a server cluster with worker and ps, and a coordinator which\n connects to them to coordinate. Optionally, an evaluator can be run on the\n side that periodically reads the checkpoints saved by the coordinator, and\n saves summaries for example.\n\n `tf.distribute.experimental.ParameterServerStrategy` works closely with the\n associated `tf.distribute.experimental.coordinator.ClusterCoordinator` object,\n and should be used in conjunction with it. Standalone usage of\n `tf.distribute.experimental.ParameterServerStrategy` without a\n `tf.distribute.experimental.coordinator.ClusterCoordinator` indicates\n a parameter server training scheme without a centralized coordinator, which is\n not supported at this time.\n\n __Example code for coordinator__\n\n Here's an example usage of the API, with a custom training loop to train a\n model. This code snippet is intended to be run on (the only) one machine that\n is designated as the coordinator. Note that `cluster_resolver`,\n `variable_partitioner`, and `dataset_fn` arguments are explained in the\n following \"Cluster setup\", \"Variable partitioning\", and \"Dataset preparation\"\n sections.\n\n Currently, environment variable `GRPC_FAIL_FAST` needs to be set in all tasks\n to work around a known hanging issue as the following code illustrates:\n\n ```python\n # Set the environment variable to allow reporting worker and ps failure to the\n # coordinator.\n os.environ[\"GRPC_FAIL_FAST\"] = \"use_caller\"\n\n # Prepare a strategy to use with the cluster and variable partitioning info.\n strategy = tf.distribute.experimental.ParameterServerStrategy(\n cluster_resolver=...,\n variable_partitioner=...)\n coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(\n strategy=strategy)\n\n # Prepare a distribute dataset that will place datasets on the workers.\n distributed_dataset = coordinator.create_per_worker_dataset(dataset_fn=...)\n\n with strategy.scope():\n model = ... # Variables created can possibly be container of variables\n optimizer, metrics = ... # Keras optimizer/metrics are great choices\n checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint, checkpoint_dir, max_to_keep=2)\n # `load_checkpoint` infers initial epoch from `optimizer.iterations`.\n initial_epoch = load_checkpoint(checkpoint_manager) or 0\n\n @tf.function\n def worker_fn(iterator):\n\n def replica_fn(inputs):\n batch_data, labels = inputs\n # calculate gradient, applying gradient, metrics update etc.\n\n strategy.run(replica_fn, args=(next(iterator),))\n\n for epoch in range(initial_epoch, num_epoch):\n distributed_iterator = iter(distributed_dataset) # Reset iterator state.\n for step in range(steps_per_epoch):\n\n # Asynchronously schedule the `worker_fn` to be executed on an arbitrary\n # worker. This call returns immediately.\n coordinator.schedule(worker_fn, args=(distributed_iterator,))\n\n # `join` blocks until all scheduled `worker_fn`s finish execution. 
Once it\n # returns, we can read the metrics and save checkpoints as needed.\n coordinator.join()\n logging.info('Metric result: %r', metrics.result())\n train_accuracy.reset_states()\n checkpoint_manager.save()\n ```\n\n __Example code for worker and parameter servers__\n\n In addition to the coordinator, there should be multiple machines designated\n as \"worker\" or \"ps\". They should run the following code to start a TensorFlow\n server, waiting for coordinator's request to execute functions or place\n variables:\n\n ```python\n # Set the environment variable to allow reporting worker and ps failure to the\n # coordinator.\n os.environ[\"GRPC_FAIL_FAST\"] = \"use_caller\"\n\n # Provide a `tf.distribute.cluster_resolver.ClusterResolver` that serves\n # the cluster information. See below \"Cluster setup\" section.\n cluster_resolver = ...\n\n server = tf.distribute.Server(\n cluster_resolver.cluster_spec().as_cluster_def(),\n job_name=cluster_resolver.task_type,\n task_index=cluster_resolver.task_id,\n protocol=protocol)\n\n # Blocking the process that starts a server from exiting.\n server.join()\n ```\n\n __Cluster setup__\n\n In order for the tasks in the cluster to know other tasks' addresses,\n a `tf.distribute.cluster_resolver.ClusterResolver` is required to be used\n in coordinator, worker, and ps. The\n `tf.distribute.cluster_resolver.ClusterResolver` is responsible for providing\n the cluster information, as well as the task type and id of the current task.\n See `tf.distribute.cluster_resolver.ClusterResolver` for more information.\n\n If `TF_CONFIG` environment variable is used for the processes to know the\n cluster information, a\n `tf.distribute.cluster_resolver.TFConfigClusterResolver` should be used. Note\n that for legacy reason, \"chief\" should be used as the task type for the\n coordinator, as the following example demonstrates. Here we set `TF_CONFIG`\n in environment variable, intended to be run by the process of the machine\n designated as the parameter server (task type \"ps\") and index 1 (the second),\n in a cluster with 1 chief, 2 parameter servers, and 3 workers. 
Note that the\n it needs to be set before the use of\n `tf.distribute.cluster_resolver.TFConfigClusterResolver`.\n\n Example code for cluster setup:\n ```python\n os.environ['TF_CONFIG'] = '''\n {\n \"cluster\": {\n \"chief\": [\"chief.example.com:2222\"],\n \"ps\": [\"ps0.example.com:2222\", \"ps1.example.com:2222\"],\n \"worker\": [\"worker0.example.com:2222\", \"worker1.example.com:2222\",\n \"worker2.example.com:2222\"]\n },\n \"task\": {\n \"type\": \"ps\",\n \"index\": 1\n }\n }\n '''\n os.environ[\"GRPC_FAIL_FAST\"] = \"use_caller\"\n cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()\n\n # If coordinator (\"chief\" task type), create a strategy\n if cluster_resolver.task_type == 'chief':\n strategy = tf.distribute.experimental.ParameterServerStrategy(\n cluster_resolver)\n ...\n\n # If worker/ps, create a server\n elif cluster_resolver.task_type in (\"worker\", \"ps\"):\n server = tf.distribute.Server(...)\n ...\n ```\n\n __Variable creation with `strategy.scope()`__\n\n `tf.distribute.experimental.ParameterServerStrategy` follows the\n `tf.distribute` API contract where variable creation is expected to be inside\n the context manager returned by `strategy.scope()`, in order to be correctly\n placed on parameter servers in a round-robin manner:\n\n ```python\n # In this example, we're assuming having 3 ps.\n strategy = tf.distribute.experimental.ParameterServerStrategy(\n cluster_resolver=...)\n coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(\n strategy=strategy)\n\n # Variables should be created inside scope to be placed on parameter servers.\n # If created outside scope such as `v1` here, it would be placed on\n coordinator.\n v1 = tf.Variable(initial_value=0.0)\n\n with strategy.scope():\n v2 = tf.Variable(initial_value=1.0)\n v3 = tf.Variable(initial_value=2.0)\n v4 = tf.Variable(initial_value=3.0)\n v5 = tf.Variable(initial_value=4.0)\n\n # v2 through v5 are created in scope and are distributed on parameter servers.\n # Default placement is round-robin but the order should not be relied on.\n assert v2.device == \"/job:ps/replica:0/task:0/device:CPU:0\"\n assert v3.device == \"/job:ps/replica:0/task:1/device:CPU:0\"\n assert v4.device == \"/job:ps/replica:0/task:2/device:CPU:0\"\n assert v5.device == \"/job:ps/replica:0/task:0/device:CPU:0\"\n ```\n\n See `distribute.Strategy.scope` for more information.\n\n __Variable partitioning__\n\n Having dedicated servers to store variables means being able to divide up, or\n \"shard\" the variables across the ps. Large embeddings that would otherwise\n exceed memory limit of a single machine can be used in a cluster with enough\n number of ps.\n\n With `tf.distribute.experimental.ParameterServerStrategy`, if a\n `variable_partitioner` is provided to `__init__` and certain conditions are\n satisfied, the resulting variables created in scope are sharded across the\n parameter servers, in a round-robin fashion. The variable reference returned\n from `tf.Variable` becomes a type that serves as the container of the sharded\n variables. Access `variables` attribute of this container for the actual\n variable components. 
See arguments section of\n `tf.distribute.experimental.ParameterServerStrategy.__init__` for more\n information.\n\n To initialize the sharded variables in a more memory-efficient way, use an\n initializer whose `__call__` accepts a `shard_info` argument, and use\n `shard_info.offset` and `shard_info.shape` to create and return a\n partition-aware `tf.Tensor` to initialize the variable components.\n\n ```python\n class PartitionAwareIdentity(object):\n\n def __call__(self, shape, dtype, shard_info):\n value = tf.eye(*shape, dtype=dtype)\n if shard_info is not None:\n value = tf.slice(value, shard_info.offset, shard_info.shape)\n return value\n\n cluster_resolver = ...\n strategy = tf.distribute.experimental.ParameterServerStrategy(\n cluster_resolver, tf.fixed_size_partitioner(2))\n with strategy.scope():\n initializer = PartitionAwareIdentity()\n initial_value = functools.partial(initializer, shape=(4, 4), dtype=tf.int64)\n v = tf.Variable(\n initial_value=initial_value, shape=(4, 4), dtype=tf.int64)\n\n # `v.variables` gives the actual variable components.\n assert len(v.variables) == 2\n assert v.variables[0].device == \"/job:ps/replica:0/task:0/device:CPU:0\"\n assert v.variables[1].device == \"/job:ps/replica:0/task:1/device:CPU:0\"\n assert np.array_equal(v.variables[0].numpy(), [[1, 0, 0, 0], [0, 1, 0, 0]])\n assert np.array_equal(v.variables[1].numpy(), [[0, 0, 1, 0], [0, 0, 0, 1]])\n ```\n\n __Dataset preparation__\n\n With `tf.distribute.experimental.ParameterServerStrategy`, a dataset is\n created in each of the workers to be used for training. This is done by\n creating a `dataset_fn` that takes no argument and returns a\n `tf.data.Dataset`, and passing the `dataset_fn` into\n `tf.distribute.experimental.coordinator.\n ClusterCoordinator.create_per_worker_dataset`. We recommend the dataset to be\n shuffled and repeated to have the examples run through the training as evenly\n as possible.\n\n ```python\n def dataset_fn():\n filenames = ...\n dataset = tf.data.Dataset.from_tensor_slices(filenames)\n\n # Dataset is recommended to be shuffled, and repeated.\n return dataset.shuffle(buffer_size=...).repeat().batch(batch_size=...)\n\n coordinator =\n tf.distribute.experimental.coordinator.ClusterCoordinator(strategy=...)\n distributed_dataset = coordinator.create_per_worker_dataset(dataset_fn)\n\n ```\n\n __Limitations__\n\n * `tf.distribute.experimental.ParameterServerStrategy` in TF2 is experimental,\n and the API is subject to further changes.\n\n * `tf.distribute.experimental.ParameterServerStrategy` does not yet support\n training with GPU(s). This is a feature request being developed.\n\n * `tf.distribute.experimental.ParameterServerStrategy` only supports\n [custom training loop\n API](https://www.tensorflow.org/tutorials/distribute/custom_training)\n currently in TF2. Usage of it with Keras `compile`/`fit` API is being\n developed.\n\n * `tf.distribute.experimental.ParameterServerStrategy` must be used with\n `tf.distribute.experimental.coordinator.ClusterCoordinator`.\n\n * This strategy is not intended for TPU. 
Use\n `tf.distribute.experimental.TPUStrategy` instead.\n \"\"\"\n\n # pyformat: disable\n def __init__(self, cluster_resolver, variable_partitioner=None):\n \"\"\"Initializes the TF2 parameter server strategy.\n\n This initializes the `tf.distribute.experimental.ParameterServerStrategy`\n object to be ready for use with\n `tf.distribute.experimental.coordinator.ClusterCoordinator`.\n\n Args:\n cluster_resolver: a `tf.distribute.cluster_resolver.ClusterResolver`\n object.\n variable_partitioner:\n a callable with the signature `num_partitions = fn(shape, dtype)`, where\n `num_partitions` is a list/tuple representing the number of partitions\n on each axis, and `shape` and `dtype` are of types `tf.TensorShape` and\n `tf.dtypes.Dtype`. If `None`, variables will not be partitioned.\n\n * `variable_partitioner` will be called for all variables created under\n strategy `scope` to instruct how the variables should be partitioned.\n Variables will be created in multiple partitions if there are more than\n one partition along the partitioning axis, otherwise it falls back to\n normal `tf.Variable`.\n\n * Only the first / outermost axis partitioning is supported, namely,\n elements in `num_partitions` must be 1 other than the first element.\n\n * Partitioner like `tf.compat.v1.min_max_variable_partitioner`,\n `tf.compat.v1.variable_axis_size_partitioner` and\n `tf.compat.v1.fixed_size_partitioner` are also supported since they\n conform to the required signature.\n\n * Div partition\n strategy is used to partition variables. Assuming we assign consecutive\n integer ids along the first axis of a variable, then ids are assigned to\n shards in a contiguous manner, while attempting to keep each shard size\n identical. If the ids do not evenly divide the number of shards, each of\n the first several shards will be assigned one more id. 
For instance, a\n variable whose first dimension is 13 has 13 ids, and they are split\n across 5 shards as:\n `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`.\n\n * Variables created under `strategy.extended.colocate_vars_with` will\n not be partitioned, e.g, optimizer's slot variables.\n \"\"\"\n # pyformat: enable\n self._cluster_resolver = cluster_resolver\n self._extended = ParameterServerStrategyV2Extended(self, cluster_resolver,\n variable_partitioner)\n self._verify_args_and_config(cluster_resolver)\n logging.info(\n \"`tf.distribute.experimental.ParameterServerStrategy` is initialized \"\n \"with cluster_spec: %s\", cluster_resolver.cluster_spec())\n\n # TODO(b/167894802): Make coordinator, worker, and ps names customizable.\n self._connect_to_cluster(coordinator_name=\"chief\")\n super(ParameterServerStrategyV2, self).__init__(self._extended)\n distribute_lib.distribution_strategy_gauge.get_cell(\"V2\").set(\n \"ParameterServerStrategy\")\n\n def _connect_to_cluster(self, coordinator_name):\n if coordinator_name in [\"worker\", \"ps\"]:\n raise ValueError(\"coordinator name should not be 'worker' or 'ps'.\")\n cluster_spec = self._cluster_resolver.cluster_spec()\n self._num_workers = len(cluster_spec.as_dict().get(\"worker\", ()))\n self._num_ps = len(cluster_spec.as_dict().get(\"ps\", ()))\n\n device_filters = server_lib.ClusterDeviceFilters()\n # For any worker, only the devices on ps and coordinator nodes are visible\n for i in range(self._num_workers):\n device_filters.set_device_filters(\n \"worker\", i, [\"/job:ps\", \"/job:%s\" % coordinator_name])\n # Similarly for any ps, only the devices on workers and coordinator are\n # visible\n for i in range(self._num_ps):\n device_filters.set_device_filters(\n \"ps\", i, [\"/job:worker\", \"/job:%s\" % coordinator_name])\n\n # Allow at most one outstanding RPC for each worker at a certain time. 
This\n # is to simplify worker failure handling in the runtime\n os.environ[\"TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE\"] = \"False\"\n\n logging.info(\"%s is now connecting to cluster with cluster_spec: %r\",\n self.__class__.__name__, cluster_spec)\n remote.connect_to_cluster(\n cluster_spec,\n job_name=coordinator_name,\n protocol=self._cluster_resolver.rpc_layer,\n cluster_device_filters=device_filters)\n\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"ps_strategy_num_workers\").set(self._num_workers)\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"ps_strategy_num_ps\").set(self._num_ps)\n\n def _verify_args_and_config(self, cluster_resolver):\n if not cluster_resolver.cluster_spec():\n raise ValueError(\"Cluster spec must be non-empty in `cluster_resolver`.\")\n if self.extended._num_gpus_per_worker > 1: # pylint: disable=protected-access\n raise NotImplementedError(\"Multi-gpu is not supported yet.\")\n\n\nclass ParameterServerStrategyV2Extended(\n parameter_server_strategy.ParameterServerStrategyExtended):\n \"\"\"Extended class for ParameterServerStrategyV2.\n\n Please see `tf.distribute.StrategyExtended` doc for more information.\n \"\"\"\n\n def __init__(self, container_strategy, cluster_resolver,\n variable_partitioner):\n \"\"\"Initialization of ParameterServerStrategyV2Extended.\"\"\"\n super(ParameterServerStrategyV2Extended, self).__init__(container_strategy)\n self._num_ps = len(cluster_resolver.cluster_spec().as_dict().get(\"ps\", []))\n self._variable_count = 0\n self._variable_partitioner = variable_partitioner\n\n def _create_variable(self, next_creator, **kwargs):\n \"\"\"Implements StrategyExtendedV2._create_variable.\n\n Creates a `Variable` or a `ShardedVariable`. A `ShardedVariable` will be\n created if satisfying all the following criteria:\n 1. `self._variable_partitioner` results in more than one partition on the\n first axis.\n 2. variable's rank is greater than 0.\n 3. variable is not colocated with another variable.\n Otherwise a `Variable` will be created.\n\n Args:\n next_creator: See `variable_scope.variable_creator_scope`; the next\n creator in the chain.\n **kwargs: Passed through to the next creator.\n\n Returns:\n A `Variable` or `ShardedVariable`.\n \"\"\"\n\n if \"colocate_with\" in kwargs: # Never partition colocated_with variables.\n colocate_with = kwargs[\"colocate_with\"]\n # Clear the variable scope to avoid possible conflicts between device\n # scope and colocation scope.\n with ops.device(None):\n with ops.colocate_with(colocate_with):\n var = next_creator(**kwargs)\n logging.debug(\n \"Creating variable (name:%s, shape:%r) that colocates with %s\",\n var.name, var.shape, kwargs[\"colocate_with\"].name)\n return var\n\n if self._variable_partitioner is None:\n return self._create_variable_round_robin(next_creator, **kwargs)\n\n name = kwargs.get(\"name\", None)\n initial_value = kwargs.get(\"initial_value\", None)\n if initial_value is None:\n raise ValueError(\"initial_value must be specified.\")\n\n # Two cases where initial_value can be a callable:\n # 1. initial_value is passed as a callable, e.g, an `initializer` class.\n # 2. 
restoring from checkpoint, initial_value is a\n # \"CheckpointInitialValueCallable\".\n init_from_fn = callable(initial_value)\n\n dtype = kwargs.get(\"dtype\", None)\n shape = kwargs.get(\"shape\", None)\n if init_from_fn and (shape is None or dtype is None):\n init_from_fn = False\n initial_value = initial_value()\n if not init_from_fn:\n # The initial_value is created on coordinator, it will need to be sent to\n # ps for variable initialization, which can be inefficient and can\n # potentially hit the 2GB limit on protobuf serialization.\n initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)\n dtype = initial_value.dtype\n shape = initial_value.shape\n else:\n shape = tensor_shape.as_shape(shape)\n\n if shape.rank == 0: # Skip partitioning rank-0 variable.\n return self._create_variable_round_robin(next_creator, **kwargs)\n\n num_partitions = self._variable_partitioner(shape=shape, dtype=dtype)\n if not num_partitions or num_partitions[0] == 0 or any(\n v != 1 for v in num_partitions[1:]):\n raise ValueError(\n \"variable_partitioner must return a list/tuple whose elements are 1\"\n \" besides the first element (non-zero), got: %r\" % num_partitions)\n\n if num_partitions[0] == 1: # no partition\n return self._create_variable_round_robin(next_creator, **kwargs)\n\n # Use \"div\" partition strategy to partition the variable.\n num_partitions = min(num_partitions[0], shape[0])\n base = shape[0] // num_partitions\n extra = shape[0] % num_partitions\n # An example: num_partitions=4, shape[0]=10, partitions: [3, 3, 2, 2]\n # offsets: [0, 3, 6, 8, 10]\n offsets = []\n for i in range(num_partitions):\n if i == 0:\n offsets.append(0)\n else:\n prev_shard_size = base + (1 if i - 1 < extra else 0)\n offsets.append(offsets[i - 1] + prev_shard_size)\n offsets.append(shape[0])\n\n def init_shard_fn(shard_index):\n if not init_from_fn:\n logging.log_if(\n logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and\n shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)\n return initial_value[offsets[shard_index]:offsets[shard_index + 1]]\n arg_spec = tf_inspect.getfullargspec(initial_value)\n if (\"shard_info\" not in arg_spec.args and\n \"shard_info\" not in arg_spec.kwonlyargs):\n # `initial_value` is a callable that doesn't accept `shard_info`.\n logging.log_if(\n logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and\n shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)\n full_value = initial_value()\n return full_value[offsets[shard_index]:offsets[shard_index + 1]]\n else:\n # Memory-efficient way of initializing sharded variable. 
It requires\n # the `init_fn` to accept a namedtuple `shard_info`.\n component_shape = (offsets[shard_index + 1] -\n offsets[shard_index],) + shape[1:]\n offsets_all_axes = (offsets[shard_index],) + (0,) * len(shape[1:])\n return initial_value(\n shard_info=trackable.ShardInfo(\n shape=tensor_shape.as_shape(component_shape),\n offset=offsets_all_axes))\n\n var_list = []\n for i in range(num_partitions):\n kwargs[\"shape\"] = (offsets[i + 1] - offsets[i],) + shape[1:]\n kwargs[\"initial_value\"] = lambda: init_shard_fn(i)\n if name is not None:\n kwargs[\"name\"] = \"{}/part_{}\".format(name, i)\n var_list.append(self._create_variable_round_robin(next_creator, **kwargs))\n\n result = sharded_variable.ShardedVariable(var_list)\n return result\n\n def _create_variable_round_robin(self, next_creator, **kwargs):\n # Clear the colocation scope to avoid possible conflicts between device\n # scope and colocation scope.\n with ops.colocate_with(None, ignore_existing=True):\n with ops.device(\"/job:ps/task:%d\" %\n (self._variable_count % self._num_ps)):\n var = next_creator(**kwargs)\n logging.debug(\n \"Creating variable (name:%s, shape:%r) on /job:ps/task:%d\",\n var.name, var.shape, (self._variable_count % self._num_ps))\n self._variable_count += 1\n return var\n\n def _call_for_each_replica(self, fn, args, kwargs):\n with distribute_lib.ReplicaContext(\n self._container_strategy(),\n replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):\n # TODO(rchao): Support multi-replica per worker or sync-group.\n return distribute_utils.regroup((fn(*args, **kwargs),))\n\n\n# The warning that will be logged if the way we initialize sharded variables\n# is memory-inefficient.\n_INEFFICIENT_INIT_WARNING = (\n \"Large variable %s is partitioned but not initialized in a memory-efficient\"\n \" way. The full value is first being created and then sliced into smaller \"\n \"values. To reduce the memory footprint, explicitly specify `dtype` and \"\n \"`shape` when creating variables, and pass a callable to Variable's \"\n \"`initial_value`. The callable should take only one argument which is a \"\n \"namedtuple (shape: `tf.TensorShape`, offsets: list/tuple) where shape is \"\n \"the shape of the component variable, and offsets is the offsets of the \"\n \"smaller variable on each axis.\")\n\n_LARGE_VARIABLE_NUM_ELEMENTS = 1e9\n" ]
[ [ "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.eager.remote.connect_to_cluster", "tensorflow.python.platform.tf_logging.debug", "tensorflow.python.training.server_lib.ClusterDeviceFilters", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.util.tf_inspect.getfullargspec", "tensorflow.python.distribute.sharded_variable.ShardedVariable", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.distribute.distribute_lib.distribution_strategy_gauge.get_cell", "tensorflow.python.framework.ops.device", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.distribute.distribute_lib.distribution_strategy_replica_gauge.get_cell" ] ]
SeanTater/rsvp
[ "2f136ee8aac2d9401427d4d4f0d78f74eb757f15" ]
[ "demo.py" ]
[ "#!/usr/bin/python3\nimport cv2\nimport numpy as np\nimport sqlite3\nimport time\n\nclass QualityCheck:\n def __init__(self):\n \"\"\" Record model runs \"\"\"\n self.db = sqlite3.connect(\"logs.db\", isolation_level=None)\n self.db.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS ModelRun(\n run_id INT,\n num_embed INT,\n original_size INT,\n compressed_size INT,\n mse REAL,\n psnr REAL,\n cube_shape TEXT\n );\n \"\"\")\n self.run_id = int(time.time())\n \n \n def quality_check(self, original, compressed, new, num_embed, cube_shape):\n \"\"\" Compare pixel to pixel quality of each frame\n\n Accepts\n -------\n original (np.array): array containing original video\n compressed (bytes): compressed video bytes\n new (np.array): array containing new video\n num_embed (int): dimensionality of embedding\n\n \"\"\" \n comp_ratio = len(compressed) / original.size\n mse = float(((new - original)**2).mean())\n psnr = 20 * np.log(255) / np.log(10) - 10 * np.log(mse) / np.log(10)\n\n self.db.execute(\n \"INSERT INTO ModelRun(run_id, num_embed, original_size, compressed_size, mse, psnr, cube_shape)\"\n \" VALUES (?,?,?,?,?,?,?);\",\n (self.run_id, num_embed, original.size, len(compressed), mse, psnr, cube_shape)\n )\n\nclass CubeStep:\n def __init__(self, cube_shape=(8,8,8), video_shape=None):\n self.video_shape = video_shape\n self.depth, self.width, self.height = self.cube_shape = cube_shape\n self.colors = 3 # Purely for clarity\n self.cube_size = 3\n for d in cube_shape:\n self.cube_size *= d\n \n def to_cubes(self, images):\n \"\"\" Shatter a series of frames into 8x8x8x3 tensors\n\n Accepts\n -------\n images : np.array of shape (frame_count, width, height, colors) and dtype uint8\n The sequence of images to analyze.\n The image shape must be a multiple of the cube size, no padding will be done.\n \n Returns\n -------\n np.array of (d, w, h, self.depth, self.width, self.height, self.colors)\n where d, w, h are the time, width, and height of the video measured in cubes\n \"\"\"\n if self.video_shape is not None and self.video_shape != images.shape:\n raise ValueError(\"Video shape does not match input shape.\")\n else:\n self.video_shape = images.shape\n frame_count, width, height, colors = self.video_shape\n t = images.reshape((\n frame_count // self.depth,\n self.depth,\n width // self.width,\n self.width,\n height // self.height,\n self.height,\n colors\n ))\n # Make it a (x,y,z,8,8,8,3) tensor so it's a prism of cubes, by color\n return np.moveaxis(t, [0,2,4,1,3,5,6], [0,1,2,3,4,5,6])\n\n def to_matrix(self, cubes):\n \"\"\" Flatten a cube of cubes into a single matrix\n\n Accepts\n -------\n cubes : np.array of (d, w, h, self.depth, self.width, self.height, self.colors)\n where d, w, h are the time, width, and height of the video measured in cubes\n \n Returns\n -------\n np.array of (cube_count, cube_volume)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \"\"\"\n # Copy improves locality later\n return cubes.reshape((-1, self.cube_size)).copy()\n\n def from_cubes(self, cubes):\n \"\"\" Convert cubes back into normal frames\n \n Accepts\n -------\n cubes : np.array of (d, w, h, self.depth, self.width, self.height, self.colors)\n where d, w, h are the time, width, and height of the video measured in cubes\n \n Returns\n -------\n np.array of shape (frame_count, width, height, colors) and dtype float32 (not uint8)\n The sequence of images to analyze.\n The image shape must 
be a multiple of the cube size, no padding will be done.\n \"\"\"\n d,w,h = cubes.shape[:3]\n uncube = np.moveaxis(cubes, [0,1,2,3,4,5,6], [0,2,4,1,3,5,6])\n return uncube.reshape((d*self.depth, w*self.width, h*self.height, self.colors))\n \n def from_matrix(self, samples):\n \"\"\" Convert cubes back into normal frames\n \n Accepts\n -------\n samples : np.array of (cube_count, cube_volume)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \n \n Returns\n -------\n np.array of (d, w, h, self.depth, self.width, self.height, self.colors)\n where d, w, h are the time, width, and height of the video measured in cubes\n \"\"\"\n if self.video_shape is None:\n raise ValueError(\"Video shape unspecified when converting from matrix.\")\n frame_count, width, height, colors = self.video_shape\n return samples.reshape((\n frame_count // self.depth,\n width // self.width,\n height // self.height,\n self.depth,\n self.width,\n self.height,\n colors\n ))\n\n\n\ndef webm_forward():\n vc = cv2.VideoCapture()\n vc.open(\"snippet.webm\")\n images = np.stack([vc.read()[1] for i in range(24)])\n # Divide into 8x8x8 cubes\n frame_count, width, height, colors = images.shape\n \n assert width % 8 == 0 and height % 8 == 0, \"The image dimensions must be divisible by 8\"\n vc.release()\n return images.astype(np.float32)\n\ndef webm_backward(images):\n \"\"\" Save images to a video file as VP8\n\n Accepts\n -------\n images : np.array of shape (frame_count, width, height, colors) and dtype uint8\n The sequence of images to analyze.\n The width, height, and frame count must all be multiples of 8.\n \"\"\"\n d,w,h = images.shape[:3]\n vw = cv2.VideoWriter(\"undemo.mkv\", cv2.VideoWriter_fourcc(*'VP80'), 5, (h,w))\n for frameid in range(d):\n vw.write(images[frameid])\n vw.release()\n\n\n\n\ndef svd_prep(samples, embed=50, subsample=13):\n \"\"\" Create a base to summarize video segments\n\n Accepts\n -------\n samples : np.array of (cube_count, cube_volume)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \n Returns\n -------\n np.array of shape (cube_volume, embed)\n \"\"\"\n\n # Try a basic SVD approximation\n #base = np.random.uniform(0, 1, (8*8*8*colors, embed))\n #breakpoint()\n #base = np.linalg.qr(np.dot(samples, base))[0]\n #base = np.linalg.qr(np.dot(samples, base))[0]\n subsample = samples[np.random.randint(0, samples.shape[0], 250)]\n return np.linalg.svd(subsample, full_matrices=False)[2][:embed].T\n\ndef svd_forward(base, cubes):\n \"\"\" Encode images with eigenvectors, using an existing base\n\n Accepts\n -------\n base : np.array of shape (cube_volume, embed)\n Linear basis for description of video segments, generated by svd_prep()\n cubes : np.array of (cube_count, cube_volume)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n\n Returns\n -------\n np.array of (cube_count, embed)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \"\"\"\n return np.dot(cubes, base)\n\ndef svd_backward(base, proj):\n \"\"\" Decode cubes back from embedding\n \n Accepts\n -------\n base : np.array of shape (cube_volume, embed)\n Linear 
basis for description of video segments, generated by svd_prep()\n proj : np.array of (cube_count, embed)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \n Returns\n -------\n np.array of (cube_count, cube_volume)\n where\n cube_count = d*w*h,\n d, w, h = the time, width, and height of the video measured in cubes,\n cube_volume = self.depth * self.width * self.height * self.colors\n \n \"\"\"\n reproj = np.dot(proj, base.T)\n return np.clip(reproj, 0, 255)\n\ndef buf_forward(proj):\n \"\"\" Save projections to a bytestring for later reconstruction\n \n Accepts\n -------\n proj : np.array\n The projection to store as a byte array\n \n Returns\n -------\n A byte array representing that file. In machine order.\n \"\"\"\n sign = np.sign(proj)\n proj = sign * np.clip(np.log1p(np.abs(proj)) * 14, 0, 127)\n return (\n np.array(proj.shape).astype(np.uint32).tobytes().ljust(32, b\"\\x00\")\n + proj.astype(np.int8).tobytes()\n )\n\ndef buf_backward(buf):\n \"\"\" load projections from a bytestring\n \n Accepts\n -------\n proj : np.array\n The projection to store as a byte array\n \n Returns\n -------\n proj : np.array\n the projection previously saves\n \"\"\"\n shape = tuple(x for x in np.frombuffer(buf[:32], dtype=np.uint32) if x > 0)\n proj = (\n np.frombuffer(buf, dtype=np.int8, offset=32)\n .reshape(shape)\n .astype(np.float32)\n )\n return np.sign(proj) * np.expm1(np.abs(proj)/14)\n\ndef run_pipeline(num_embed):\n cube_step = CubeStep(cube_shape=(2,2,2))\n cubes = cube_step.to_matrix(cube_step.to_cubes(webm_forward()))\n base = svd_prep(cubes, embed=num_embed)\n proj = svd_forward(base, cubes)\n buf = buf_forward(proj)\n base_buf = buf_forward(base)\n open(\"proj.arr\", \"wb\").write(buf)\n open(\"base.arr\", \"wb\").write(base_buf)\n \n # At this point everything is stored in buf and base_buf\n proj = buf_backward(buf)\n #base = buf_backward(base_buf)\n reproj = svd_backward(base, proj)\n newimages = cube_step.from_cubes(cube_step.from_matrix(reproj))\n #webm_backward(newimages.astype(np.uint8))\n return (cubes, base_buf+buf, reproj, num_embed)\n\n \nif __name__ == \"__main__\":\n qlog = QualityCheck()\n for n in range(1,50,4):\n qlog.quality_check(*run_pipeline(n), cube_shape=\"2,2,2\")\n print(f\"Finished {n}\")\n\n" ]
[ [ "numpy.sign", "numpy.moveaxis", "numpy.abs", "numpy.linalg.svd", "numpy.clip", "numpy.log", "numpy.array", "numpy.dot", "numpy.random.randint", "numpy.frombuffer" ] ]
pschafhalter/pylot
[ "712fd504f9e2669cfc9876eaed4954fbf2b31f20" ]
[ "pylot/perception/detection/detection_operator.py" ]
[ "\"\"\"Implements an operator that detects obstacles.\"\"\"\nimport logging\nimport time\n\nimport erdos\n\nimport numpy as np\n\nimport pylot.utils\nfrom pylot.perception.detection.obstacle import Obstacle\nfrom pylot.perception.detection.utils import BoundingBox2D, \\\n OBSTACLE_LABELS, load_coco_bbox_colors, load_coco_labels\nfrom pylot.perception.messages import ObstaclesMessage\n\nimport tensorflow as tf\n\n\nclass DetectionOperator(erdos.Operator):\n \"\"\"Detects obstacles using a TensorFlow model.\n\n The operator receives frames on a camera stream, and runs a model for each\n frame.\n\n Args:\n camera_stream (:py:class:`erdos.ReadStream`): The stream on which\n camera frames are received.\n obstacles_stream (:py:class:`erdos.WriteStream`): Stream on which the\n operator sends\n :py:class:`~pylot.perception.messages.ObstaclesMessage` messages.\n model_path(:obj:`str`): Path to the model pb file.\n flags (absl.flags): Object to be used to access absl flags.\n \"\"\"\n def __init__(self, camera_stream, time_to_decision_stream,\n obstacles_stream, model_path, flags):\n camera_stream.add_callback(self.on_msg_camera_stream,\n [obstacles_stream])\n time_to_decision_stream.add_callback(self.on_time_to_decision_update)\n self._flags = flags\n self._logger = erdos.utils.setup_logging(self.config.name,\n self.config.log_file_name)\n self._detection_graph = tf.Graph()\n # Load the model from the model file.\n pylot.utils.set_tf_loglevel(logging.ERROR)\n with self._detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(model_path, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n self._gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=flags.\n obstacle_detection_gpu_memory_fraction)\n # Create a TensorFlow session.\n self._tf_session = tf.Session(\n graph=self._detection_graph,\n config=tf.ConfigProto(gpu_options=self._gpu_options))\n # Get the tensors we're interested in.\n self._image_tensor = self._detection_graph.get_tensor_by_name(\n 'image_tensor:0')\n self._detection_boxes = self._detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n self._detection_scores = self._detection_graph.get_tensor_by_name(\n 'detection_scores:0')\n self._detection_classes = self._detection_graph.get_tensor_by_name(\n 'detection_classes:0')\n self._num_detections = self._detection_graph.get_tensor_by_name(\n 'num_detections:0')\n self._coco_labels = load_coco_labels(self._flags.path_coco_labels)\n self._bbox_colors = load_coco_bbox_colors(self._coco_labels)\n # Unique bounding box id. 
Incremented for each bounding box.\n self._unique_id = 0\n # Serve some junk image to load up the model.\n self.__run_model(np.zeros((108, 192, 3)))\n\n @staticmethod\n def connect(camera_stream, time_to_decision_stream):\n \"\"\"Connects the operator to other streams.\n\n Args:\n camera_stream (:py:class:`erdos.ReadStream`): The stream on which\n camera frames are received.\n\n Returns:\n :py:class:`erdos.WriteStream`: Stream on which the operator sends\n :py:class:`~pylot.perception.messages.ObstaclesMessage` messages.\n \"\"\"\n obstacles_stream = erdos.WriteStream()\n return [obstacles_stream]\n\n def on_time_to_decision_update(self, msg):\n self._logger.debug('@{}: {} received ttd update {}'.format(\n msg.timestamp, self.config.name, msg))\n\n @erdos.profile_method()\n def on_msg_camera_stream(self, msg, obstacles_stream):\n \"\"\"Invoked whenever a frame message is received on the stream.\n\n Args:\n msg (:py:class:`~pylot.perception.messages.FrameMessage`): Message\n received.\n obstacles_stream (:py:class:`erdos.WriteStream`): Stream on which\n the operator sends\n :py:class:`~pylot.perception.messages.ObstaclesMessage`\n messages.\n \"\"\"\n self._logger.debug('@{}: {} received message'.format(\n msg.timestamp, self.config.name))\n start_time = time.time()\n # The models expect BGR images.\n assert msg.frame.encoding == 'BGR', 'Expects BGR frames'\n num_detections, res_boxes, res_scores, res_classes = self.__run_model(\n msg.frame.frame)\n obstacles = []\n for i in range(0, num_detections):\n if res_classes[i] in self._coco_labels:\n if (res_scores[i] >=\n self._flags.obstacle_detection_min_score_threshold):\n if (self._coco_labels[res_classes[i]] in OBSTACLE_LABELS):\n obstacles.append(\n Obstacle(BoundingBox2D(\n int(res_boxes[i][1] *\n msg.frame.camera_setup.width),\n int(res_boxes[i][3] *\n msg.frame.camera_setup.width),\n int(res_boxes[i][0] *\n msg.frame.camera_setup.height),\n int(res_boxes[i][2] *\n msg.frame.camera_setup.height)),\n res_scores[i],\n self._coco_labels[res_classes[i]],\n id=self._unique_id))\n self._unique_id += 1\n else:\n self._logger.warning(\n 'Ignoring non essential detection {}'.format(\n self._coco_labels[res_classes[i]]))\n else:\n self._logger.warning('Filtering unknown class: {}'.format(\n res_classes[i]))\n\n self._logger.debug('@{}: {} obstacles: {}'.format(\n msg.timestamp, self.config.name, obstacles))\n\n # Get runtime in ms.\n runtime = (time.time() - start_time) * 1000\n # Send out obstacles.\n obstacles_stream.send(\n ObstaclesMessage(msg.timestamp, obstacles, runtime))\n obstacles_stream.send(erdos.WatermarkMessage(msg.timestamp))\n\n if self._flags.log_detector_output:\n msg.frame.annotate_with_bounding_boxes(msg.timestamp, obstacles,\n None, self._bbox_colors)\n msg.frame.save(msg.timestamp.coordinates[0], self._flags.data_path,\n 'detector-{}'.format(self.config.name))\n\n def __run_model(self, image_np):\n # Expand dimensions since the model expects images to have\n # shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n (boxes, scores, classes, num_detections) = self._tf_session.run(\n [\n self._detection_boxes, self._detection_scores,\n self._detection_classes, self._num_detections\n ],\n feed_dict={self._image_tensor: image_np_expanded})\n\n num_detections = int(num_detections[0])\n res_classes = [int(cls) for cls in classes[0][:num_detections]]\n res_boxes = boxes[0][:num_detections]\n res_scores = scores[0][:num_detections]\n return num_detections, res_boxes, res_scores, res_classes\n" ]
[ [ "numpy.zeros", "tensorflow.gfile.GFile", "tensorflow.Graph", "numpy.expand_dims", "tensorflow.import_graph_def", "tensorflow.GPUOptions", "tensorflow.ConfigProto", "tensorflow.GraphDef" ] ]
Sohl-Dickstein/learned_optimization
[ "cd929359a51d09444665021387c058aac11b63ba" ]
[ "learned_optimization/baselines/run_archive.py" ]
[ "# coding=utf-8\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Script to create aggregated results from an hparam set.\n\nThis script can be run after the corresponding baselines have been created,\nor while the baselines are being run. In the case that the baselines are being\nrun this will continuously retry until all baselines are finished and only\nfinish at this point.\n\"\"\"\nfrom concurrent import futures\nimport time\nfrom typing import Any, Mapping, Optional\n\nfrom absl import app\nfrom absl import logging\nimport gin\nimport jax\nfrom learned_optimization import setup_experiment\nfrom learned_optimization.baselines import hparam_sets # pylint: disable=unused-import\nfrom learned_optimization.baselines import utils\nimport numpy as onp\n\n\ndef maybe_get_hparam_set(task_name,\n hparam_set_name) -> Optional[Mapping[str, Any]]:\n \"\"\"Attempt to get the data for a given task_name and hparam set.\"\"\"\n hparam_set_fn = gin.get_configurable(hparam_set_name)\n unused_cfgs, paths_reps = hparam_set_fn(task_name)\n paths, unused_reps = zip(*paths_reps)\n\n def load_one(p):\n return utils.load_baseline_results_from_dir(\n save_dir=p, output_type=\"curves\")\n\n with futures.ThreadPoolExecutor(32) as executor:\n results = list(executor.map(load_one, paths))\n\n def stack(*xs):\n if isinstance(xs[0], str):\n return xs\n elif isinstance(xs[0], (onp.ndarray, int, float)):\n return onp.asarray(xs)\n else:\n raise ValueError(f\"Unsupported type: {type(xs[0])}.\")\n\n # ensure that we have the right amount of data for each.\n trimmed_results = []\n for (path, rep), res in zip(paths_reps, results):\n if len(res) < rep:\n logging.info(f\"Failed to find enough results in dir {path}. \" # pylint: disable=logging-fstring-interpolation\n f\"Expected {len(res)}\")\n return None\n trimmed_results.append(jax.tree_map(stack, *res[0:rep]))\n stacked = jax.tree_map(stack, *trimmed_results)\n return stacked\n\n\ndef maybe_archive_hparam_set(task_name: str, hparam_set_name: str) -> bool:\n data = maybe_get_hparam_set(task_name, hparam_set_name)\n if data is None:\n return False\n\n utils.write_archive(task_name, hparam_set_name, data)\n return True\n\n\[email protected]\ndef wait_until_ready_then_archive_task(task_name: str = gin.REQUIRED,\n hparam_set_name: str = gin.REQUIRED):\n \"\"\"Continually try to create and save an archive of hparam set + task_name.\n\n This function is designed to be run while the baselines are being computed\n and will finish once all the baseline data has been run. By blocking in this\n function we can run all baselines and an archive job at the same time instead\n of leveraging a more sophisticated dependency system.\n\n Args:\n task_name: Name of task to archive\n hparam_set_name: the name of the hparam set to archive.\n \"\"\"\n while True:\n r = maybe_archive_hparam_set(task_name, hparam_set_name)\n if r:\n logging.info(f\"Saved success! 
Wrote {hparam_set_name} {task_name}.\") # pylint: disable=logging-fstring-interpolation\n return\n else:\n logging.info(f\"Saved Failed! {hparam_set_name} {task_name}.\") # pylint: disable=logging-fstring-interpolation\n logging.info(\"Waiting 10 seconds and trying again.\")\n time.sleep(10)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError(\"Too many command-line arguments.\")\n unused_dir = setup_experiment.setup_experiment(make_dir=False)\n\n wait_until_ready_then_archive_task()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n" ]
[ [ "numpy.asarray" ] ]
wull566/tensorflow_demo
[ "c2c45050867cb056b8193eb53466d26b80b0ec13" ]
[ "tutorials/2_tensorflow_old/numpy&pandas/17_merge.py" ]
[ "# View more 3_python 2_tensorflow_old on my Youtube and Youku channel!!!\n\n# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg\n# Youku video tutorial: http://i.youku.com/pythontutorial\n\n\"\"\"\nPlease note, this code is only for 3_python 3+. If you are using 3_python 2+, please modify the code accordingly.\n\"\"\"\nfrom __future__ import print_function\nimport pandas as pd\n\n# merging two df by key/keys. (may be used in database)\n# simple example\nleft = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3']})\nright = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']})\nprint(left)\nprint(right)\nres = pd.merge(left, right, on='key')\nprint(res)\n\n# consider two keys\nleft = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],\n 'key2': ['K0', 'K1', 'K0', 'K1'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3']})\nright = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],\n 'key2': ['K0', 'K0', 'K0', 'K0'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']})\nprint(left)\nprint(right)\nres = pd.merge(left, right, on=['key1', 'key2'], how='inner') # default for how='inner'\n# how = ['left', 'right', 'outer', 'inner']\nres = pd.merge(left, right, on=['key1', 'key2'], how='left')\nprint(res)\n\n# indicator\ndf1 = pd.DataFrame({'col1':[0,1], 'col_left':['a','b']})\ndf2 = pd.DataFrame({'col1':[1,2,2],'col_right':[2,2,2]})\nprint(df1)\nprint(df2)\nres = pd.merge(df1, df2, on='col1', how='outer', indicator=True)\n# give the indicator a custom name\nres = pd.merge(df1, df2, on='col1', how='outer', indicator='indicator_column')\n\n\n# merged by index\nleft = pd.DataFrame({'A': ['A0', 'A1', 'A2'],\n 'B': ['B0', 'B1', 'B2']},\n index=['K0', 'K1', 'K2'])\nright = pd.DataFrame({'C': ['C0', 'C2', 'C3'],\n 'D': ['D0', 'D2', 'D3']},\n index=['K0', 'K2', 'K3'])\nprint(left)\nprint(right)\n# left_index and right_index\nres = pd.merge(left, right, left_index=True, right_index=True, how='outer')\nres = pd.merge(left, right, left_index=True, right_index=True, how='inner')\n\n# handle overlapping\nboys = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'age': [1, 2, 3]})\ngirls = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'age': [4, 5, 6]})\nres = pd.merge(boys, girls, on='k', suffixes=['_boy', '_girl'], how='inner')\nprint(res)\n\n# join function in pandas is similar with merge. If know merge, you will understand join\n" ]
[ [ "pandas.DataFrame", "pandas.merge" ] ]
dohmatob/adversarial-robustness-toolbox
[ "7d3ba7d2d6690be69c08754fbc632947c2d10a97" ]
[ "art/classifiers/classifier.py" ]
[ "# MIT License\n#\n# Copyright (C) IBM Corporation 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements abstract base classes defining to properties for all classifiers.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport abc\nimport sys\n\nimport numpy as np\n\nfrom art.utils import check_and_transform_label_format\n\n# Ensure compatibility with Python 2 and 3 when using ABCMeta\nif sys.version_info >= (3, 4):\n ABC = abc.ABC\nelse:\n ABC = abc.ABCMeta(str('ABC'), (), {})\n\n\nclass Classifier(ABC):\n \"\"\"\n Base class defining the minimum classifier functionality and is required for all classifiers. A classifier of this\n type can be combined with black-box attacks.\n \"\"\"\n\n def __init__(self, clip_values=None, defences=None, preprocessing=None, **kwargs):\n \"\"\"\n Initialize a `Classifier` object.\n\n :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and\n maximum values allowed for features. If floats are provided, these will be used as the range of all\n features. If arrays are provided, each value will be considered the bound for a feature, thus\n the shape of clip values needs to match the total number of features.\n :type clip_values: `tuple`\n :param defences: Defence(s) to be activated with the classifier.\n :type defences: :class:`.Preprocessor` or `list(Preprocessor)` instances\n :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be\n used for data preprocessing. The first value will be subtracted from the input. 
The input will then\n be divided by the second one.\n :type preprocessing: `tuple`\n \"\"\"\n from art.defences.preprocessor import Preprocessor\n\n self._clip_values = clip_values\n if clip_values is not None:\n if len(clip_values) != 2:\n raise ValueError('`clip_values` should be a tuple of 2 floats or arrays containing the allowed'\n 'data range.')\n if np.array(clip_values[0] >= clip_values[1]).any():\n raise ValueError('Invalid `clip_values`: min >= max.')\n\n if isinstance(defences, Preprocessor):\n self.defences = [defences]\n else:\n self.defences = defences\n\n if preprocessing is not None and len(preprocessing) != 2:\n raise ValueError('`preprocessing` should be a tuple of 2 floats with the values to subtract and divide'\n 'the model inputs.')\n self.preprocessing = preprocessing\n\n super().__init__(**kwargs)\n\n @abc.abstractmethod\n def predict(self, x, **kwargs): # lgtm [py/inheritance/incorrect-overridden-signature]\n \"\"\"\n Perform prediction of the classifier for input `x`.\n\n :param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,\n nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2)\n :type x: `np.ndarray`\n :return: Array of predictions of shape `(nb_inputs, nb_classes)`.\n :rtype: `np.ndarray`\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fit(self, x, y, **kwargs): # lgtm [py/inheritance/incorrect-overridden-signature]\n \"\"\"\n Fit the classifier using the training data `(x, y)`.\n\n :param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,\n nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2)\n :type x: `np.ndarray`\n :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape\n (nb_samples,).\n :type y: `np.ndarray`\n :param kwargs: Dictionary of framework-specific arguments.\n :type kwargs: `dict`\n :return: `None`\n \"\"\"\n raise NotImplementedError\n\n @property\n def clip_values(self):\n \"\"\"\n :return: Tuple of form `(min, max)` containing the minimum and maximum values allowed for the input features.\n :rtype: `tuple`\n \"\"\"\n return self._clip_values\n\n @property\n def input_shape(self):\n \"\"\"\n Return the shape of one input.\n\n :return: Shape of one input for the classifier.\n :rtype: `tuple`\n \"\"\"\n return self._input_shape\n\n @abc.abstractmethod\n def nb_classes(self):\n \"\"\"\n Return the number of output classes.\n\n :return: Number of classes in the data.\n :rtype: `int`\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def save(self, filename, path=None):\n \"\"\"\n Save a model to file specific to the backend framework.\n\n :param filename: Name of the file where to save the model.\n :type filename: `str`\n :param path: Path of the directory where to save the model. If no path is specified, the model will be stored in\n the default data location of ART at `ART_DATA_PATH`.\n :type path: `str`\n :return: None\n \"\"\"\n raise NotImplementedError\n\n def _apply_preprocessing(self, x, y, fit):\n \"\"\"\n Apply all defences and preprocessing operations on the inputs `(x, y)`. 
This function has to be applied to all\n raw inputs (x, y) provided to the classifier.\n\n :param x: Features, where first dimension is the number of samples.\n :type x: `np.ndarray`\n :param y: Target values (class labels), where first dimension is the number of samples.\n :type y: `np.ndarray` or `None`\n :param fit: `True` if the defences are applied during training.\n :return: Value of the data after applying the defences.\n :rtype: `np.ndarray`\n \"\"\"\n y = check_and_transform_label_format(y, self.nb_classes())\n x_preprocessed, y_preprocessed = self._apply_preprocessing_defences(x, y, fit=fit)\n x_preprocessed = self._apply_preprocessing_standardisation(x_preprocessed)\n return x_preprocessed, y_preprocessed\n\n def _apply_preprocessing_defences(self, x, y, fit=False):\n \"\"\"\n Apply all defences of the classifier on the raw inputs `(x, y)`. This function is intended to only be called\n from function `_apply_defences_and_preprocessing`.\n\n :param x: Features, where first dimension is the number of samples.\n :type x: `np.ndarray`\n :param y: Target values (class labels), where first dimension is the number of samples.\n :type y: `np.ndarray`\n :param fit: `True` if the function is call before fit/training and `False` if the function is called before a\n predict operation\n :return: Arrays for `x` and `y` after applying the defences.\n :rtype: `np.ndarray`\n \"\"\"\n if self.defences is not None:\n for defence in self.defences:\n if fit:\n if defence.apply_fit:\n x, y = defence(x, y)\n else:\n if defence.apply_predict:\n x, y = defence(x, y)\n\n return x, y\n\n def _apply_preprocessing_standardisation(self, x):\n \"\"\"\n Apply standardisation to input data `x`.\n\n :param x: Input data, where first dimension is the number of samples.\n :type x: `np.ndarray`\n :return: Array for `x` with the standardized data.\n :rtype: `np.ndarray`\n :raises: `TypeError`\n \"\"\"\n if x.dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:\n raise TypeError('The data type of input data `x` is {} and cannot represent negative values. Consider '\n 'changing the data type of the input data `x` to a type that supports negative values e.g. '\n 'np.float32.'.format(x.dtype))\n\n if self.preprocessing is not None:\n sub, div = self.preprocessing\n sub = np.asarray(sub, dtype=x.dtype)\n div = np.asarray(div, dtype=x.dtype)\n\n res = x - sub\n res = res / div\n\n else:\n res = x\n\n return res\n\n def __repr__(self):\n class_name = self.__class__.__name__\n attributes = {(k[1:], v) if k[0] == '_' else (k, v) for (k, v) in self.__dict__.items()}\n attributes = ['{}={}'.format(k, v) for (k, v) in attributes]\n repr_string = class_name + '(' + ', '.join(attributes) + ')'\n return repr_string\n\n\nclass ClassifierNeuralNetwork(ABC):\n \"\"\"\n Base class defining additional classifier functionality required for neural network classifiers. 
This base class\n has to be mixed in with class `Classifier` to extend the minimum classifier functionality.\n \"\"\"\n\n def __init__(self, channel_index=None, **kwargs):\n \"\"\"\n Initialize a `ClassifierNeuralNetwork` object.\n\n :param channel_index: Index of the axis in input (feature) array `x` representing the color channels.\n :type channel_index: `int`\n \"\"\"\n self._channel_index = channel_index\n super().__init__(**kwargs)\n\n @abc.abstractmethod\n def predict(self, x, batch_size=128, **kwargs):\n \"\"\"\n Perform prediction of the classifier for input `x`.\n\n :param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,\n nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2)\n :param batch_size: The batch size used for evaluating the classifer's `model`.\n :type batch_size: `int`\n :return: Array of predictions of shape `(nb_inputs, nb_classes)`.\n :rtype: `np.ndarray`\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fit(self, x, y, batch_size=128, nb_epochs=20, **kwargs):\n \"\"\"\n Fit the classifier on the training set `(x, y)`.\n\n :param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,\n nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2)\n :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape\n (nb_samples,).\n :type y: `np.ndarray`\n :param batch_size: The batch size used for evaluating the classifer's `model`.\n :type batch_size: `int`\n :param nb_epochs: Number of epochs to use for training.\n :type nb_epochs: `int`\n :param kwargs: Dictionary of framework-specific arguments.\n :type kwargs: `dict`\n :return: `None`\n \"\"\"\n raise NotImplementedError\n\n def fit_generator(self, generator, nb_epochs=20, **kwargs):\n \"\"\"\n Fit the classifier using `generator` yielding training batches as specified. Framework implementations can\n provide framework-specific versions of this function to speed-up computation.\n\n :param generator: Batch generator providing `(x, y)` for each epoch.\n :type generator: :class:`.DataGenerator`\n :param nb_epochs: Number of epochs to use for training.\n :type nb_epochs: `int`\n :param kwargs: Dictionary of framework-specific arguments.\n :type kwargs: `dict`\n :return: `None`\n \"\"\"\n from art.data_generators import DataGenerator\n\n if not isinstance(generator, DataGenerator):\n raise ValueError('Expected instance of `DataGenerator` for `fit_generator`, got %s instead.'\n % str(type(generator)))\n\n for _ in range(nb_epochs):\n x, y = generator.get_batch()\n\n # Apply preprocessing and defences\n x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)\n\n # Fit for current batch\n self.fit(x_preprocessed, y_preprocessed, nb_epochs=1, batch_size=len(x), **kwargs)\n\n @property\n def channel_index(self):\n \"\"\"\n :return: Index of the axis in input data containing the color channels.\n :rtype: `int`\n \"\"\"\n return self._channel_index\n\n @property\n def learning_phase(self):\n \"\"\"\n Return the learning phase set by the user for the current classifier. Possible values are `True` for training,\n `False` for prediction and `None` if it has not been set through the library. 
In the latter case, the library\n does not do any explicit learning phase manipulation and the current value of the backend framework is used.\n If a value has been set by the user for this property, it will impact all following computations for\n model fitting, prediction and gradients.\n\n :return: Value of the learning phase.\n :rtype: `bool` or `None`\n \"\"\"\n return self._learning_phase if hasattr(self, '_learning_phase') else None\n\n @property\n def layer_names(self):\n \"\"\"\n Return the hidden layers in the model, if applicable.\n\n :return: The hidden layers in the model, input and output layers excluded.\n :rtype: `list`\n\n .. warning:: `layer_names` tries to infer the internal structure of the model.\n This feature comes with no guarantees on the correctness of the result.\n The intended order of the layers tries to match their order in the model, but this is not\n guaranteed either.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_activations(self, x, layer, batch_size):\n \"\"\"\n Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and\n `nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by\n calling `layer_names`.\n\n :param x: Input for computing the activations.\n :type x: `np.ndarray`\n :param layer: Layer for computing the activations\n :type layer: `int` or `str`\n :param batch_size: Size of batches.\n :type batch_size: `int`\n :return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.\n :rtype: `np.ndarray`\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def set_learning_phase(self, train):\n \"\"\"\n Set the learning phase for the backend framework.\n\n :param train: `True` if the learning phase is training, `False` if learning phase is not training.\n :type train: `bool`\n \"\"\"\n raise NotImplementedError\n\n def __repr__(self):\n name = self.__class__.__name__\n\n attributes = {(k[1:], v) if k[0] == '_' else (k, v) for (k, v) in self.__dict__.items()}\n attrs = ['{}={}'.format(k, v) for (k, v) in attributes]\n repr_ = name + '(' + ', '.join(attrs) + ')'\n\n return repr_\n\n\nclass ClassifierGradients(ABC):\n \"\"\"\n Base class defining additional classifier functionality for classifiers providing access to loss and class\n gradients. A classifier of this type can be combined with white-box attacks. This base class has to be mixed in with\n class `Classifier` and optionally class `ClassifierNeuralNetwork` to extend the minimum classifier functionality.\n \"\"\"\n\n @abc.abstractmethod\n def class_gradient(self, x, label=None, **kwargs):\n \"\"\"\n Compute per-class derivatives w.r.t. `x`.\n\n :param x: Input with shape as expected by the classifier's model.\n :type x: `np.ndarray`\n :param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class\n output is computed for all samples. If multiple values as provided, the first dimension should\n match the batch size of `x`, and each value will be used as target for its corresponding sample in\n `x`. If `None`, then gradients for all classes will be computed for each sample.\n :type label: `int` or `list`\n :return: Array of gradients of input features w.r.t. 
each class in the form\n `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes\n `(batch_size, 1, input_shape)` when `label` parameter is specified.\n :rtype: `np.ndarray`\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def loss_gradient(self, x, y, **kwargs):\n \"\"\"\n Compute the gradient of the loss function w.r.t. `x`.\n\n :param x: Input with shape as expected by the classifier's model.\n :type x: `np.ndarray`\n :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape\n (nb_samples,).\n :type y: `np.ndarray`\n :return: Array of gradients of the same shape as `x`.\n :rtype: `np.ndarray`\n \"\"\"\n raise NotImplementedError\n\n def _apply_preprocessing_gradient(self, x, gradients):\n \"\"\"\n Apply the backward pass through all preprocessing operations to the gradients.\n\n Apply the backward pass through all preprocessing operations and defences on the inputs `(x, y)`. This function\n has to be applied to all gradients returned by the classifier.\n\n :param x: Features, where first dimension is the number of samples.\n :type x: `np.ndarray`\n :param gradients: Input gradients.\n :type gradients: `np.ndarray`\n :return: Gradients after backward step through preprocessing operations and defences.\n :rtype: `np.ndarray`\n \"\"\"\n gradients = self._apply_preprocessing_normalization_gradient(gradients)\n gradients = self._apply_preprocessing_defences_gradient(x, gradients)\n return gradients\n\n def _apply_preprocessing_defences_gradient(self, x, gradients, fit=False):\n \"\"\"\n Apply the backward pass through the preprocessing defences.\n\n Apply the backward pass through all defences of the classifier on the gradients. This function is intended to\n only be called from function `_apply_preprocessing_gradient`.\n\n :param x: Features, where first dimension is the number of samples.\n :type x: `np.ndarray`\n :param gradients: Input gradient.\n :type gradients: `np.ndarray`\n :param fit: `True` if the gradient is computed during training.\n :return: Gradients after backward step through defences.\n :rtype: `np.ndarray`\n \"\"\"\n if self.defences is not None:\n for defence in self.defences[::-1]:\n if fit:\n if defence.apply_fit:\n gradients = defence.estimate_gradient(x, gradients)\n else:\n if defence.apply_predict:\n gradients = defence.estimate_gradient(x, gradients)\n\n return gradients\n\n def _apply_preprocessing_normalization_gradient(self, gradients):\n \"\"\"\n Apply the backward pass through standardisation of `x` to `gradients`.\n\n :param gradients: Input gradients.\n :type gradients: `np.ndarray`\n :return: Gradients after backward step through standardisation.\n :rtype: `np.ndarray\n \"\"\"\n _, div = self.preprocessing\n div = np.asarray(div, dtype=gradients.dtype)\n res = gradients / div\n return res\n\n\nclass ClassifierDecisionTree(ABC):\n \"\"\"\n Base class defining additional classifier functionality for decision-tree-based classifiers This base class has to\n be mixed in with class `Classifier` to extend the minimum classifier functionality.\n \"\"\"\n\n @abc.abstractmethod\n def get_trees(self):\n \"\"\"\n Get the decision trees.\n\n :return: A list of decision trees.\n :rtype: `[Tree]`\n \"\"\"\n raise NotImplementedError\n" ]
[ [ "numpy.array", "numpy.asarray" ] ]
kashif/spinningup-pytorch
[ "8f3389c239c94b3ff46453f359061ae30d851ce8" ]
[ "fireup/algos/ddpg/ddpg.py" ]
[ "import time\n\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom fireup.algos.ddpg import core\nfrom fireup.utils.logx import EpochLogger\n\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for DDPG agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)\n self.rews_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n idxs = np.random.randint(0, self.size, size=batch_size)\n return dict(\n obs1=self.obs1_buf[idxs],\n obs2=self.obs2_buf[idxs],\n acts=self.acts_buf[idxs],\n rews=self.rews_buf[idxs],\n done=self.done_buf[idxs],\n )\n\n\n\"\"\"\n\nDeep Deterministic Policy Gradient (DDPG)\n\n\"\"\"\n\n\ndef ddpg(\n env_fn,\n actor_critic=core.ActorCritic,\n ac_kwargs=dict(),\n seed=0,\n steps_per_epoch=5000,\n epochs=100,\n replay_size=int(1e6),\n gamma=0.99,\n polyak=0.995,\n pi_lr=1e-3,\n q_lr=1e-3,\n batch_size=100,\n start_steps=10000,\n act_noise=0.1,\n max_ep_len=1000,\n logger_kwargs=dict(),\n save_freq=1,\n):\n \"\"\"\n\n Args:\n env_fn : A function which creates a copy of the environment.\n The environment must satisfy the OpenAI Gym API.\n\n actor_critic: The agent's main model which takes some states ``x`` and\n and actions ``a`` and returns a tuple of:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``pi`` (batch, act_dim) | Deterministically computes actions\n | from policy given states.\n ``q`` (batch,) | Gives the current estimate of Q* for\n | states ``x`` and actions in\n | ``a``.\n ``q_pi`` (batch,) | Gives the composition of ``q`` and\n | ``pi`` for states in ``x``:\n | q(x, pi(x)).\n =========== ================ ======================================\n\n ac_kwargs (dict): Any kwargs appropriate for the actor_critic\n class you provided to DDPG.\n\n seed (int): Seed for random number generators.\n\n steps_per_epoch (int): Number of steps of interaction (state-action pairs)\n for the agent and the environment in each epoch.\n\n epochs (int): Number of epochs to run and train agent.\n\n replay_size (int): Maximum length of replay buffer.\n\n gamma (float): Discount factor. (Always between 0 and 1.)\n\n polyak (float): Interpolation factor in polyak averaging for target\n networks. Target networks are updated towards main networks\n according to:\n\n .. math:: \\\\theta_{\\\\text{targ}} \\\\leftarrow\n \\\\rho \\\\theta_{\\\\text{targ}} + (1-\\\\rho) \\\\theta\n\n where :math:`\\\\rho` is polyak. (Always between 0 and 1, usually\n close to 1.)\n\n pi_lr (float): Learning rate for policy.\n\n q_lr (float): Learning rate for Q-networks.\n\n batch_size (int): Minibatch size for SGD.\n\n start_steps (int): Number of steps for uniform-random action selection,\n before running real policy. 
Helps exploration.\n\n act_noise (float): Stddev for Gaussian exploration noise added to\n policy at training time. (At test time, no noise is added.)\n\n max_ep_len (int): Maximum length of trajectory / episode / rollout.\n\n logger_kwargs (dict): Keyword args for EpochLogger.\n\n save_freq (int): How often (in terms of gap between epochs) to save\n the current policy and value function.\n\n \"\"\"\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n env, test_env = env_fn(), env_fn()\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n # Action limit for clamping: critically, assumes all dimensions share the same bound!\n act_limit = env.action_space.high[0]\n\n # Share information about action space with policy architecture\n ac_kwargs[\"action_space\"] = env.action_space\n\n # Main outputs from computation graph\n main = actor_critic(in_features=obs_dim, **ac_kwargs)\n\n # Target networks\n target = actor_critic(in_features=obs_dim, **ac_kwargs)\n target.eval()\n\n # Experience buffer\n replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)\n\n # Count variables\n var_counts = tuple(\n core.count_vars(module) for module in [main.policy, main.q, main]\n )\n print(\"\\nNumber of parameters: \\t pi: %d, \\t q: %d, \\t total: %d\\n\" % var_counts)\n\n # Separate train ops for pi, q\n pi_optimizer = torch.optim.Adam(main.policy.parameters(), lr=pi_lr)\n q_optimizer = torch.optim.Adam(main.q.parameters(), lr=q_lr)\n\n # Initializing targets to match main variables\n target.load_state_dict(main.state_dict())\n\n def get_action(o, noise_scale):\n pi = main.policy(torch.Tensor(o.reshape(1, -1)))\n a = pi.detach().numpy()[0] + noise_scale * np.random.randn(act_dim)\n return np.clip(a, -act_limit, act_limit)\n\n def test_agent(n=10):\n for _ in range(n):\n o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0\n while not (d or (ep_len == max_ep_len)):\n # Take deterministic actions at test time (noise_scale=0)\n o, r, d, _ = test_env.step(get_action(o, 0))\n ep_ret += r\n ep_len += 1\n logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)\n\n start_time = time.time()\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n total_steps = steps_per_epoch * epochs\n\n # Main loop: collect experience in env and update/log each epoch\n for t in range(total_steps):\n main.eval()\n \"\"\"\n Until start_steps have elapsed, randomly sample actions\n from a uniform distribution for better exploration. 
Afterwards,\n use the learned policy (with some noise, via act_noise).\n \"\"\"\n if t > start_steps:\n a = get_action(o, act_noise)\n else:\n a = env.action_space.sample()\n\n # Step the env\n o2, r, d, _ = env.step(a)\n ep_ret += r\n ep_len += 1\n\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n d = False if ep_len == max_ep_len else d\n\n # Store experience to replay buffer\n replay_buffer.store(o, a, r, o2, d)\n\n # Super critical, easy to overlook step: make sure to update\n # most recent observation!\n o = o2\n\n if d or (ep_len == max_ep_len):\n main.train()\n \"\"\"\n Perform all DDPG updates at the end of the trajectory,\n in accordance with tuning done by TD3 paper authors.\n \"\"\"\n for _ in range(ep_len):\n batch = replay_buffer.sample_batch(batch_size)\n (obs1, obs2, acts, rews, done) = (\n torch.Tensor(batch[\"obs1\"]),\n torch.Tensor(batch[\"obs2\"]),\n torch.Tensor(batch[\"acts\"]),\n torch.Tensor(batch[\"rews\"]),\n torch.Tensor(batch[\"done\"]),\n )\n _, _, q_pi_targ = target(obs2, acts)\n\n # Bellman backup for Q function\n backup = (rews + gamma * (1 - done) * q_pi_targ).detach()\n\n # DDPG Q loss\n _, q, _ = main(obs1, acts)\n q_loss = F.mse_loss(q, backup)\n\n # Q-learning update\n q_optimizer.zero_grad()\n q_loss.backward()\n q_optimizer.step()\n logger.store(LossQ=q_loss.item(), QVals=q.data.numpy())\n\n # DDPG Policy loss\n _, _, q_pi = main(obs1, acts)\n pi_loss = -q_pi.mean()\n\n # Policy update\n pi_optimizer.zero_grad()\n pi_loss.backward()\n pi_optimizer.step()\n logger.store(LossPi=pi_loss.item())\n\n # Polyak averaging for target parameters\n for p_main, p_target in zip(main.parameters(), target.parameters()):\n p_target.data.copy_(\n polyak * p_target.data + (1 - polyak) * p_main.data\n )\n\n logger.store(EpRet=ep_ret, EpLen=ep_len)\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n\n # End of epoch wrap-up\n if t > 0 and t % steps_per_epoch == 0:\n epoch = t // steps_per_epoch\n\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs - 1):\n logger.save_state({\"env\": env}, main, None)\n\n # Test the performance of the deterministic version of the agent.\n test_agent()\n\n # Log info about epoch\n logger.log_tabular(\"Epoch\", epoch)\n logger.log_tabular(\"EpRet\", with_min_and_max=True)\n logger.log_tabular(\"TestEpRet\", with_min_and_max=True)\n logger.log_tabular(\"EpLen\", average_only=True)\n logger.log_tabular(\"TestEpLen\", average_only=True)\n logger.log_tabular(\"TotalEnvInteracts\", t)\n logger.log_tabular(\"QVals\", with_min_and_max=True)\n logger.log_tabular(\"LossPi\", average_only=True)\n logger.log_tabular(\"LossQ\", average_only=True)\n logger.log_tabular(\"Time\", time.time() - start_time)\n logger.dump_tabular()\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--env\", type=str, default=\"HalfCheetah-v2\")\n parser.add_argument(\"--hid\", type=int, default=300)\n parser.add_argument(\"--l\", type=int, default=1)\n parser.add_argument(\"--gamma\", type=float, default=0.99)\n parser.add_argument(\"--seed\", \"-s\", type=int, default=0)\n parser.add_argument(\"--epochs\", type=int, default=50)\n parser.add_argument(\"--exp_name\", type=str, default=\"ddpg\")\n args = parser.parse_args()\n\n from fireup.utils.run_utils import setup_logger_kwargs\n\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n ddpg(\n lambda: 
gym.make(args.env),\n actor_critic=core.ActorCritic,\n ac_kwargs=dict(hidden_sizes=[args.hid] * args.l),\n gamma=args.gamma,\n seed=args.seed,\n epochs=args.epochs,\n logger_kwargs=logger_kwargs,\n )\n" ]
[ [ "torch.nn.functional.mse_loss", "numpy.zeros", "torch.manual_seed", "numpy.random.seed", "numpy.random.randn", "numpy.clip", "numpy.random.randint", "torch.Tensor" ] ]
marinarierav-uab/foveabox
[ "1f313fd14aaf018aadb0c6b3de163eb0a3b1fbd5" ]
[ "tools/challenge_validation.py" ]
[ "import os\n\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom scipy.ndimage.measurements import label\n\n\ndef calculate_average_classif_results(results_dict: dict, thresholds, output_file):\n avg = pd.DataFrame(columns=[\"Thr\", \"TP\", \"FP\", \"TN\", \"FN\", \"Accuracy\", \"Conf\"])\n for threshold in thresholds:\n # TP, FP, FN, TN, RT\n results = [0, 0, 0, 0]\n mean_conf = 0\n for vid, res_dict in results_dict.items(): # for each video\n results = [res + new for res, new in zip(results, res_dict[threshold][:4])]\n\n mean_conf += res_dict[threshold][-1]\n\n # switched values fn <-> tn, as requested by J.B.\n tp, fp, tn, fn = results[0], results[1], results[2], results[3]\n\n try:\n acc = (tp + tn) / (tp + fp + fn + tn)\n except:\n acc = -1\n try:\n mean_conf /= len(results_dict.items())\n except:\n mean_conf = 0\n\n # switched values fn <-> tn, as requested by J.B.\n row = [threshold, tp, fp, tn, fn, acc, mean_conf]\n avg.loc[-1] = row\n\n avg.index += 1\n avg.sort_index()\n avg.reset_index(inplace=True, drop=True)\n\n print(avg)\n\n avg.to_csv(output_file)\n\n\ndef calculate_average_results(results_dict: dict, thresholds, output_file):\n avg = pd.DataFrame(columns=[\"Thr\", \"TP\", \"FP\", \"TN\", \"FN\", 'Accuracy', \"Precision\", \"Recall\", \"Specificity\", \"F1\", \"F2\", \"Mean RT\"])\n for threshold in thresholds:\n # TP, FP, FN, TN, RT\n results = [0, 0, 0, 0]\n sums = [0, 0, 0, 0]\n srt = 0\n drt = 1e-7\n for vid, res_dict in results_dict.items(): # for each video\n results = [res + new for res, new in zip(results, res_dict[threshold][:-1])]\n #sums = [val + new for val, new in zip(sums, results)]\n #print(res_dict[threshold][:-1])\n #print(sum)\n #print(results)\n srt = srt + res_dict[threshold][-1] if res_dict[threshold][-1] != -1 else srt\n drt = drt + 1 if res_dict[threshold][-1] != -1 else drt\n\n # switched values fn <-> tn, as requested by J.B.\n tp, fp, tn, fn = results[0], results[1], results[2], results[3]\n\n try:\n acc = (tp + tn) / (tp + fp + fn + tn)\n except:\n acc = -1\n try:\n pre = tp / (tp + fp)\n except:\n pre = -1\n try:\n rec = tp / (tp + fn)\n except:\n rec = -1\n try:\n spec = tn / (fp + tn)\n except:\n spec = -1\n try:\n mean_rt = srt / drt\n except:\n mean_rt = -1\n\n f1 = (2*pre*rec) / (pre+rec)\n f2 = (5*pre*rec) / ( (4*pre) + rec)\n\n # switched values fn <-> tn, as requested by J.B.\n row = [threshold, tp, fp, tn, fn, acc, pre, rec, spec, f1, f2, mean_rt]\n avg.loc[-1] = row\n\n avg.index += 1\n avg.sort_index()\n avg.reset_index(inplace=True, drop=True)\n\n print(avg)\n\n avg.to_csv(output_file)\n\n\ndef save_detection_plot(output_folder, threshold, vid_folder, video_gt, video_pred):\n title = \"Video: {} - threshold: {}\".format(vid_folder.split(\"/\")[-1], threshold)\n plt.title(title)\n plt.plot(video_gt, color='blue')\n plt.plot(video_pred, color='gold')\n plt.savefig(os.path.join(output_folder, \"detect_plot-{}-{}.png\".format(vid_folder.split(\"/\")[-1], threshold)))\n plt.clf()\n\n\ndef process_video_for_detection(file, has_confidence, thresh, vid_folder):\n video_len = len(os.listdir(vid_folder)) + 1\n video_gt = np.zeros((video_len, 1))\n video_pred = np.zeros((video_len, 1))\n\n first_polyp = -1\n first_detected_polyp = -1\n\n tp, fp, fn, tn = 0, 0, 0, 0\n for frame in sorted(os.listdir(vid_folder)):\n\n polyp_n = int(frame.split(\"_\")[0].split(\"-\")[1])\n im_frame = Image.open(os.path.join(vid_folder, frame))\n is_polyp = np.asarray(im_frame).sum() > 0\n video_gt[polyp_n] = 
1.1 if is_polyp else 0\n\n if is_polyp and first_polyp == -1:\n first_polyp = polyp_n\n\n frame_output = file.loc[file[0] == polyp_n]\n if has_confidence:\n frame_output = frame_output.loc[frame_output[2] >= thresh]\n\n if frame_output.empty:\n if is_polyp:\n fn += 1\n else:\n tn += 1\n else:\n pred_out = frame_output[1].tolist()[0]\n if pred_out:\n if is_polyp:\n tp += 1\n if first_detected_polyp == -1:\n first_detected_polyp = polyp_n\n else:\n fp += 1\n else:\n if is_polyp:\n fn += 1\n else:\n tn += 1\n\n video_pred[polyp_n] = 0.9\n\n rt = first_detected_polyp - first_polyp if first_detected_polyp != -1 else -1\n\n # switched values fn <-> tn, as requested by J.B.\n return [tp, fp, tn, fn, rt], video_gt, video_pred\n\n\ndef process_video_for_localization(file, has_confidence, threshold, vid_folder):\n tp, fp, tn, fn = 0, 0, 0, 0\n histo_tp, histo_fp, histo_tn, histo_fn = 0, 0, 0, 0\n\n # HISTOLOGIAS DE VIDEOS DE TEST (eventually should be loaded from file)\n no_adenomas = [2, 16]\n\n first_polyp = -1\n first_detected_polyp = -1\n i = 0\n\n vid_n = int(vid_folder.split('/')[-1])\n histologia_real = 0 if (vid_n in no_adenomas) else 1\n\n for frame in sorted(os.listdir(vid_folder)):\n i+=1\n\n #print(\"frame\", i)\n polyp_n = int(frame.split(\"_\")[0].split(\"-\")[1])\n im_frame = Image.open(os.path.join(vid_folder, frame))\n im_frame_np = np.asarray(im_frame, dtype=int)\n is_polyp = im_frame_np.sum() > 0\n\n # 8-connected\n kernel = np.array([[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]])\n labeled_frame, max_polyp = label(im_frame, structure=kernel)\n\n if is_polyp and first_polyp == -1:\n first_polyp = polyp_n\n frame_output = file.loc[file[0] == polyp_n]\n if has_confidence:\n frame_output = frame_output.loc[frame_output[3] >= threshold]\n\n #if i>35:\n # break\n #print(frame)\n\n if frame_output.empty:\n if is_polyp:\n fn += max_polyp\n else:\n tn += 1\n else:\n already_detected = []\n\n for detection_row in frame_output.iterrows():\n detection = detection_row[1]\n frame_pred = True\n centroid_x = int(detection[1])\n centroid_y = int(detection[2])\n\n #print(\"Detection:\",centroid_x, centroid_y)\n #print(im_frame_np[centroid_y-5:centroid_y+5, centroid_x-5:centroid_x+5])\n\n if frame_pred:\n if is_polyp:\n if im_frame_np[centroid_y, centroid_x] != 0:\n if labeled_frame[centroid_y, centroid_x] not in already_detected:\n tp += 1\n already_detected += [labeled_frame[centroid_y, centroid_x]]\n\n if first_detected_polyp == -1:\n first_detected_polyp = polyp_n\n\n # HISTOLOGIAS:\n histologia_red = int(detection[4])\n\n if (histologia_red == 0) and (histologia_real == 0):\n histo_tn += 1\n elif (histologia_red == 0) and (histologia_real == 1):\n histo_fn += 1\n elif (histologia_red == 1) and (histologia_real == 0):\n histo_fp += 1\n elif (histologia_red == 1) and (histologia_real == 1):\n histo_tp += 1\n else:\n fp += 1\n else:\n fp += 1\n else:\n if not is_polyp:\n tn += 1\n\n detected_in_frame = len(set(already_detected))\n fn += (max_polyp - detected_in_frame)\n\n rt = first_detected_polyp - first_polyp if first_detected_polyp != -1 else -1\n\n positives = histo_fp + histo_tp\n negatives = histo_fn + histo_tn\n pred_histo = 1 if positives >= negatives else 0\n if(positives+negatives) == 0:\n conf = 0\n acc = 0\n else:\n conf = positives/(positives+negatives) if positives >= negatives else negatives/(positives+negatives)\n acc = (histo_tp + histo_tn) / (positives + negatives)\n\n # switched values fn <-> tn, as requested by J.B.\n return [tp, fp, tn, fn, rt], [histo_tp, histo_fp, histo_tn, 
histo_fn, acc, histologia_real, pred_histo, conf]\n\n\ndef generate_results_per_video(videos, confidences, thresholds, gt):\n detect_dict = {}\n local_dict = {}\n classif_dict = {}\n for threshold in thresholds:\n # TODO change plots\n res_detection, _, _ = process_video_for_detection(videos[0], confidences[0], threshold, gt)\n res_localization, res_classif = process_video_for_localization(videos[1], confidences[1], threshold, gt)\n print(\" -thr\",threshold, \"done...\")\n\n detect_dict[threshold] = res_detection\n local_dict[threshold] = res_localization\n classif_dict[threshold] = res_classif\n return detect_dict, local_dict, classif_dict\n\n\ndef do_giana_eval(folder_detection, folder_localization, folder_gt, root_folder_output, team, thr=0, series=False):\n\n # DEBUGGING !!!!!\n nvids = 18 # should be 18\n\n folder_output_detection = os.path.join(root_folder_output, \"Detection/\"+team)\n folder_output_localization = os.path.join(root_folder_output, \"Localization/\"+team)\n folder_output_classif = os.path.join(root_folder_output, \"Classif/\"+team)\n average_detection_output_file = os.path.join(folder_output_detection, \"average.csv\")\n average_localization_output_file = os.path.join(folder_output_localization, \"average.csv\")\n average_classif_output_file = os.path.join(folder_output_classif, \"average.csv\")\n\n if series:\n thresholds = [x / 10 for x in range(1, 10)]\n elif thr!=0:\n thresholds = [thr]\n else:\n thresholds = [0]\n\n if not os.path.exists(folder_output_detection):\n os.makedirs(folder_output_detection)\n if not os.path.exists(folder_output_localization):\n os.makedirs(folder_output_localization)\n if not os.path.exists(folder_output_classif):\n os.makedirs(folder_output_classif)\n\n files_detection = sorted(os.listdir(folder_detection))[0:nvids]\n files_localization = sorted(os.listdir(folder_localization))[0:nvids]\n\n results_detection = {}\n results_localization = {}\n results_classif = {}\n\n # for each video:\n for detection, localization in zip(files_detection, files_localization):\n\n detection_csv = os.path.join(folder_detection, detection)\n detection_df = pd.read_csv(detection_csv, header=None)\n detection_confidence = detection_df.shape[1] > 2\n\n localization_csv = os.path.join(folder_localization, localization)\n localization_df = pd.read_csv(localization_csv, header=None)\n localization_confidence = localization_df.shape[1] > 3\n\n # both named the same\n vid_name = localization_csv.split(\"/\")[-1].split(\".\")[0]\n gt_vid_folder = os.path.join(folder_gt, str(int(vid_name)))\n print('Processing video', vid_name, \"...\")\n res_detection, res_localization, res_classif = generate_results_per_video((detection_df, localization_df),\n (detection_confidence, localization_confidence),\n thresholds, gt_vid_folder)\n\n pd.DataFrame.from_dict(res_detection, columns=[\"TP\", \"FP\", \"TN\", \"FN\", \"RT\"], orient='index').to_csv(\n os.path.join(folder_output_detection, \"d{}.csv\".format(vid_name)))\n results_detection[vid_name] = res_detection\n\n pd.DataFrame.from_dict(res_localization, columns=[\"TP\", \"FP\", \"TN\", \"FN\", \"RT\"], orient='index').to_csv(\n os.path.join(folder_output_localization, \"l{}.csv\".format(vid_name)))\n results_localization[vid_name] = res_localization\n\n pd.DataFrame.from_dict(res_classif, columns=[\"TP\", \"FP\", \"TN\", \"FN\", \"Acc\", \"Histo-real\", \"Histo-pred\", \"Conf\"], orient='index').to_csv(\n os.path.join(folder_output_classif, \"l{}.csv\".format(vid_name)))\n results_classif[vid_name] = res_classif\n\n 
calculate_average_results(results_detection, thresholds, average_detection_output_file)\n calculate_average_results(results_localization, thresholds, average_localization_output_file)\n calculate_average_classif_results(results_classif, thresholds, average_classif_output_file)\n\n #nvids = len(results_detection)\n\n global_detection_list = np.zeros([nvids*len(thresholds), 7])\n global_localization_list = np.zeros([nvids*len(thresholds), 7])\n global_classif_list = np.zeros([nvids*len(thresholds), 10])\n\n i=0;\n j=0;\n k=0;\n for vidname in sorted(results_detection.keys()):\n\n vid = int(vidname)\n\n for key, vals in results_detection[vidname].items():\n\n global_detection_list[i, :] = ([vid] + [key] + vals)\n i += 1\n\n #print(np.around(global_detection_list, decimals=4))\n\n for key, vals in results_localization[vidname].items():\n global_localization_list[j, :] = ([vid] + [key] + vals)\n j += 1\n\n for key, vals in results_classif[vidname].items():\n global_classif_list[k, :] = ([vid] + [key] + vals)\n k += 1\n\n\n #print(\"\")\n\n columns = [\"Video\", \"Thr\", \"TP\", \"FP\", \"TN\", \"FN\", \"RT\"]\n detframe = pd.DataFrame(global_detection_list, columns=columns)\n locframe = pd.DataFrame(global_localization_list, columns=columns)\n classifframe = pd.DataFrame(global_classif_list, columns=[\"Video\", \"Thr\", \"TP\", \"FP\", \"TN\", \"FN\", \"Acc\", \"Histo-real\", \"Histo-pred\", \"Conf\"])\n\n print(\"\")\n\n detframe.to_csv(os.path.join(folder_output_detection, \"detection.csv\"))\n locframe.to_csv(os.path.join(folder_output_localization, \"localization.csv\"))\n classifframe.to_csv(os.path.join(folder_output_classif, \"classification.csv\"))\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n\n ap = ArgumentParser()\n ap.add_argument(\"--res\", \"--results_root\", type=str, default='results')\n ap.add_argument(\"--thr\", \"--threshold\", type=float, default=0)\n ap.add_argument(\"--team\", \"--team\", type=str, required=True)\n ap.add_argument(\"--out\", \"--output_folder\", type=str, default=None)\n ap.add_argument(\"--list\", action='store_true', help=\"threshold series\")\n\n params = ap.parse_args()\n team = params.team.split('.bbox.json')[0].split('json/')[-1]\n\n folder_detection = os.path.join(params.res, \"Detection\")\n folder_detection = os.path.join(folder_detection, team)\n folder_localization = os.path.join(params.res, \"Localization\")\n folder_localization = os.path.join(folder_localization, team)\n output_folder = params.out\n\n if output_folder is None:\n output_folder = os.path.join(params.res, \"results_giana\")\n folder_gt = \"/home/marina/Downloads/DATASETS/cvcvideoclinicdbtest/masks/\"\n\n do_giana_eval(folder_detection, folder_localization, folder_gt, output_folder, team, params.thr, params.list)\n" ]
[ [ "numpy.zeros", "pandas.read_csv", "pandas.DataFrame", "numpy.asarray", "matplotlib.pyplot.clf", "matplotlib.pyplot.title", "numpy.array", "matplotlib.pyplot.plot", "scipy.ndimage.measurements.label", "pandas.DataFrame.from_dict" ] ]
dorlivne/PoPS
[ "088425d1a40a4c2e6856b07744281cd8ab9bce3b" ]
[ "Pacman/processor.py" ]
[ "import numpy as np\nfrom PIL import Image\nfrom copy import deepcopy\nINPUT_SHAPE = (84, 84)\n\n\ndef init_state():\n # return np.zeros((84, 84, 4))\n return np.zeros((4, 84, 84))\n\ndef append_frame(state, frame):\n # new_state = deepcopy(state)\n # new_state[:, :, :-1] = state[:, :, 1:]\n # new_state[:, :, -1] = frame\n new_state = deepcopy(state)\n new_state[:-1, :, :, ] = state[1:, :, :]\n new_state[-1, :, :] = frame\n del state\n return new_state\n\n\ndef process_observation(observation):\n assert observation.ndim == 3\n img = Image.fromarray(observation)\n img = img.resize(INPUT_SHAPE).convert('L') # resize and convert to grayscale\n processed_observation = np.array(img)\n assert processed_observation.shape == INPUT_SHAPE\n return processed_observation.astype('float32') / 255. # saves storage in experience memory\n\n\ndef process_state_batch(batch):\n return np.asarray(batch).astype('float32') / 255.\n\n\ndef clip_rewards(reward):\n return np.clip(reward, -1, 1)\n" ]
[ [ "numpy.array", "numpy.clip", "numpy.asarray", "numpy.zeros" ] ]
kim-com/tensorflow
[ "4301e3f34b8da528c58bdafe05cd66c8a55fce9e" ]
[ "tensorflow/python/eager/function_cache.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Cache to manage concrete functions and their signatures.\"\"\"\n\nimport collections\nfrom typing import Optional, Sequence, Tuple\n\nfrom tensorflow.core.function import trace_type\nfrom tensorflow.core.function.polymorphism import type_dispatch\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.framework import func_graph as func_graph_module\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.saved_model import save_context\nfrom tensorflow.python.types import trace\nfrom tensorflow.python.util import memory\n\n# TODO(b/182990542): Enable and remove flag when stable.\nDELETE_WITH_WEAKREF = False\n\nExecutionContext = collections.namedtuple(\"ExecutionContext\", [\n \"parent_graph\",\n \"device_functions\",\n \"colocation_stack\",\n \"in_cross_replica_context\",\n \"variable_policy\",\n \"xla_context_id\",\n])\n\n\nclass FunctionCacheKey(trace.TraceType):\n \"\"\"The unique key associated with a concrete function.\n\n Attributes:\n function_signature: A TraceType corresponding to the function arguments.\n call_context: The ExecutionContext for when the function_signature was\n generated.\n \"\"\"\n\n def __init__(self, function_signature: trace.TraceType,\n call_context: ExecutionContext):\n self.function_signature = function_signature\n self.call_context = call_context\n\n def is_subtype_of(self, other: trace.TraceType) -> bool:\n if not isinstance(other, FunctionCacheKey):\n return False\n\n if self.call_context != other.call_context:\n return False\n\n return self.function_signature.is_subtype_of(other.function_signature)\n\n def most_specific_common_supertype(\n self, others: Sequence[trace.TraceType]) -> Optional[\"FunctionCacheKey\"]:\n if not all(\n isinstance(other, FunctionCacheKey) and\n self.call_context == other.call_context for other in others):\n return None\n\n common = self.function_signature.most_specific_common_supertype(\n [other.function_signature for other in others])\n\n if common is None:\n return None\n\n return FunctionCacheKey(common, self.call_context)\n\n def __hash__(self) -> int:\n return hash((self.call_context, self.function_signature))\n\n def __eq__(self, other) -> bool:\n if not isinstance(other, trace.TraceType):\n return NotImplemented\n\n if not isinstance(other, FunctionCacheKey):\n return False\n\n return (self.call_context == other.call_context and\n self.function_signature == other.function_signature)\n\n def __repr__(self) -> str:\n return (\n f\"{type(self).__name__}(function_signature={repr(self.function_signature)},\"\n f\" call_context={repr(self.call_context)})\")\n\n\nclass FunctionCache:\n \"\"\"A container for managing concrete functions.\"\"\"\n\n __slots__ = [\n \"_missed\", \"_primary\", 
\"_dispatch_table\", \"arg_relaxed_specs\",\n \"arg_relaxed\", \"_garbage_collectors\"\n ]\n\n def __init__(self):\n # The set of functions that have been missed; entries are ExecutionContext.\n self._missed = set()\n # The primary cache, mapping FunctionCacheKey to a concrete function.\n self._primary = collections.OrderedDict()\n\n # Maps a FunctionCacheKey K to a FunctionCacheKey V such that it is safe\n # to dispatch K to the concrete function of V that exists in _primary.\n # Used to lookup posible concrete functions when K is not in _primary.\n self._dispatch_table = type_dispatch.TypeDispatchTable()\n\n # TODO(b/202430155): Incorporate relaxation logic inside FunctionCache.\n # A cache key lookup, mapping a cache key generated without shape info to a\n # flat list of `TypeSpec`s with relaxed shapes (one for each flattened\n # argument). Arguments that are not Tensors or `CompositeTensor`s contain a\n # `None` for the corresponding relaxed spec.\n self.arg_relaxed_specs = collections.OrderedDict()\n # The secondary cache, mapping a cache key generated without shape info to a\n # function.\n self.arg_relaxed = collections.OrderedDict()\n # All OrderedDicts require manual garbage collection.\n\n self._garbage_collectors = [\n _FunctionGarbageCollector(self._primary),\n _FunctionGarbageCollector(self.arg_relaxed),\n _FunctionGarbageCollector(self.arg_relaxed_specs)\n ]\n\n # Note: Instead of returning any viable function, we can return the most\n # specfic one by maintaining trees of traces where children are more specific\n # traces of their parents.\n def lookup(self, key: FunctionCacheKey, use_function_subtyping: bool):\n \"\"\"Looks up a concrete function based on the key.\"\"\"\n if not use_function_subtyping:\n return self._primary.get(key, None)\n\n dispatch_key = self._dispatch_table.dispatch(key)\n if dispatch_key is not None:\n return self._primary[dispatch_key]\n\n return None\n\n def delete(self, key: FunctionCacheKey):\n \"\"\"Deletes a concrete function given the key it was added with.\"\"\"\n if key not in self._primary:\n return False\n\n del self._primary[key]\n self._dispatch_table.delete(key)\n\n return True\n\n def add(self, key: FunctionCacheKey,\n deletion_observer: trace_type.WeakrefDeletionObserver,\n concrete):\n \"\"\"Adds a new concrete function alongside its key.\n\n Args:\n key: A FunctionCacheKey object corresponding to the provided `concrete`.\n deletion_observer: A WeakrefDeletionObserver object for the `key`.\n concrete: The concrete function to be added to the cache.\n \"\"\"\n self._primary[key] = concrete\n self._dispatch_table.add_target(key)\n deletion_observer.add_listener(\n lambda: self.delete(key) if DELETE_WITH_WEAKREF else None)\n\n # TODO(b/205971333): Remove this function.\n def clear(self):\n \"\"\"Removes all concrete functions from the cache.\"\"\"\n self._primary.clear()\n self._dispatch_table.clear()\n self.arg_relaxed_specs.clear()\n self.arg_relaxed.clear()\n\n def values(self):\n \"\"\"Returns a list of all `ConcreteFunction` instances held by this cache.\"\"\"\n # We need to simultaneously make sure our returned concrete functions are\n # unique *and* make sure they are returned in a deterministic order for\n # serialization.\n #\n # TODO(b/174215821): It's likely that we ultimately would just prefer to\n # choose the most specific concrete function shape given a set of\n # arguments. 
If and when that is implemented, this logic can be revisited.\n primary_functions = set(self._primary.values())\n return list(self._primary.values()) + [\n v for v in self.arg_relaxed.values() if v not in primary_functions\n ]\n\n def has_call_context(self, call_context: ExecutionContext) -> bool:\n \"\"\"Checks if an ExcutionContext was observed.\"\"\"\n return call_context in self._missed\n\n def add_call_context(self, call_context: ExecutionContext) -> None:\n \"\"\"Adds a new ExcutionContext observation.\"\"\"\n self._missed.add(call_context)\n\n\nclass _FunctionGarbageCollector(object):\n \"\"\"Cleans up cycles when a defun goes out of scope.\"\"\"\n\n __slots__ = [\"_cache\"]\n\n def __init__(self, cache):\n self._cache = cache\n\n def __del__(self):\n if func_graph_module is None or memory is None:\n return\n try:\n while self._cache:\n self._cache.popitem()\n memory.dismantle_ordered_dict(self._cache)\n except: # pylint: disable=bare-except\n pass\n\n\ndef make_cache_key(\n args,\n include_tensor_ranks_only: bool = False\n) -> Tuple[FunctionCacheKey, trace_type.WeakrefDeletionObserver]:\n \"\"\"Computes the cache key given the function arguments.\"\"\"\n signature_context = trace_type.SignatureContext(\n include_tensor_ranks_only)\n function_signature = trace_type.make_function_signature(\n args, signature_context)\n return FunctionCacheKey(\n function_signature,\n _make_execution_context()), signature_context.deletion_observer\n\n\ndef _make_execution_context() -> ExecutionContext:\n \"\"\"Generates an ExecutionContext based on current contextual info.\"\"\"\n ctx = context.context()\n\n # Don't need to open an init_scope if the _cache_key call is in eager mode\n # already.\n executing_eagerly = ctx.executing_eagerly()\n parent_graph = None\n xla_context_id = 0\n if not executing_eagerly:\n # We want to force function retracing for each different\n # XLAControlFlowContext, so add `xla_context_id` to the cache key.\n xla_context = _enclosing_xla_context()\n if xla_context is not None and xla_context.RequiresUniqueFunctionRetracing(\n ):\n xla_context_id = id(xla_context)\n\n with ops.init_scope():\n # The graph, or whether we're executing eagerly, should be a part of the\n # cache key so we don't improperly capture tensors such as variables.\n executing_eagerly = ctx.executing_eagerly()\n parent_graph = None if executing_eagerly else ops.get_default_graph()\n\n # pylint: disable=protected-access\n default_graph = ops.get_default_graph()\n # TODO(b/117617952): The current distribution strategy will affect graph\n # building (e.g. 
accessing different variables from different devices) and\n # so requires retracing for each device.\n strategy_stack = default_graph._distribution_strategy_stack\n uses_distribution_strategy = (\n strategy_stack and\n strategy_stack[-1].strategy.extended._retrace_functions_for_each_device)\n if executing_eagerly:\n colocation_stack = ()\n if uses_distribution_strategy:\n device_functions = (pydev.merge_device(ctx.device_name),)\n else:\n device_functions = ()\n else:\n colocation_stack = tuple(default_graph._colocation_stack.peek_objs())\n if (uses_distribution_strategy or\n func_graph_module.device_stack_has_callable(\n default_graph._device_function_stack)):\n # Putting the device in the cache key ensures that call-site device\n # annotations are respected.\n device_functions = tuple(default_graph._device_functions_outer_to_inner)\n else:\n device_functions = ()\n\n in_cross_replica_context = False\n try:\n in_cross_replica_context = (strategy_stack[-1].replica_context is None) # pylint: disable=protected-access\n except (AttributeError, IndexError):\n pass\n\n if save_context.in_save_context():\n variable_policy = (\n save_context.get_save_options().experimental_variable_policy)\n else:\n variable_policy = None\n\n return ExecutionContext(parent_graph, device_functions, colocation_stack,\n in_cross_replica_context, variable_policy,\n xla_context_id)\n\n\ndef _enclosing_xla_context():\n \"\"\"Returns the XLAControlFlowContext, which exists inside a tpu.rewrite().\"\"\"\n graph = ops.get_default_graph()\n while graph is not None:\n # pylint: disable=protected-access\n context_ = graph._get_control_flow_context()\n # pylint: enable=protected-access\n while context_ is not None:\n if isinstance(context_, control_flow_ops.XLAControlFlowContext):\n return context_\n context_ = context_.outer_context\n # This may be a FuncGraph due to defuns or v2 control flow. We need to\n # find the original graph with the XLAControlFlowContext.\n graph = getattr(graph, \"outer_graph\", None)\n return None\n" ]
[ [ "tensorflow.core.function.trace_type.SignatureContext", "tensorflow.python.framework.func_graph.device_stack_has_callable", "tensorflow.python.util.memory.dismantle_ordered_dict", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.saved_model.save_context.get_save_options", "tensorflow.core.function.polymorphism.type_dispatch.TypeDispatchTable", "tensorflow.python.eager.context.context", "tensorflow.python.framework.device.merge_device", "tensorflow.python.saved_model.save_context.in_save_context", "tensorflow.core.function.trace_type.make_function_signature", "tensorflow.python.framework.ops.init_scope" ] ]
skye/flax
[ "23a91dbc27dd182e26f196546468d33238ca5735" ]
[ "examples/lm1b/train.py" ]
[ "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Language Modeling example.\n\nThis script trains a Transformer on the lm1b dataset.\nThe data is loaded using tensorflow_datasets.\n\"\"\"\n\nimport functools\nimport itertools\nimport os\nimport time\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom flax import jax_utils\nfrom flax import nn\nfrom flax import optim\nimport decode\nimport input_pipeline\nimport models\nfrom flax.metrics import tensorboard\nfrom flax.training import checkpoints\nfrom flax.training import common_utils\nimport jax\nfrom jax import random\nimport jax.nn\nimport jax.numpy as jnp\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'model_dir', default=None,\n help='Directory to store model data.')\n\nflags.DEFINE_string(\n 'data_dir', default=None,\n help='Directory containing TFDS lm1b/subwords32k dataset.')\n\nflags.DEFINE_integer(\n 'batch_size', default=2048,\n help='Batch size for training.')\n\nflags.DEFINE_integer(\n 'eval_frequency', default=1000,\n help='Frequency of eval during training, e.g. every 1000 steps.')\n\nflags.DEFINE_integer(\n 'num_train_steps', default=500000,\n help='Number of training steps.')\n\nflags.DEFINE_integer(\n 'num_eval_steps', default=20,\n help='Number of evaluation steps. If -1 use the whole evaluation set.')\n\nflags.DEFINE_float(\n 'learning_rate', default=0.05,\n help='Learning rate.')\n\nflags.DEFINE_float(\n 'weight_decay', default=1e-1,\n help='Decay factor for AdamW-style weight decay.')\n\nflags.DEFINE_integer(\n 'max_target_length', default=512,\n help='Maximum length of training examples.')\n\nflags.DEFINE_integer(\n 'max_eval_target_length', default=2048,\n help='Maximum length of eval examples.')\n\nflags.DEFINE_float(\n 'sampling_temperature', default=0.6,\n help='Sampling temperature for language model inference.')\n\nflags.DEFINE_integer(\n 'sampling_top_k', default=20,\n help='Top k cutoff for logit sampling. 
If 0 then no top-k cutoff is used.')\n\nflags.DEFINE_string(\n 'prompt', default='I love to ',\n help='Prompt for language model sampling.')\n\nflags.DEFINE_integer(\n 'max_predict_token_length', default=50,\n help='Maximum example text inference token length.')\n\nflags.DEFINE_bool(\n 'save_checkpoints', default=True,\n help='Whether to save model checkpoints for debugging.')\n\nflags.DEFINE_bool(\n 'restore_checkpoints', default=True,\n help='Whether to restore from existing model checkpoints.')\n\nflags.DEFINE_integer(\n 'checkpoint_freq', default=10000,\n help='Whether to restore from existing model checkpoints.')\n\nflags.DEFINE_integer(\n 'random_seed', default=0,\n help='Integer for PRNG random seed.')\n\n\[email protected](jax.jit, static_argnums=(1, 2))\ndef create_model(key, input_shape, model_kwargs):\n module = models.TransformerLM.partial(**model_kwargs)\n with nn.attention.Cache().mutate() as cache_def:\n _, initial_params = module.init_by_shape(key,\n [(input_shape, jnp.float32)],\n cache=cache_def)\n model = nn.Model(module, initial_params)\n return model, cache_def\n\n\ndef create_optimizer(model, learning_rate):\n optimizer_def = optim.Adam(\n learning_rate,\n beta1=0.9,\n beta2=0.98,\n eps=1e-9,\n weight_decay=FLAGS.weight_decay)\n optimizer = optimizer_def.create(model)\n return optimizer\n\n\ndef create_learning_rate_scheduler(\n factors='constant * linear_warmup * rsqrt_decay',\n base_learning_rate=0.5,\n warmup_steps=8000,\n decay_factor=0.5,\n steps_per_decay=20000,\n steps_per_cycle=100000):\n \"\"\"Creates learning rate schedule.\n\n Interprets factors in the factors string which can consist of:\n * constant: interpreted as the constant value,\n * linear_warmup: interpreted as linear warmup until warmup_steps,\n * rsqrt_decay: divide by square root of max(step, warmup_steps)\n * decay_every: Every k steps decay the learning rate by decay_factor.\n * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.\n\n Args:\n factors: a string with factors separated by '*' that defines the schedule.\n base_learning_rate: float, the starting constant for the lr schedule.\n warmup_steps: how many steps to warm up for in the warmup schedule.\n decay_factor: The amount to decay the learning rate by.\n steps_per_decay: How often to decay the learning rate.\n steps_per_cycle: Steps per cycle when using cosine decay.\n\n Returns:\n A function learning_rate(step): float -> {'learning_rate': float}, the\n step-dependent lr.\n \"\"\"\n factors = [n.strip() for n in factors.split('*')]\n\n def step_fn(step):\n \"\"\"Step to learning rate function.\"\"\"\n ret = 1.0\n for name in factors:\n if name == 'constant':\n ret *= base_learning_rate\n elif name == 'linear_warmup':\n ret *= jnp.minimum(1.0, step / warmup_steps)\n elif name == 'rsqrt_decay':\n ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))\n elif name == 'rsqrt_normalized_decay':\n ret *= jnp.sqrt(warmup_steps)\n ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))\n elif name == 'decay_every':\n ret *= (decay_factor**(step // steps_per_decay))\n elif name == 'cosine_decay':\n progress = jnp.maximum(0.0,\n (step - warmup_steps) / float(steps_per_cycle))\n ret *= jnp.maximum(0.0,\n 0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))\n else:\n raise ValueError('Unknown factor %s.' 
% name)\n return jnp.asarray(ret, dtype=jnp.float32)\n\n return step_fn\n\n\ndef compute_weighted_cross_entropy(logits, targets, weights=None):\n \"\"\"Compute weighted cross entropy and entropy for log probs and targets.\n\n Args:\n logits: [batch, length, num_classes] float array.\n targets: categorical targets [batch, length] int array.\n weights: None or array of shape [batch x length]\n\n Returns:\n Tuple of scalar loss and batch normalizing factor.\n \"\"\"\n if logits.ndim != targets.ndim + 1:\n raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %\n (str(logits.shape), str(targets.shape)))\n onehot_targets = common_utils.onehot(targets, logits.shape[-1])\n loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)\n normalizing_factor = onehot_targets.sum()\n if weights is not None:\n loss = loss * weights\n normalizing_factor = weights.sum()\n\n return loss.sum(), normalizing_factor\n\n\ndef compute_weighted_accuracy(logits, targets, weights=None):\n \"\"\"Compute weighted accuracy for log probs and targets.\n\n Args:\n logits: [batch, length, num_classes] float array.\n targets: categorical targets [batch, length] int array.\n weights: None or array of shape [batch x length]\n\n Returns:\n Tuple of scalar accuracy and batch normalizing factor.\n \"\"\"\n if logits.ndim != targets.ndim + 1:\n raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %\n (str(logits.shape), str(targets.shape)))\n loss = jnp.equal(jnp.argmax(logits, axis=-1), targets)\n normalizing_factor = jnp.prod(logits.shape[:-1])\n if weights is not None:\n loss = loss * weights\n normalizing_factor = weights.sum()\n\n return loss.sum(), normalizing_factor\n\n\ndef compute_metrics(logits, labels, weights):\n \"\"\"Compute summary metrics.\"\"\"\n loss, weight_sum = compute_weighted_cross_entropy(logits, labels, weights)\n acc, _ = compute_weighted_accuracy(logits, labels, weights)\n metrics = {\n 'loss': loss,\n 'accuracy': acc,\n 'denominator': weight_sum,\n }\n metrics = jax.lax.psum(metrics, 'batch')\n return metrics\n\n\ndef train_step(optimizer, inputs, learning_rate_fn, dropout_rng=None):\n \"\"\"Perform a single training step.\"\"\"\n weights = jnp.where(inputs > 0, 1, 0)\n\n # We handle PRNG splitting inside the top pmap, rather\n # than handling it outside in the training loop - doing the\n # latter can add some stalls to the devices.\n dropout_rng, new_dropout_rng = random.split(dropout_rng)\n\n def loss_fn(model):\n \"\"\"Loss function used for training.\"\"\"\n with nn.stochastic(dropout_rng):\n logits = model(inputs, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, inputs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits\n\n step = optimizer.state.step\n lr = learning_rate_fn(step)\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (_, logits), grad = grad_fn(optimizer.target)\n grad = jax.lax.pmean(grad, 'batch')\n new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)\n metrics = compute_metrics(logits, inputs, weights)\n metrics['learning_rate'] = lr\n\n return new_optimizer, metrics, new_dropout_rng\n\n\ndef eval_step(model, inputs):\n weights = jnp.where(inputs > 0, 1, 0)\n logits = model(inputs, train=False)\n return compute_metrics(logits, inputs, weights)\n\n\ndef predict_step(inputs, model, cache, prng_key):\n \"\"\"Fast sampling of language model from prompt.\"\"\"\n prefix_len = inputs.shape[1]\n pad_len = FLAGS.max_predict_token_length - prefix_len\n padded_inputs = jnp.pad(inputs, 
jnp.array([[0, 0], [0, pad_len]]))\n\n def tokens_ids_to_logits(ids, cache):\n \"\"\"Token slice to logits from decoder model.\"\"\"\n with cache.mutate() as new_cache:\n logits = model(ids, shift=False, train=False, cache=new_cache)\n # Remove singleton sequence-length dimension\n # [batch, 1, vocab] --> [batch, vocab]\n logits = logits.squeeze(axis=1)\n return logits, new_cache\n\n sampled_seqs = decode.temperature_sample(\n padded_inputs,\n cache,\n tokens_ids_to_logits,\n prng_key,\n temperature=FLAGS.sampling_temperature,\n topk=FLAGS.sampling_top_k,\n eos_token=2**16) # No EOS tokens used in default lm1b dataset encoding.\n\n return sampled_seqs\n\n\ndef tohost(x):\n \"\"\"Collect batches from all devices to host and flatten batch dimensions.\"\"\"\n n_device, n_batch, *remaining_dims = x.shape\n return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n tf.enable_v2_behavior()\n\n batch_size = FLAGS.batch_size\n learning_rate = FLAGS.learning_rate\n num_train_steps = FLAGS.num_train_steps\n num_eval_steps = FLAGS.num_eval_steps\n eval_freq = FLAGS.eval_frequency\n max_target_length = FLAGS.max_target_length\n max_eval_target_length = FLAGS.max_eval_target_length\n random_seed = FLAGS.random_seed\n\n if jax.host_id() == 0:\n train_summary_writer = tensorboard.SummaryWriter(\n os.path.join(FLAGS.model_dir, 'train'))\n eval_summary_writer = tensorboard.SummaryWriter(\n os.path.join(FLAGS.model_dir, 'eval'))\n\n if batch_size % jax.device_count() > 0:\n raise ValueError('Batch size must be divisible by the number of devices')\n train_ds, eval_ds, info_ds = input_pipeline.get_lm1b_datasets(\n n_devices=jax.local_device_count(),\n data_dir=FLAGS.data_dir,\n batch_size=batch_size,\n dynamic_batching=True,\n max_target_length=max_target_length,\n max_eval_target_length=max_eval_target_length)\n vocab_size = info_ds['text'].encoder.vocab_size\n encoder = info_ds['text'].encoder\n\n train_iter = iter(train_ds)\n input_shape = (batch_size, max_target_length)\n\n transformer_lm_kwargs = {\n 'vocab_size': vocab_size,\n 'emb_dim': 512,\n 'num_heads': 8,\n 'num_layers': 6,\n 'qkv_dim': 512,\n 'mlp_dim': 2048,\n 'max_len': max(max_target_length, max_eval_target_length)\n }\n\n rng = random.PRNGKey(random_seed)\n rng = jax.random.fold_in(rng, jax.host_id())\n rng, init_rng = random.split(rng)\n # We init the first set of dropout PRNG keys, but update it afterwards inside\n # the main pmap'd training update for performance.\n dropout_rngs = random.split(rng, jax.local_device_count())\n\n model, cache_def = create_model(init_rng, input_shape, transformer_lm_kwargs)\n optimizer = create_optimizer(model, learning_rate)\n del model # Don't keep a copy of the initial model.\n start_step = 0\n if FLAGS.restore_checkpoints:\n # Restore unreplicated optimizer + model state from last checkpoint.\n optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)\n # Grab last step.\n start_step = int(optimizer.state.step)\n\n # Replicate optimizer.\n optimizer = jax_utils.replicate(optimizer)\n\n learning_rate_fn = create_learning_rate_scheduler(\n base_learning_rate=learning_rate)\n p_train_step = jax.pmap(\n functools.partial(train_step, learning_rate_fn=learning_rate_fn),\n axis_name='batch')\n p_eval_step = jax.pmap(eval_step, axis_name='batch')\n p_pred_step = jax.pmap(predict_step, axis_name='batch')\n\n metrics_all = []\n tick = time.time()\n for step, batch in 
zip(range(start_step, num_train_steps), train_iter):\n batch = common_utils.shard(jax.tree_map(lambda x: x._numpy(), batch)) # pylint: disable=protected-access\n optimizer, metrics, dropout_rngs = p_train_step(\n optimizer, batch, dropout_rng=dropout_rngs)\n metrics_all.append(metrics)\n\n # Save a Checkpoint\n if ((step % FLAGS.checkpoint_freq == 0 and step > 0) or\n step == num_train_steps - 1):\n if jax.host_id() == 0 and FLAGS.save_checkpoints:\n # Save unreplicated optimizer + model state.\n checkpoints.save_checkpoint(\n FLAGS.model_dir, jax_utils.unreplicate(optimizer), step)\n\n # Periodic metric handling.\n if step % eval_freq == 0 and step > 0:\n metrics_all = common_utils.get_metrics(metrics_all)\n lr = metrics_all.pop('learning_rate').mean()\n metrics_sums = jax.tree_map(jnp.sum, metrics_all)\n denominator = metrics_sums.pop('denominator')\n summary = jax.tree_map(lambda x: x / denominator, metrics_sums) # pylint: disable=cell-var-from-loop\n summary['learning_rate'] = lr\n # Calculate (clipped) perplexity after averaging log-perplexities:\n summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)\n logging.info('train in step: %d, loss: %.4f', step, summary['loss'])\n if jax.host_id() == 0:\n tock = time.time()\n steps_per_sec = eval_freq / (tock - tick)\n tick = tock\n train_summary_writer.scalar('steps per second', steps_per_sec, step)\n for key, val in summary.items():\n train_summary_writer.scalar(key, val, step)\n train_summary_writer.flush()\n # Reset metric accumulation for next evaluation cycle.\n metrics_all = []\n\n # Eval Metrics\n eval_metrics = []\n eval_iter = iter(eval_ds)\n if num_eval_steps == -1:\n num_iter = itertools.repeat(1)\n else:\n num_iter = range(num_eval_steps)\n for _, eval_batch in zip(num_iter, eval_iter):\n # pylint: disable=protected-access\n eval_batch = common_utils.shard(\n jax.tree_map(lambda x: x._numpy(), eval_batch))\n # pylint: enable=protected-access\n metrics = p_eval_step(optimizer.target, eval_batch)\n eval_metrics.append(metrics)\n eval_metrics = common_utils.get_metrics(eval_metrics)\n eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)\n eval_denominator = eval_metrics_sums.pop('denominator')\n eval_summary = jax.tree_map(\n lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop\n eval_metrics_sums)\n # Calculate (clipped) perplexity after averaging log-perplexities:\n eval_summary['perplexity'] = jnp.clip(\n jnp.exp(eval_summary['loss']), a_max=1.0e4)\n logging.info('eval in step: %d, loss: %.4f', step, eval_summary['loss'])\n if jax.host_id() == 0:\n for key, val in eval_summary.items():\n eval_summary_writer.scalar(key, val, step)\n eval_summary_writer.flush()\n\n # Fast inference of prompt extension using trained LM.\n rng, subrng = jax.random.split(rng)\n pred_rngs = random.split(subrng, jax.local_device_count())\n prompt = jnp.array(encoder.encode(FLAGS.prompt))\n prompt = jax_utils.replicate(prompt)\n prompt = jnp.reshape(prompt, (prompt.shape[0], 1, prompt.shape[1]))\n cache = jax_utils.replicate(\n cache_def.initialize_cache((1, FLAGS.max_predict_token_length)))\n predicted = p_pred_step(prompt, optimizer.target, cache, pred_rngs)\n predicted = tohost(predicted)\n exemplars = ''\n for n in range(predicted.shape[0]):\n exemplars += encoder.decode(predicted[n]) + '\\n\\n'\n if jax.host_id() == 0:\n eval_summary_writer.text('samples', exemplars, step)\n eval_summary_writer.flush()\n\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "numpy.array", "tensorflow.compat.v2.enable_v2_behavior" ] ]
albertomancino/elliot
[ "339c6421b86646c7a5a1f5001b08a16550ed1d37" ]
[ "elliot/run.py" ]
[ "\"\"\"\nModule description:\n\n\"\"\"\n\n__version__ = '0.3.1'\n__author__ = 'Vito Walter Anelli, Claudio Pomo'\n__email__ = '[email protected], [email protected]'\n\nimport importlib\nimport sys\nfrom os import path\n\nimport numpy as np\nfrom hyperopt import Trials, fmin\n\nimport elliot.hyperoptimization as ho\nfrom elliot.namespace.namespace_model_builder import NameSpaceBuilder\nfrom elliot.result_handler.result_handler import ResultHandler, HyperParameterStudy, StatTest\nfrom elliot.utils import logging as logging_project\n\n_rstate = np.random.RandomState(42)\nhere = path.abspath(path.dirname(__file__))\n\nprint(u'''\n /\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\\\ \n \\\\/\\\\\\\\\\\\/////////// \\\\////\\\\\\\\\\\\ \\\\////\\\\\\\\\\\\ \n \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ /\\\\\\\\\\\\ /\\\\\\\\\\\\ \n \\\\/\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/// /\\\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ \n \\\\/\\\\\\\\\\\\/////// \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ /\\\\\\\\\\\\ /\\\\\\\\\\\\///\\\\\\\\\\\\ \\\\////\\\\\\\\\\\\//// \n \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ /\\\\\\\\\\\\ \\\\//\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \n \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\//\\\\\\\\\\\\ /\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ /\\\\\\\\ \n \\\\/\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ \\\\/\\\\\\\\\\\\ \\\\///\\\\\\\\\\\\\\\\\\\\/ \\\\//\\\\\\\\\\\\\\\\\\\\ \n \\\\/////////////// \\\\///////// \\\\///////// \\\\/// \\\\///// \\\\///// ''')\n\nprint(f'Version Number: {__version__}')\n\n\ndef run_experiment(config_path: str = ''):\n builder = NameSpaceBuilder(config_path, here, path.abspath(path.dirname(config_path)))\n base = builder.base\n config_test(builder, base)\n logging_project.init(base.base_namespace.path_logger_config, base.base_namespace.path_log_folder)\n logger = logging_project.get_logger(\"__main__\")\n\n if base.base_namespace.version != __version__:\n logger.error(f'Your config file use a different version of Elliot! '\n f'In different versions of Elliot the results may slightly change due to progressive improvement! '\n f'Some feature could be deprecated! Download latest version at this link '\n f'https://github.com/sisinflab/elliot/releases')\n raise Exception(\n 'Version mismatch! 
In different versions of Elliot the results may slightly change due to progressive improvement!')\n\n logger.info(\"Start experiment\")\n base.base_namespace.evaluation.relevance_threshold = getattr(base.base_namespace.evaluation, \"relevance_threshold\",\n 0)\n res_handler = ResultHandler(rel_threshold=base.base_namespace.evaluation.relevance_threshold)\n hyper_handler = HyperParameterStudy(rel_threshold=base.base_namespace.evaluation.relevance_threshold)\n dataloader_class = getattr(importlib.import_module(\"elliot.dataset\"), base.base_namespace.data_config.dataloader)\n dataloader = dataloader_class(config=base.base_namespace)\n data_test_list = dataloader.generate_dataobjects()\n for key, model_base in builder.models():\n test_results = []\n test_trials = []\n for test_fold_index, data_test in enumerate(data_test_list):\n logging_project.prepare_logger(key, base.base_namespace.path_log_folder)\n if key.startswith(\"external.\"):\n spec = importlib.util.spec_from_file_location(\"external\",\n path.relpath(base.base_namespace.external_models_path))\n external = importlib.util.module_from_spec(spec)\n sys.modules[spec.name] = external\n spec.loader.exec_module(external)\n model_class = getattr(importlib.import_module(\"external\"), key.split(\".\", 1)[1])\n else:\n model_class = getattr(importlib.import_module(\"elliot.recommender\"), key)\n\n model_placeholder = ho.ModelCoordinator(data_test, base.base_namespace, model_base, model_class,\n test_fold_index)\n if isinstance(model_base, tuple):\n logger.info(f\"Tuning begun for {model_class.__name__}\\\\n\")\n trials = Trials()\n fmin(model_placeholder.objective,\n space=model_base[1],\n algo=model_base[3],\n trials=trials,\n verbose=False,\n rstate=_rstate,\n max_evals=model_base[2])\n\n # argmin relativo alla combinazione migliore di iperparametri\n min_val = np.argmin([i[\"result\"][\"loss\"] for i in trials._trials])\n ############################################\n best_model_loss = trials._trials[min_val][\"result\"][\"loss\"]\n best_model_params = trials._trials[min_val][\"result\"][\"params\"]\n best_model_results = trials._trials[min_val][\"result\"][\"test_results\"]\n ############################################\n\n # aggiunta a lista performance test\n test_results.append(trials._trials[min_val][\"result\"])\n test_trials.append(trials)\n logger.info(f\"Tuning ended for {model_class.__name__}\")\n else:\n logger.info(f\"Training begun for {model_class.__name__}\\\\n\")\n single = model_placeholder.single()\n\n ############################################\n best_model_loss = single[\"loss\"]\n best_model_params = single[\"params\"]\n best_model_results = single[\"test_results\"]\n ############################################\n\n # aggiunta a lista performance test\n test_results.append(single)\n logger.info(f\"Training ended for {model_class.__name__}\")\n\n logger.info(f\"Loss:\\\\t{best_model_loss}\")\n logger.info(f\"Best Model params:\\\\t{best_model_params}\")\n logger.info(f\"Best Model results:\\\\t{best_model_results}\")\n\n # Migliore sui test, aggiunta a performance totali\n min_val = np.argmin([i[\"loss\"] for i in test_results])\n\n res_handler.add_oneshot_recommender(**test_results[min_val])\n\n if isinstance(model_base, tuple):\n hyper_handler.add_trials(test_trials[min_val])\n\n # res_handler.save_results(output=base.base_namespace.path_output_rec_performance)\n hyper_handler.save_trials(output=base.base_namespace.path_output_rec_performance)\n 
res_handler.save_best_results(output=base.base_namespace.path_output_rec_performance)\n cutoff_k = getattr(base.base_namespace.evaluation, \"cutoffs\", [base.base_namespace.top_k])\n cutoff_k = cutoff_k if isinstance(cutoff_k, list) else [cutoff_k]\n first_metric = base.base_namespace.evaluation.simple_metrics[\n 0] if base.base_namespace.evaluation.simple_metrics else \"\"\n res_handler.save_best_models(output=base.base_namespace.path_output_rec_performance, default_metric=first_metric,\n default_k=cutoff_k)\n if hasattr(base.base_namespace,\n \"print_results_as_triplets\") and base.base_namespace.print_results_as_triplets == True:\n res_handler.save_best_results_as_triplets(output=base.base_namespace.path_output_rec_performance)\n hyper_handler.save_trials_as_triplets(output=base.base_namespace.path_output_rec_performance)\n if hasattr(base.base_namespace.evaluation, \"paired_ttest\") and base.base_namespace.evaluation.paired_ttest:\n res_handler.save_best_statistical_results(stat_test=StatTest.PairedTTest,\n output=base.base_namespace.path_output_rec_performance)\n if hasattr(base.base_namespace.evaluation, \"wilcoxon_test\") and base.base_namespace.evaluation.wilcoxon_test:\n res_handler.save_best_statistical_results(stat_test=StatTest.WilcoxonTest,\n output=base.base_namespace.path_output_rec_performance)\n\n logger.info(\"End experiment\")\n\n\ndef _reset_verbose_option(model):\n if isinstance(model, tuple):\n model[0].meta.verbose = False\n model[0].meta.save_recs = False\n model[0].meta.save_weights = False\n else:\n model.meta.verbose = False\n model.meta.save_recs = False\n model.meta.save_weights = False\n return model\n\n\ndef config_test(builder, base):\n if base.base_namespace.config_test:\n logging_project.init(base.base_namespace.path_logger_config, base.base_namespace.path_log_folder)\n logger = logging_project.get_logger(\"__main__\")\n logger.info(\"Start config test\")\n base.base_namespace.evaluation.relevance_threshold = getattr(base.base_namespace.evaluation,\n \"relevance_threshold\", 0)\n res_handler = ResultHandler(rel_threshold=base.base_namespace.evaluation.relevance_threshold)\n hyper_handler = HyperParameterStudy(rel_threshold=base.base_namespace.evaluation.relevance_threshold)\n dataloader_class = getattr(importlib.import_module(\"elliot.dataset\"),\n base.base_namespace.data_config.dataloader)\n dataloader = dataloader_class(config=base.base_namespace)\n data_test_list = dataloader.generate_dataobjects_mock()\n for key, model_base in builder.models():\n test_results = []\n test_trials = []\n for data_test in data_test_list:\n if key.startswith(\"external.\"):\n spec = importlib.util.spec_from_file_location(\"external\",\n path.relpath(\n base.base_namespace.external_models_path))\n external = importlib.util.module_from_spec(spec)\n sys.modules[spec.name] = external\n spec.loader.exec_module(external)\n model_class = getattr(importlib.import_module(\"external\"), key.split(\".\", 1)[1])\n else:\n model_class = getattr(importlib.import_module(\"elliot.recommender\"), key)\n\n model_base_mock = model_base\n model_base_mock = _reset_verbose_option(model_base_mock)\n model_placeholder = ho.ModelCoordinator(data_test, base.base_namespace, model_base_mock, model_class)\n if isinstance(model_base, tuple):\n trials = Trials()\n fmin(model_placeholder.objective,\n space=model_base_mock[1],\n algo=model_base_mock[3],\n trials=trials,\n rstate=_rstate,\n max_evals=model_base_mock[2])\n\n min_val = np.argmin([i[\"result\"][\"loss\"] for i in trials._trials])\n\n 
test_results.append(trials._trials[min_val][\"result\"])\n test_trials.append(trials)\n else:\n single = model_placeholder.single()\n\n test_results.append(single)\n\n min_val = np.argmin([i[\"loss\"] for i in test_results])\n\n res_handler.add_oneshot_recommender(**test_results[min_val])\n\n if isinstance(model_base, tuple):\n hyper_handler.add_trials(test_trials[min_val])\n logger.info(\"End config test without issues\")\n base.base_namespace.config_test = False\n\n\nif __name__ == '__main__':\n run_experiment(\"./config/VBPR_amazon_baby.yml\")\n" ]
[ [ "numpy.random.RandomState", "numpy.argmin" ] ]
DigitalPhonetics/SpeechRepresentationFinetuning
[ "11d7130919888d0a27de61f5075e72f4a024673b" ]
[ "Combine/models.py" ]
[ "\"\"\"\nAdapt from:\nhttps://github.com/facebookresearch/barlowtwins/blob/main/main.py\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom transformers import Wav2Vec2Model\nfrom transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices\n\n\ndef off_diagonal(x):\n \"\"\"\n For the purpose of calculation:\n return flattened view of the off-diagonal elements of a square matrix\n \"\"\"\n n, m = x.shape\n # need to ensure it is matrix\n assert n == m\n return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()\n\n\nclass BarlowTwins(nn.Module):\n def __init__(self, output_size, lambd, batch_size, device):\n super().__init__()\n self.output_size = output_size\n self.lambd = lambd\n self.batch_size = batch_size\n self.device = device\n\n # linear layer as projector\n # self.linear_layer = nn.Sequential(nn.Linear(1024, 64))\n self.dropout = nn.Sequential(nn.Dropout(0.5))\n self.wav2vec_model = Wav2Vec2Model.from_pretrained(\"facebook/wav2vec2-base\")\n self.wav2vec_model.fc = nn.Identity()\n # We will try to use projector in the original paper\n # 3-layers projector\n proj_layers = []\n for layer in range(3):\n if layer == 0: # first layer\n proj_layers.append(nn.Linear(1024, self.output_size, bias=False))\n else:\n proj_layers.append(\n nn.Linear(self.output_size, self.output_size, bias=False)\n )\n if layer < 2: # if not the last layer\n proj_layers.append(nn.BatchNorm1d(self.output_size))\n proj_layers.append(nn.ReLU(inplace=True))\n self.projector = nn.Sequential(*proj_layers)\n self.bn = nn.BatchNorm1d(self.output_size, affine=False)\n\n def forward(self, input_1, input_2):\n # compute masked indices\n batch_size, raw_sequence_length = input_1.shape\n sequence_length = self.wav2vec_model._get_feat_extract_output_lengths(\n raw_sequence_length\n )\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length), mask_prob=0.2, mask_length=2\n )\n mask_time_indices = torch.from_numpy(mask_time_indices).to(self.device)\n\n # compute masked indices\n n = input_1.shape[0]\n # print(\"n: \\n\", n) # 32\n output_1 = self.wav2vec_model(\n input_1, mask_time_indices=mask_time_indices\n ).extract_features # [32, 2, 512]\n output_1 = output_1.reshape(n, -1) # [32, 1024]\n # TODO: try droupout\n output_1 = self.dropout(output_1)\n # print(\"output_1: \\n\", output_1.shape) # 32\n\n # TODO: (batch)normalization version of representation\n # output_1 = self.linear_layer(output_1) # [32, 64]\n output_1 = self.projector(output_1)\n\n output_2 = self.wav2vec_model(\n input_2, mask_time_indices=mask_time_indices\n ).extract_features\n # TODO: remove reshape perphas\n output_2 = output_2.reshape(n, -1)\n # output_2 = self.linear_layer(output_2)\n output_2 = self.projector(output_2)\n # TODO: try droupout\n output_2 = self.dropout(output_2)\n\n return output_1, output_2\n\n def loss(self, output_1, output_2):\n # empirical cross-correlation matrix\n c = self.bn(output_1).T @ self.bn(output_2) # [32, 64]\n\n # sum the cross-correlation matrix between all gpus\n c.div_(self.batch_size) # 32 is batch size\n # torch.distributed.all_reduce(c)\n\n on_diag = torch.diagonal(c).add_(-1).pow(2).sum()\n off_diag = off_diagonal(c).pow_(2).sum()\n loss_val = on_diag + self.lambd * off_diag\n return loss_val\n\n\nclass BarlowTwins_Contrastive(nn.Module):\n def __init__(\n self, output_size, lambd, triplet_margin, barlowtwins_lambd, batch_size, device\n ):\n super().__init__()\n self.output_size = output_size\n self.lambd = lambd\n self.barlowtwins_lambd = barlowtwins_lambd\n self.batch_size 
= batch_size\n self.device = device\n self.cosine_similarity = nn.CosineSimilarity()\n self.triplet_margin = triplet_margin\n\n # linear layer as projector\n # self.linear_layer = nn.Sequential(nn.Linear(1024, 64))\n self.dropout = nn.Sequential(nn.Dropout(0.5))\n self.wav2vec_model = Wav2Vec2Model.from_pretrained(\"facebook/wav2vec2-base\")\n # self.wav2vec_model.fc = nn.Identity()\n # 3-layers projector\n proj_layers = []\n for layer in range(3):\n if layer == 0: # first layer\n proj_layers.append(nn.Linear(1024, self.output_size, bias=False))\n else:\n proj_layers.append(\n nn.Linear(self.output_size, self.output_size, bias=False)\n )\n if layer < 2: # if not the last layer\n proj_layers.append(nn.BatchNorm1d(self.output_size))\n proj_layers.append(nn.ReLU(inplace=True))\n self.projector = nn.Sequential(*proj_layers)\n self.bn = nn.BatchNorm1d(self.output_size, affine=False)\n\n def forward(self, anchor, positive, negative):\n # compute masked indices\n n = anchor.shape[0]\n batch_size, raw_sequence_length = anchor.shape\n sequence_length = self.wav2vec_model._get_feat_extract_output_lengths(\n raw_sequence_length\n )\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length), mask_prob=0.2, mask_length=2\n )\n mask_time_indices = torch.from_numpy(mask_time_indices).to(self.device)\n\n anchor_out = self.wav2vec_model(\n anchor, mask_time_indices=mask_time_indices\n ).extract_features\n anchor_out = self.dropout(anchor_out)\n anchor_out = anchor_out.reshape(n, -1)\n anchor_out = self.projector(anchor_out)\n\n positive_out = self.wav2vec_model(\n positive, mask_time_indices=mask_time_indices\n ).extract_features\n positive_out = self.dropout(positive_out)\n positive_out = positive_out.reshape(n, -1)\n positive_out = self.projector(positive_out)\n\n negative_out = self.wav2vec_model(\n negative, mask_time_indices=mask_time_indices\n ).extract_features\n negative_out = self.dropout(negative_out)\n negative_out = negative_out.reshape(n, -1)\n negative_out = self.projector(negative_out)\n\n return anchor_out, positive_out, negative_out\n\n def barlowtwins_loss(self, anchor_out, positive_out):\n # empirical cross-correlation matrix\n c = self.bn(anchor_out).T @ self.bn(positive_out) # [32, 64]\n\n # sum the cross-correlation matrix between all gpus\n # TODO: use argueparser for batch size 32\n c.div_(self.batch_size) # 32 is batch size\n # torch.distributed.all_reduce(c)\n\n on_diag = torch.diagonal(c).add_(-1).pow(2).sum()\n off_diag = off_diagonal(c).pow_(2).sum()\n loss_val = on_diag + self.barlowtwins_lambd * off_diag\n return loss_val\n\n def triplet_loss(self, anchor_out, positive_out, negative_out, reduction=\"mean\"):\n positive_distance = 1 - self.cosine_similarity(anchor_out, positive_out)\n\n negative_distance = 1 - self.cosine_similarity(anchor_out, negative_out)\n\n losses = torch.max(\n positive_distance - negative_distance + self.triplet_margin,\n torch.full_like(positive_distance, 0),\n )\n if reduction == \"mean\":\n return torch.mean(losses)\n else:\n return torch.sum(losses)\n\n def combine_loss(self, barlowtwins_loss, triplet_loss):\n return barlowtwins_loss * self.lambd + triplet_loss\n" ]
[ [ "torch.sum", "torch.nn.Linear", "torch.nn.BatchNorm1d", "torch.diagonal", "torch.from_numpy", "torch.full_like", "torch.nn.Sequential", "torch.nn.Identity", "torch.nn.ReLU", "torch.nn.Dropout", "torch.mean", "torch.nn.CosineSimilarity" ] ]
HawChang/PaddleHub
[ "9894fbb1dc8575ae1fa74f32a23cc1363467461b" ]
[ "hub_module/modules/image/text_recognition/chinese_text_detection_db_server/module.py" ]
[ "# -*- coding:utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport ast\nimport math\nimport os\nimport time\n\nfrom paddle.fluid.core import AnalysisConfig, create_paddle_predictor, PaddleTensor\nfrom paddlehub.common.logger import logger\nfrom paddlehub.module.module import moduleinfo, runnable, serving\nfrom PIL import Image\nimport base64\nimport cv2\nimport numpy as np\nimport paddle.fluid as fluid\nimport paddlehub as hub\n\n\ndef base64_to_cv2(b64str):\n data = base64.b64decode(b64str.encode('utf8'))\n data = np.fromstring(data, np.uint8)\n data = cv2.imdecode(data, cv2.IMREAD_COLOR)\n return data\n\n\n@moduleinfo(\n name=\"chinese_text_detection_db_server\",\n version=\"1.0.0\",\n summary=\n \"The module aims to detect chinese text position in the image, which is based on differentiable_binarization algorithm.\",\n author=\"paddle-dev\",\n author_email=\"[email protected]\",\n type=\"cv/text_recognition\")\nclass ChineseTextDetectionDBServer(hub.Module):\n def _initialize(self):\n \"\"\"\n initialize with the necessary elements\n \"\"\"\n self.pretrained_model_path = os.path.join(self.directory,\n 'ch_det_r50_vd_db')\n self._set_config()\n\n def check_requirements(self):\n try:\n import shapely, pyclipper\n except:\n print(\n 'This module requires the shapely, pyclipper tools. The running enviroment does not meet the requirments. Please install the two packages.'\n )\n exit()\n\n def _set_config(self):\n \"\"\"\n predictor config setting\n \"\"\"\n model_file_path = os.path.join(self.pretrained_model_path, 'model')\n params_file_path = os.path.join(self.pretrained_model_path, 'params')\n\n config = AnalysisConfig(model_file_path, params_file_path)\n try:\n _places = os.environ[\"CUDA_VISIBLE_DEVICES\"]\n int(_places[0])\n use_gpu = True\n except:\n use_gpu = False\n\n if use_gpu:\n config.enable_use_gpu(8000, 0)\n else:\n config.disable_gpu()\n\n config.disable_glog_info()\n\n # use zero copy\n config.delete_pass(\"conv_transpose_eltwiseadd_bn_fuse_pass\")\n config.switch_use_feed_fetch_ops(False)\n self.predictor = create_paddle_predictor(config)\n input_names = self.predictor.get_input_names()\n self.input_tensor = self.predictor.get_input_tensor(input_names[0])\n output_names = self.predictor.get_output_names()\n self.output_tensors = []\n for output_name in output_names:\n output_tensor = self.predictor.get_output_tensor(output_name)\n self.output_tensors.append(output_tensor)\n\n def read_images(self, paths=[]):\n images = []\n for img_path in paths:\n assert os.path.isfile(\n img_path), \"The {} isn't a valid file.\".format(img_path)\n img = cv2.imread(img_path)\n if img is None:\n logger.info(\"error in loading image:{}\".format(img_path))\n continue\n images.append(img)\n return images\n\n def filter_tag_det_res(self, dt_boxes, image_shape):\n img_height, img_width = image_shape[0:2]\n dt_boxes_new = []\n for box in dt_boxes:\n box = self.order_points_clockwise(box)\n left = int(np.min(box[:, 0]))\n right = int(np.max(box[:, 0]))\n top = int(np.min(box[:, 1]))\n bottom = int(np.max(box[:, 1]))\n bbox_height = bottom - top\n bbox_width = right - left\n diffh = math.fabs(box[0, 1] - box[1, 1])\n diffw = math.fabs(box[0, 0] - box[3, 0])\n rect_width = int(np.linalg.norm(box[0] - box[1]))\n rect_height = int(np.linalg.norm(box[0] - box[3]))\n if rect_width <= 10 or rect_height <= 10:\n continue\n dt_boxes_new.append(box)\n dt_boxes = np.array(dt_boxes_new)\n return dt_boxes\n\n def 
order_points_clockwise(self, pts):\n \"\"\"\n reference from: https://github.com/jrosebr1/imutils/blob/master/imutils/perspective.py\n # sort the points based on their x-coordinates\n \"\"\"\n xSorted = pts[np.argsort(pts[:, 0]), :]\n\n # grab the left-most and right-most points from the sorted\n # x-roodinate points\n leftMost = xSorted[:2, :]\n rightMost = xSorted[2:, :]\n\n # now, sort the left-most coordinates according to their\n # y-coordinates so we can grab the top-left and bottom-left\n # points, respectively\n leftMost = leftMost[np.argsort(leftMost[:, 1]), :]\n (tl, bl) = leftMost\n\n rightMost = rightMost[np.argsort(rightMost[:, 1]), :]\n (tr, br) = rightMost\n\n rect = np.array([tl, tr, br, bl], dtype=\"float32\")\n return rect\n\n def detect_text(self,\n images=[],\n paths=[],\n use_gpu=False,\n output_dir='detection_result',\n visualization=False,\n box_thresh=0.5):\n \"\"\"\n Get the text box in the predicted images.\n Args:\n images (list(numpy.ndarray)): images data, shape of each is [H, W, C]. If images not paths\n paths (list[str]): The paths of images. If paths not images\n use_gpu (bool): Whether to use gpu. Default false.\n output_dir (str): The directory to store output images.\n visualization (bool): Whether to save image or not.\n box_thresh(float): the threshold of the detected text box's confidence\n Returns:\n res (list): The result of text detection box and save path of images.\n \"\"\"\n self.check_requirements()\n\n from chinese_text_detection_db_server.processor import DBPreProcess, DBPostProcess, draw_boxes, get_image_ext\n\n if use_gpu:\n try:\n _places = os.environ[\"CUDA_VISIBLE_DEVICES\"]\n int(_places[0])\n except:\n raise RuntimeError(\n \"Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. If you wanna use gpu, please set CUDA_VISIBLE_DEVICES via export CUDA_VISIBLE_DEVICES=cuda_device_id.\"\n )\n\n if images != [] and isinstance(images, list) and paths == []:\n predicted_data = images\n elif images == [] and isinstance(paths, list) and paths != []:\n predicted_data = self.read_images(paths)\n else:\n raise TypeError(\"The input data is inconsistent with expectations.\")\n\n assert predicted_data != [], \"There is not any image to be predicted. 
Please check the input data.\"\n\n preprocessor = DBPreProcess()\n postprocessor = DBPostProcess(box_thresh)\n\n all_imgs = []\n all_ratios = []\n all_results = []\n for original_image in predicted_data:\n im, ratio_list = preprocessor(original_image)\n res = {'save_path': ''}\n if im is None:\n res['data'] = []\n\n else:\n im = im.copy()\n starttime = time.time()\n self.input_tensor.copy_from_cpu(im)\n self.predictor.zero_copy_run()\n data_out = self.output_tensors[0].copy_to_cpu()\n dt_boxes_list = postprocessor(data_out, [ratio_list])\n boxes = self.filter_tag_det_res(dt_boxes_list[0],\n original_image.shape)\n res['data'] = boxes.astype(np.int).tolist()\n\n all_imgs.append(im)\n all_ratios.append(ratio_list)\n if visualization:\n img = Image.fromarray(\n cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB))\n draw_img = draw_boxes(img, boxes)\n draw_img = np.array(draw_img)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n ext = get_image_ext(original_image)\n saved_name = 'ndarray_{}{}'.format(time.time(), ext)\n cv2.imwrite(\n os.path.join(output_dir, saved_name),\n draw_img[:, :, ::-1])\n res['save_path'] = os.path.join(output_dir, saved_name)\n\n all_results.append(res)\n\n return all_results\n\n def save_inference_model(self,\n dirname,\n model_filename=None,\n params_filename=None,\n combined=True):\n if combined:\n model_filename = \"__model__\" if not model_filename else model_filename\n params_filename = \"__params__\" if not params_filename else params_filename\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n model_file_path = os.path.join(self.pretrained_model_path, 'model')\n params_file_path = os.path.join(self.pretrained_model_path, 'params')\n program, feeded_var_names, target_vars = fluid.io.load_inference_model(\n dirname=self.pretrained_model_path,\n model_filename=model_file_path,\n params_filename=params_file_path,\n executor=exe)\n\n fluid.io.save_inference_model(\n dirname=dirname,\n main_program=program,\n executor=exe,\n feeded_var_names=feeded_var_names,\n target_vars=target_vars,\n model_filename=model_filename,\n params_filename=params_filename)\n\n @serving\n def serving_method(self, images, **kwargs):\n \"\"\"\n Run as a service.\n \"\"\"\n images_decode = [base64_to_cv2(image) for image in images]\n results = self.detect_text(images=images_decode, **kwargs)\n return results\n\n @runnable\n def run_cmd(self, argvs):\n \"\"\"\n Run as a command\n \"\"\"\n self.parser = argparse.ArgumentParser(\n description=\"Run the %s module.\" % self.name,\n prog='hub run %s' % self.name,\n usage='%(prog)s',\n add_help=True)\n\n self.arg_input_group = self.parser.add_argument_group(\n title=\"Input options\", description=\"Input data. 
Required\")\n self.arg_config_group = self.parser.add_argument_group(\n title=\"Config options\",\n description=\n \"Run configuration for controlling module behavior, not required.\")\n\n self.add_module_config_arg()\n self.add_module_input_arg()\n\n args = self.parser.parse_args(argvs)\n results = self.detect_text(\n paths=[args.input_path],\n use_gpu=args.use_gpu,\n output_dir=args.output_dir,\n visualization=args.visualization)\n return results\n\n def add_module_config_arg(self):\n \"\"\"\n Add the command config options\n \"\"\"\n self.arg_config_group.add_argument(\n '--use_gpu',\n type=ast.literal_eval,\n default=False,\n help=\"whether use GPU or not\")\n self.arg_config_group.add_argument(\n '--output_dir',\n type=str,\n default='detection_result',\n help=\"The directory to save output images.\")\n self.arg_config_group.add_argument(\n '--visualization',\n type=ast.literal_eval,\n default=False,\n help=\"whether to save output as images.\")\n\n def add_module_input_arg(self):\n \"\"\"\n Add the command input options\n \"\"\"\n self.arg_input_group.add_argument(\n '--input_path', type=str, default=None, help=\"diretory to image\")\n\n\nif __name__ == '__main__':\n db = ChineseTextDetectionDBServer()\n image_path = [\n '/mnt/zhangxuefei/PaddleOCR/doc/imgs/11.jpg',\n '/mnt/zhangxuefei/PaddleOCR/doc/imgs/12.jpg'\n ]\n res = db.detect_text(paths=image_path, visualization=True)\n db.save_inference_model('save')\n print(res)\n" ]
[ [ "numpy.linalg.norm", "numpy.argsort", "numpy.max", "numpy.min", "numpy.array", "numpy.fromstring" ] ]
SoulaimenTheGreat/Wine-Prediction
[ "0e692ee430c09f90e7b49a3d3fb6450b6e25b663" ]
[ "src/data/fetch_database.py" ]
[ "# import motor.motor_asyncio\nfrom pymongo import MongoClient\nfrom dotenv import load_dotenv\nimport os\nimport pandas as pd\nfrom dagster import solid\n\n\ndef load_env_variables():\n \"\"\"\n Function to load environment variables from .env file\n :return: database password and database name\n \"\"\"\n load_dotenv()\n database_password = os.environ.get('PASSWORD')\n database_name = os.environ.get('DATABASE')\n return database_password, database_name\n\n\ndef configure_database_collection(collection_name: str):\n \"\"\"\n Configure the database connection, database and collection by passing the collection name\n :return: the collection\n \"\"\"\n # load database password and name from environment variables\n database_password, database_name = load_env_variables()\n MONGO_DETAILS = \"mongodb+srv://admin:\" + database_password + \"@wineestimations.ycvrd.mongodb.net/\" + database_name + \\\n \"?retryWrites=true \"\n client = MongoClient(MONGO_DETAILS)\n database = client[database_name]\n collection = database.get_collection(collection_name)\n return collection\n\n\n# def estimation_helper(estimation) -> dict:\n# return {\n# \"id\": str(estimation[\"_id\"]),\n# \"wineName\": estimation[\"wineName\"],\n# \"designation\": estimation[\"designation\"],\n# \"vineyard\": estimation[\"vineyard\"],\n# \"cuvee\": estimation[\"cuvee\"],\n# \"bottleType\": estimation[\"bottleType\"],\n# \"color\": estimation[\"color\"],\n# \"vintage\": estimation[\"vintage\"],\n# \"wineSearcherMin\": estimation[\"wineSearcherMin\"],\n# \"wineSearcherMax\": estimation[\"wineSearcherMax\"],\n# \"idealWinePrice\": estimation[\"idealWinePrice\"],\n# \"correctedMin\": estimation[\"correctedMin\"],\n# \"correctedMax\": estimation[\"correctedMax\"],\n# \"weightedMin\": estimation[\"weightedMin\"],\n# \"weightedMax\": estimation[\"weightedMax\"],\n# \"wineLevel\": estimation[\"wineLevel\"],\n# \"label\": estimation[\"label\"],\n# \"cap\": estimation[\"cap\"],\n# \"limpidity\": estimation[\"limpidity\"],\n# \"date\": estimation[\"date\"],\n# }\n\n@solid\ndef retrieve_filtered_estimations(collection_name: str, condition: dict):\n \"\"\"\n Retrieve records from mongo database by passing collection name and condition for filtering\n :return: list of retrieved records\n\n example: collection_name:'estimations_collection', condition:{\"wineLevel\": 1, \"label\": 1, \"cap\": 1, \"limpidity\": 1}\n \"\"\"\n collection = configure_database_collection(collection_name)\n filtered_estimations = []\n for estimation in collection.find(condition):\n filtered_estimations.append(estimation)\n return filtered_estimations\n\n\n@solid\ndef convert_to_csv(collection_name: str, condition: dict, filename: str):\n \"\"\"\n Convert the retrieved data from the database to csv format by passing collection name, condition, and filename in\n order to save it in data/raw as a centralised directory for data\n \"\"\"\n records = retrieve_filtered_estimations(collection_name, condition)\n records_df = pd.DataFrame.from_records(records)\n records_df.to_csv(path_or_buf=\"../../data/raw/\" + filename + \".csv\",\n index=False)\n\n\n# convert_to_csv(\"estimations_collection\", {\"wineLevel\": 1, \"label\": 1, \"cap\": 1, \"limpidity\": 1}, \"wine_estimations\")\nconvert_to_csv(\"add_weight_collection\", {\"updatedWeight\": True, \"caps_score\": 1, \"label_score\": 1, \"limpidity_score\": 1,\n \"wineLevel_score\": 1}, \"weighted_wine_estimations\")\n" ]
[ [ "pandas.DataFrame.from_records" ] ]
josephedradan/algorithms
[ "6caa107b0df245653eab81143ebf0d9c7e5515fb" ]
[ "algorithms/miniumum_edit_distance.py" ]
[ "\"\"\"\nCreated by Joseph Edradan\nGithub: https://github.com/josephedradan\n\nDate created: 2/15/2021\n\nPurpose:\n\nDetails:\n\nDescription:\n\nNotes:\n\nIMPORTANT NOTES:\n\nExplanation:\n\nReference:\n Minimum Edit Distance Algorithm in Python in 2020 (EXPLAINED)\n Notes:\n Using Rylan Fowers' minimum edit distance algo\n\n Reference:\n https://www.youtube.com/watch?v=AY2DZ4a9gyk\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\ndef min_edit_distance_fowers(source, target):\n list_source_char = [c for c in source]\n list_target_char = [c for c in target]\n\n # Make np_array\n np_array = np.zeros((len(source), len(target)))\n\n # Target is 0th row\n np_array[0] = [c for c in range(len(target))]\n\n # Source is the 0th col ([index_row_all, index_column])\n np_array[:, 0] = [c for c in range(len(source))]\n\n \"\"\"\n Solve the [1,1] location if necessary\n \n If the char at index 1 of both target and source are different the amount of edits needed to achieve the target\n string is the min of the top and left indices values around the [1,1]\n \n \"\"\"\n\n try:\n # if target[1] != source[1]:\n # np_array[1, 1] = 2\n\n # For item index in row, start on index 1 (Target is the row)\n for i in range(1, len(target)):\n\n # for item index in col, start on index 1 (Source is the column)\n for j in range(1, len(source)):\n\n # If the respective chars from i and j for the source and target are NOT the same\n if target[i] != source[j]:\n\n \"\"\"\n Change the value at the given position given i and j\n \n Note that i and j are switched \n \n \"\"\"\n np_array[j, i] = min(np_array[j - 1, i], np_array[j, i - 1]) + 1\n\n # If the respective chars from i and j for the source and target are the same\n else:\n np_array[j, i] = np_array[j - 1, i - 1]\n\n except Exception as e:\n print(e)\n print(\"S:{:<20} T:{:<20} j{:<5} i:{:<5} \".format(source, target, j, i))\n\n # Make pandas DF of the np array\n data_frame = pd.DataFrame(np_array, columns=list_target_char, index=list_source_char)\n\n return np_array, data_frame, np_array[-1, -1]\n\n\nif __name__ == '__main__':\n print(min_edit_distance_fowers(\"joseph\", \"edradan\")[0])\n print(min_edit_distance_fowers(\"joseph\", \"edradan\")[1])\n print(min_edit_distance_fowers(\"joseph\", \"edradan\")[2])\n print()\n\n print(min_edit_distance_fowers(\"#joseph\", \"#joe\")[0])\n print(min_edit_distance_fowers(\"#joseph\", \"#joe\")[1])\n print(min_edit_distance_fowers(\"#joseph\", \"#joe\")[2])\n print()\n\n print(min_edit_distance_fowers(\"$BCDE\", \"#DE\")[0])\n print(min_edit_distance_fowers(\"$BCDE\", \"#DE\")[1])\n print(min_edit_distance_fowers(\"$BCDE\", \"#DE\")[2])\n print()\n" ]
[ [ "pandas.DataFrame" ] ]
MahdiSajedei/Searching-for-activation-functions
[ "031ef131df7fe84fa2cafadc946b5a33df8925ec" ]
[ "src/utils.py" ]
[ "import os\nimport shutil\nimport numpy as np\nimport tensorflow as tf\n\ndef path_exists(path, overwrite=False):\n if not os.path.isdir(path):\n os.mkdir(path)\n elif overwrite == True :\n shutil.rmtree(path)\n return path\n\ndef remove_dir(path):\n os.rmdir(path)\n return True\n\ndef relu_init(shape, dtype=tf.float32, partition_info=None):\n init_range = np.sprt(2.0 / shape[1])\n return tf.random_normal(shape, dtype=dtype) * init_range\n\ndef ones(shape, dtype=tf.float32):\n return tf.ones(shape, dtype=dtype)\n\ndef zeros(shape, dtype=tf.float32):\n return tf.zeros(shape, dtype=dtype)\n\ndef tanh_init(shape, dtype=tf.float32, partition_info=None):\n init_range = np.sqrt(6.0 / (shape[0] + shape[1]))\n return tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=dtype)\n\ndef leaky_relu(X, alpha=0.01):\n return tf.maximum(X, alpha * X)\n\ndef max(input):\n return tf.argmax(input)\n" ]
[ [ "tensorflow.zeros", "tensorflow.ones", "numpy.sprt", "tensorflow.random_uniform", "tensorflow.argmax", "numpy.sqrt", "tensorflow.random_normal", "tensorflow.maximum" ] ]
gem763/crawly
[ "df41e5fc67a4e5092120a1bfe459d57e201849b8" ]
[ "newscrawler/record.py" ]
[ "import pandas as pd\nimport pandas_gbq as gbq\nimport json\nfrom google.oauth2 import service_account\nfrom IPython.core.debugger import set_trace\nfrom pathlib import Path\nimport time\nfrom . import accounts\n\n'''\nConfiguration\n'''\nproj = 'global-news-crawl'\ntable_downloaded = 'news_dataset.downloaded'\ntable_trashed = 'news_dataset.trashed'\ncredentials = service_account.Credentials.from_service_account_info(accounts.bigquery_account)\n#credentials = service_account.Credentials.from_service_account_file('global-news-crawl-c48d7cd9aa81.json')\n\nlocalpath_to_downloaded = 'newsdata/downloaded'\nlocalpath_to_trashed = 'newsdata/trashed'\n\n\nclass Recorder:\n def __init__(self, storage='local'):\n self.storage = storage\n self.ids = self._get_ids(storage)\n\n def _query_ids_from_bigquery(self, tb):\n qry = 'SELECT id FROM `{}`'.format(proj + '.' + tb)\n return gbq.read_gbq(qry, credentials=credentials).id\n\n def _retreive_ids_from_local(self, path):\n return [p.stem for p in Path(path).glob('**/*.json')]\n\n\n def _get_ids(self, storage):\n start = time.time()\n print('checking ' + storage + ' storage... ', end='')\n\n if storage == 'bigquery':\n ids_downloaded = self._query_ids_from_bigquery(table_downloaded)\n ids_trashed = self._query_ids_from_bigquery(table_trashed)\n\n elif storage == 'local':\n ids_downloaded = self._retreive_ids_from_local(localpath_to_downloaded)\n ids_trashed = self._retreive_ids_from_local(localpath_to_trashed)\n\n ids_downloaded_set = set(ids_downloaded)\n ids_trashed_set = set(ids_trashed)\n\n if len(ids_downloaded) != len(ids_downloaded_set):\n '''downloaded articles의 uniqueness'''\n raise self.DuplicatesInSingleTable('duplicated in downloaded')\n\n if len(ids_trashed) != len(ids_trashed_set):\n '''trashed articles의 uniqueness'''\n raise self.DuplicatesInSingleTable('duplicated in trashed')\n\n if len(ids_downloaded_set & ids_trashed_set) != 0:\n '''downloaded와 trashed 간의 uniqueness'''\n raise self.DuplicatesBetweenTwoTables('duplicated between downloaded and trashed')\n\n ids = ids_downloaded_set | ids_trashed_set\n\n print('done ({howlong:.2f} seconds)'.format(howlong=time.time()-start))\n print('we have total {} articles ({} downloaded, {} trashed)'.format(len(ids), len(ids_downloaded_set), len(ids_trashed_set)))\n return ids\n\n\n def has(self, id):\n return id in self.ids\n\n\n def update(self, downloaded=None, trashed=None, chunksize=1000, subdir_len=3):\n '''\n downloaded or trashed = {\n id0: {...},\n id1: {...},\n ...\n }\n '''\n if self.storage == 'bigquery':\n self._update_bigquery('downloaded', downloaded, chunksize)\n self._update_bigquery('trashed', trashed, chunksize)\n\n elif self.storage == 'local':\n self._update_local('downloaded', downloaded, subdir_len)\n self._update_local('trashed', trashed, subdir_len)\n\n\n def _update_local(self, newstype, newsdict, subdir_len):\n if newsdict is not None:\n if newstype == 'downloaded':\n path = localpath_to_downloaded\n elif newstype == 'trashed':\n path = localpath_to_trashed\n\n for id, article in newsdict.items():\n# '''\n# local storage의 경우,\n# downloaded는 downloaded 폴더에,\n# trashed는 trashed/id[:3] 폴더에 저장했다\n# 나중에 혹시 local에 저장할 일이 있다면, 저장방식을 통일하는 것이 좋겠다 (2019.10.31)\n# '''\n# if newstype == 'downloaded':\n# _dir = Path(path)\n# elif newstype == 'trashed':\n# _dir = Path(path + '/' + id[:subdir_len])\n\n _dir = Path(path + '/' + id[:subdir_len])\n _dir.mkdir(parents=True, exist_ok=True)\n fname = id + '.json'\n fpath = _dir / fname\n with fpath.open('w') as f:\n json.dump(article, f)\n\n\n 
def _update_bigquery(self, newstype, newsdict, chunksize):\n if newsdict is not None:\n if newstype == 'downloaded':\n tb = table_downloaded #+ '2'\n elif newstype == 'trashed':\n tb = table_trashed #+ '2'\n\n df = pd.DataFrame.from_dict(newsdict, orient='index')\n df.index.name = 'id'\n df = df.reset_index()\n gbq.to_gbq(df, tb, project_id=proj, if_exists='append', chunksize=chunksize, credentials=credentials, progress_bar=False)\n\n class DuplicatesInSingleTable(Exception):\n pass\n\n class DuplicatesBetweenTwoTables(Exception):\n pass\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
sriniiyer/concode
[ "864e30807f6988731ac3b4b98af6562c18bb42ff" ]
[ "ConcodeDecoder.py" ]
[ "import torch\nfrom torch import nn\nfrom GlobalAttention import GlobalAttention\nfrom torch.autograd import Variable\nfrom Beam import TreeBeam\nfrom UtilClass import bottle, unbottle\nfrom preprocess import rhs, CDDataset\nfrom decoders import DecoderState, Prediction\nimport torch.nn.functional as F\n\nclass ConcodeDecoder(nn.Module):\n\n def __init__(self, vocabs, opt):\n super(ConcodeDecoder, self).__init__()\n\n self.opt = opt\n self.vocabs = vocabs\n\n self.nt_embedding = nn.Embedding(\n len(vocabs['nt']),\n opt.tgt_word_vec_size,\n padding_idx=vocabs['nt'].stoi['<blank>'])\n\n self.rule_embedding = nn.Embedding(\n len(vocabs['prev_rules']),\n opt.tgt_word_vec_size,\n padding_idx=vocabs['prev_rules'].stoi['<blank>'])\n\n self.attn = GlobalAttention(\n opt.decoder_rnn_size,\n attn_type='general',\n include_rnn=False)\n\n self.attn_linear = nn.Linear(self.opt.decoder_rnn_size * 3, self.opt.decoder_rnn_size)\n\n self.var_attn = GlobalAttention(\n opt.decoder_rnn_size,\n attn_type='general',\n include_rnn=False)\n\n if opt.copy_attn:\n self.copy_attn = GlobalAttention(\n opt.decoder_rnn_size,\n attn_type='general')\n\n self.decoder_rnn = nn.LSTM(\n input_size=opt.tgt_word_vec_size * 3 + opt.decoder_rnn_size, # nt and prev_rule\n hidden_size=opt.decoder_rnn_size,\n num_layers=opt.dec_layers,\n dropout=opt.dropout,\n batch_first=True)\n\n self.decoder_dropout = nn.Dropout(opt.dropout)\n\n def forward(self, batch, all_context, context_masks, decState):\n\n src_context = all_context[0]\n src_context_mask = context_masks[0]\n rest_context = torch.cat(all_context[1:], 1)\n rest_context_mask = torch.cat(context_masks[1:], 1)\n\n context = torch.cat(all_context, 1)\n context_lengths = torch.cat(context_masks, 1)\n\n # embed everything\n nt_embeddings = self.nt_embedding(Variable(batch['nt'].cuda(), requires_grad=False))\n rule_embeddings = self.rule_embedding(Variable(batch['prev_rules'].cuda(), requires_grad=False))\n parent_rule_embeddings = self.rule_embedding(Variable(batch['parent_rules'].cuda(), requires_grad=False))\n\n attn_outputs, attn_scores, copy_attn_scores = [], [], []\n # For each batch we have to maintain states\n\n batch_size = batch['nt'].size(0) # 1 for predict\n num_decodes = 0\n\n attn_outputs, attn_scores, copy_attn_scores = [], [], []\n for i, (nt, rule, parent_rule) in enumerate(zip(nt_embeddings.split(1, 1), rule_embeddings.split(1, 1), parent_rule_embeddings.split(1, 1))):\n # accumulate parent decoder states\n parent_states = []\n for j in range(0, batch_size):\n try: # this is needed coz the batch is of different sizes\n parent_states.append(batch['parent_states'][j][i]) # one state for every batch\n except:\n parent_states.append(batch['parent_states'][j][0]) # one state for every batch\n parent_states = torch.cat(parent_states, 0)\n\n rnn_output, prev_hidden = self.decoder_rnn(torch.cat((nt, rule, parent_rule, parent_states), 2), decState.hidden)\n num_decodes += 1\n rnn_output.contiguous()\n\n if self.opt.twostep:\n src_attn_output, src_attn_score = self.attn(rnn_output, src_context, src_context_mask)\n varmet_attn_output, varmet_attn_score = self.var_attn(src_attn_output, rest_context, rest_context_mask)\n\n attn_output = F.tanh(self.attn_linear(torch.cat((rnn_output, src_attn_output, varmet_attn_output), 2)))\n attn_scores.append(varmet_attn_score)\n copy_attn_scores.append(varmet_attn_score)\n else:\n attn_output, attn_score = self.attn(rnn_output, context, context_lengths)\n # attn_score is b x tgt_len x src_len, src should be removed from this\n 
attn_scores.append(attn_score[:, :, src_context.size(1):])\n copy_attn_scores.append(attn_score[:, :, src_context.size(1):])\n\n\n attn_output = self.decoder_dropout(attn_output)\n attn_outputs.append(attn_output)\n\n decState.update_state(prev_hidden, attn_output)\n\n # update all children\n for j, elem in enumerate(rnn_output.split(1, 0)):\n # children wont be there during prediction\n if 'children' in batch and i in batch['children'][j]: # rule i has children\n for child in batch['children'][j][i]:\n batch['parent_states'][j][child] = elem\n\n output = torch.cat(attn_outputs, 1)\n attn_scores = torch.cat(attn_scores, 1)\n copy_attn_scores = torch.cat(copy_attn_scores, 1) if self.opt.copy_attn else None\n\n return output, attn_scores, copy_attn_scores\n\n def predict(self, enc_hidden, context, context_lengths, batch, beam_size, max_code_length, generator, replace_unk, vis_params):\n\n # This decoder does not have input feeding. Parent state replces that\n decState = DecoderState(\n enc_hidden, #encoder hidden\n Variable(torch.zeros(1, 1, self.opt.decoder_rnn_size).cuda(), requires_grad=False) # parent state\n )\n # Repeat everything beam_size times.\n def rvar(a, beam_size):\n return Variable(a.repeat(beam_size, 1, 1), volatile=True)\n\n context = tuple(rvar(context[i].data, beam_size) for i in range(0, len(context)))\n context_lengths = tuple(context_lengths[i].repeat(beam_size, 1) for i in range(0, len(context_lengths)))\n\n decState.repeat_beam_size_times(beam_size)\n\n # Use only one beam\n beam = TreeBeam(beam_size, True, self.vocabs, self.opt.decoder_rnn_size)\n\n for count in range(0, max_code_length): # We will break when we have the required number of terminals\n # to be consistent with seq2seq\n\n if beam.done():\n break\n\n # Construct batch x beam_size nxt words.\n # Get all the pending current beam words and arrange for forward.\n # Uses the start symbol in the beginning\n inp = beam.getCurrentState() # Should return a batch of the frontier\n\n # Run one step., decState gets automatically updated\n output, attn, copy_attn = self.forward(inp, context, context_lengths, decState)\n src_map = torch.zeros(0, 0)\n if self.opt.var_names:\n src_map = torch.cat((src_map, batch['concode_src_map_vars']), 1)\n if self.opt.method_names:\n src_map = torch.cat((src_map, batch['concode_src_map_methods']), 1)\n\n scores = generator(bottle(output), bottle(copy_attn), src_map, inp) #generator needs the non-terminals\n\n out = generator.collapseCopyScores(unbottle(scores.data.clone(), beam_size), batch) # needs seq2seq from batch\n out = out.log()\n\n # beam x tgt_vocab\n\n beam.advance(out[:, 0], attn.data[:, 0], output)\n decState.beam_update(beam.getCurrentOrigin(), beam_size)\n\n pred_score_total = 0\n pred_words_total = 0\n\n score, times, k = beam.getFinal() # times is the length of the prediction\n hyp, att = beam.getHyp(times, k)\n goldNl = []\n if self.opt.var_names:\n goldNl += batch['concode_var'][0] # because batch = 1\n if self.opt.method_names:\n goldNl += batch['concode_method'][0] # because batch = 1\n\n goldCode = self.vocabs['code'].addStartOrEnd(batch['raw_code'][0])\n predSent, copied_tokens, replaced_tokens = self.buildTargetTokens(\n hyp,\n self.vocabs,\n goldNl,\n att,\n batch['concode_vocab'][0],\n replace_unk\n )\n predSent = ConcodeDecoder.rulesToCode(predSent)\n pred_score_total += score\n pred_words_total += len(predSent)\n\n return Prediction(goldNl, goldCode, predSent, att)\n\n @staticmethod\n def rulesToCode(rules):\n stack = []\n code = []\n for i in range(0, 
len(rules)):\n if not CDDataset._is_terminal_rule(rules[i]):\n stack.extend(rhs(rules[i]).split('___')[::-1])\n else:\n code.append(rhs(rules[i]))\n\n try:\n top = stack.pop()\n\n while not top[0].isupper():\n code.append(top)\n if len(stack) == 0:\n break\n top = stack.pop()\n except:\n pass\n\n return code\n\n def buildTargetTokens(self, pred, vocabs, src, attn, copy_vocab, replace_unk):\n vocab = vocabs['next_rules']\n tokens = []\n copied_tokens, replaced_tokens = [], []\n for tok in pred:\n if tok < len(vocab):\n tokens.append(vocab.itos[tok])\n else:\n tokens.append(\"IdentifierNT-->\" + copy_vocab.itos[tok - len(vocab)])\n copied_tokens.append(copy_vocab.itos[tok - len(vocab)])\n\n if replace_unk and attn is not None:\n for i in range(len(tokens)):\n if tokens[i] == '<unk>':\n _, maxIndex = attn[i].max(0)\n tokens[i] = \"IdentifierNT-->\" + src[maxIndex[0]]\n replaced_tokens.append(src[maxIndex[0]])\n\n return tokens, copied_tokens, replaced_tokens\n" ]
[ [ "torch.nn.LSTM", "torch.nn.Linear", "torch.zeros", "torch.cat", "torch.nn.Dropout" ] ]
ngohaily/geopandas
[ "2725f346e430edb6a5164c21dd707de328329f31" ]
[ "geopandas/tests/test_geom_methods.py" ]
[ "from __future__ import absolute_import\n\nimport string\n\nimport numpy as np\nfrom pandas import Series, DataFrame, MultiIndex\nfrom shapely.geometry import (\n Point, LinearRing, LineString, Polygon, MultiPoint)\nfrom shapely.geometry.collection import GeometryCollection\nfrom shapely.ops import unary_union\n\nfrom geopandas import GeoSeries, GeoDataFrame\nfrom geopandas.base import GeoPandasBase\n\nfrom geopandas.tests.util import (\n geom_equals, geom_almost_equals, assert_geoseries_equal)\n\nimport pytest\nfrom numpy.testing import assert_array_equal\nfrom pandas.util.testing import assert_series_equal, assert_frame_equal\n\n\ndef assert_array_dtype_equal(a, b, *args, **kwargs):\n a = np.asanyarray(a)\n b = np.asanyarray(b)\n assert a.dtype == b.dtype\n assert_array_equal(a, b, *args, **kwargs)\n\n\nclass TestGeomMethods:\n\n def setup_method(self):\n self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])\n self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])\n self.t3 = Polygon([(2, 0), (3, 0), (3, 1)])\n self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])\n self.inner_sq = Polygon([(0.25, 0.25), (0.75, 0.25), (0.75, 0.75),\n (0.25, 0.75)])\n self.nested_squares = Polygon(self.sq.boundary,\n [self.inner_sq.boundary])\n self.p0 = Point(5, 5)\n self.p3d = Point(5, 5, 5)\n self.g0 = GeoSeries([self.t1, self.t2, self.sq, self.inner_sq,\n self.nested_squares, self.p0])\n self.g1 = GeoSeries([self.t1, self.sq])\n self.g2 = GeoSeries([self.sq, self.t1])\n self.g3 = GeoSeries([self.t1, self.t2])\n self.g3.crs = {'init': 'epsg:4326', 'no_defs': True}\n self.g4 = GeoSeries([self.t2, self.t1])\n self.g4.crs = {'init': 'epsg:4326', 'no_defs': True}\n self.g_3d = GeoSeries([self.p0, self.p3d])\n self.na = GeoSeries([self.t1, self.t2, Polygon()])\n self.na_none = GeoSeries([self.t1, None])\n self.a1 = self.g1.copy()\n self.a1.index = ['A', 'B']\n self.a2 = self.g2.copy()\n self.a2.index = ['B', 'C']\n self.esb = Point(-73.9847, 40.7484)\n self.sol = Point(-74.0446, 40.6893)\n self.landmarks = GeoSeries([self.esb, self.sol],\n crs={'init': 'epsg:4326', 'no_defs': True})\n self.l1 = LineString([(0, 0), (0, 1), (1, 1)])\n self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)])\n self.g5 = GeoSeries([self.l1, self.l2])\n self.g6 = GeoSeries([self.p0, self.t3])\n self.empty = GeoSeries([])\n self.empty_poly = Polygon()\n\n # Crossed lines\n self.l3 = LineString([(0, 0), (1, 1)])\n self.l4 = LineString([(0, 1), (1, 0)])\n self.crossed_lines = GeoSeries([self.l3, self.l4])\n\n # Placeholder for testing, will just drop in different geometries\n # when needed\n self.gdf1 = GeoDataFrame({'geometry': self.g1,\n 'col0': [1.0, 2.0],\n 'col1': ['geo', 'pandas']})\n self.gdf2 = GeoDataFrame({'geometry': self.g1,\n 'col3': [4, 5],\n 'col4': ['rand', 'string']})\n\n def _test_unary_real(self, op, expected, a):\n \"\"\" Tests for 'area', 'length', 'is_valid', etc. \"\"\"\n fcmp = assert_series_equal\n self._test_unary(op, expected, a, fcmp)\n\n def _test_unary_topological(self, op, expected, a):\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n def fcmp(a, b): assert a.equals(b)\n self._test_unary(op, expected, a, fcmp)\n\n def _test_binary_topological(self, op, expected, a, b, *args, **kwargs):\n \"\"\" Tests for 'intersection', 'union', 'symmetric_difference', etc. 
\"\"\"\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n def fcmp(a, b): assert geom_equals(a, b)\n\n if isinstance(b, GeoPandasBase):\n right_df = True\n else:\n right_df = False\n\n self._binary_op_test(op, expected, a, b, fcmp, True, right_df,\n *args, **kwargs)\n\n def _test_binary_real(self, op, expected, a, b, *args, **kwargs):\n fcmp = assert_series_equal\n self._binary_op_test(op, expected, a, b, fcmp, True, False,\n *args, **kwargs)\n\n def _test_binary_operator(self, op, expected, a, b):\n \"\"\"\n The operators only have GeoSeries on the left, but can have\n GeoSeries or GeoDataFrame on the right.\n\n \"\"\"\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n def fcmp(a, b): assert geom_equals(a, b)\n\n if isinstance(b, GeoPandasBase):\n right_df = True\n else:\n right_df = False\n\n self._binary_op_test(op, expected, a, b, fcmp, False, right_df)\n\n def _binary_op_test(self, op, expected, left, right, fcmp, left_df,\n right_df,\n *args, **kwargs):\n \"\"\"\n This is a helper to call a function on GeoSeries and GeoDataFrame\n arguments. For example, 'intersection' is a member of both GeoSeries\n and GeoDataFrame and can take either GeoSeries or GeoDataFrame inputs.\n This function has the ability to test all four combinations of input\n types.\n\n Parameters\n ----------\n\n expected : str\n The operation to be tested. e.g., 'intersection'\n left: GeoSeries\n right: GeoSeries\n fcmp: function\n Called with the result of the operation and expected. It should\n assert if the result is incorrect\n left_df: bool\n If the left input should also be called with a GeoDataFrame\n right_df: bool\n Indicates whether the right input should be called with a\n GeoDataFrame\n\n \"\"\"\n def _make_gdf(s):\n n = len(s)\n col1 = string.ascii_lowercase[:n]\n col2 = range(n)\n\n return GeoDataFrame({'geometry': s.values,\n 'col1': col1,\n 'col2': col2},\n index=s.index, crs=s.crs)\n\n # Test GeoSeries.op(GeoSeries)\n result = getattr(left, op)(right, *args, **kwargs)\n fcmp(result, expected)\n\n if left_df:\n # Test GeoDataFrame.op(GeoSeries)\n gdf_left = _make_gdf(left)\n result = getattr(gdf_left, op)(right, *args, **kwargs)\n fcmp(result, expected)\n\n if right_df:\n # Test GeoSeries.op(GeoDataFrame)\n gdf_right = _make_gdf(right)\n result = getattr(left, op)(gdf_right, *args, **kwargs)\n fcmp(result, expected)\n\n if left_df:\n # Test GeoDataFrame.op(GeoDataFrame)\n result = getattr(gdf_left, op)(gdf_right, *args, **kwargs)\n fcmp(result, expected)\n\n def _test_unary(self, op, expected, a, fcmp):\n # GeoSeries, (GeoSeries or geometry)\n result = getattr(a, op)\n fcmp(result, expected)\n\n # GeoDataFrame, (GeoSeries or geometry)\n gdf = self.gdf1.set_geometry(a)\n result = getattr(gdf, op)\n fcmp(result, expected)\n\n def test_crs_warning(self):\n # operations on geometries should warn for different CRS\n no_crs_g3 = self.g3.copy()\n no_crs_g3.crs = None\n with pytest.warns(UserWarning):\n self._test_binary_topological('intersection', self.g3,\n self.g3, no_crs_g3)\n\n def test_intersection(self):\n self._test_binary_topological('intersection', self.t1,\n self.g1, self.g2)\n self._test_binary_topological('intersection', self.empty_poly,\n self.g1, self.empty)\n\n def test_union_series(self):\n self._test_binary_topological('union', self.sq, self.g1, self.g2)\n\n def test_union_polygon(self):\n self._test_binary_topological('union', self.sq, self.g1, self.t2)\n\n def test_symmetric_difference_series(self):\n 
self._test_binary_topological('symmetric_difference', self.sq,\n self.g3, self.g4)\n\n def test_symmetric_difference_poly(self):\n expected = GeoSeries([GeometryCollection(), self.sq], crs=self.g3.crs)\n self._test_binary_topological('symmetric_difference', expected,\n self.g3, self.t1)\n\n def test_difference_series(self):\n expected = GeoSeries([GeometryCollection(), self.t2])\n self._test_binary_topological('difference', expected,\n self.g1, self.g2)\n\n def test_difference_poly(self):\n expected = GeoSeries([self.t1, self.t1])\n self._test_binary_topological('difference', expected,\n self.g1, self.t2)\n\n def test_geo_op_empty_result(self):\n l1 = LineString([(0, 0), (1, 1)])\n l2 = LineString([(2, 2), (3, 3)])\n expected = GeoSeries([GeometryCollection()])\n # binary geo resulting in empty geometry\n result = GeoSeries([l1]).intersection(l2)\n assert_geoseries_equal(result, expected)\n # binary geo empty result with right GeoSeries\n result = GeoSeries([l1]).intersection(GeoSeries([l2]))\n assert_geoseries_equal(result, expected)\n # unary geo resulting in emtpy geometry\n result = GeoSeries([GeometryCollection()]).convex_hull\n assert_geoseries_equal(result, expected)\n\n def test_boundary(self):\n l1 = LineString([(0, 0), (1, 0), (1, 1), (0, 0)])\n l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])\n expected = GeoSeries([l1, l2], index=self.g1.index, crs=self.g1.crs)\n\n self._test_unary_topological('boundary', expected, self.g1)\n\n def test_area(self):\n expected = Series(np.array([0.5, 1.0]), index=self.g1.index)\n self._test_unary_real('area', expected, self.g1)\n\n expected = Series(np.array([0.5, np.nan]), index=self.na_none.index)\n self._test_unary_real('area', expected, self.na_none)\n\n def test_bounds(self):\n # Set columns to get the order right\n expected = DataFrame({'minx': [0.0, 0.0], 'miny': [0.0, 0.0],\n 'maxx': [1.0, 1.0], 'maxy': [1.0, 1.0]},\n index=self.g1.index,\n columns=['minx', 'miny', 'maxx', 'maxy'])\n\n result = self.g1.bounds\n assert_frame_equal(expected, result)\n\n gdf = self.gdf1.set_geometry(self.g1)\n result = gdf.bounds\n assert_frame_equal(expected, result)\n\n def test_unary_union(self):\n p1 = self.t1\n p2 = Polygon([(2, 0), (3, 0), (3, 1)])\n expected = unary_union([p1, p2])\n g = GeoSeries([p1, p2])\n\n self._test_unary_topological('unary_union', expected, g)\n\n def test_contains(self):\n expected = [True, False, True, False, False, False]\n assert_array_dtype_equal(expected, self.g0.contains(self.t1))\n\n def test_length(self):\n expected = Series(np.array([2 + np.sqrt(2), 4]), index=self.g1.index)\n self._test_unary_real('length', expected, self.g1)\n\n expected = Series(\n np.array([2 + np.sqrt(2), np.nan]),\n index=self.na_none.index)\n self._test_unary_real('length', expected, self.na_none)\n\n def test_crosses(self):\n expected = [False, False, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.crosses(self.t1))\n\n expected = [False, True]\n assert_array_dtype_equal(expected, self.crossed_lines.crosses(self.l3))\n\n def test_disjoint(self):\n expected = [False, False, False, False, False, True]\n assert_array_dtype_equal(expected, self.g0.disjoint(self.t1))\n\n def test_relate(self):\n expected = Series(['212101212',\n '212101212',\n '212FF1FF2',\n '2FFF1FFF2',\n 'FF2F112F2',\n 'FF0FFF212'],\n index=self.g0.index)\n assert_array_dtype_equal(expected, self.g0.relate(self.inner_sq))\n\n expected = Series(['FF0FFF212',\n None],\n index=self.g6.index)\n assert_array_dtype_equal(expected, 
self.g6.relate(self.na_none))\n\n def test_distance(self):\n expected = Series(np.array([np.sqrt((5 - 1)**2 + (5 - 1)**2), np.nan]),\n self.na_none.index)\n assert_array_dtype_equal(expected, self.na_none.distance(self.p0))\n\n expected = Series(np.array([np.sqrt(4**2 + 4**2), np.nan]),\n self.g6.index)\n assert_array_dtype_equal(expected, self.g6.distance(self.na_none))\n\n def test_intersects(self):\n expected = [True, True, True, True, True, False]\n assert_array_dtype_equal(expected, self.g0.intersects(self.t1))\n\n expected = [True, False]\n assert_array_dtype_equal(expected, self.na_none.intersects(self.t2))\n\n expected = np.array([], dtype=bool)\n assert_array_dtype_equal(expected, self.empty.intersects(self.t1))\n\n expected = np.array([], dtype=bool)\n assert_array_dtype_equal(\n expected, self.empty.intersects(self.empty_poly))\n\n expected = [False] * 6\n assert_array_dtype_equal(expected, self.g0.intersects(self.empty_poly))\n\n def test_overlaps(self):\n expected = [True, True, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.overlaps(self.inner_sq))\n\n expected = [False, False]\n assert_array_dtype_equal(expected, self.g4.overlaps(self.t1))\n\n def test_touches(self):\n expected = [False, True, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.touches(self.t1))\n\n def test_within(self):\n expected = [True, False, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.within(self.t1))\n\n expected = [True, True, True, True, True, False]\n assert_array_dtype_equal(expected, self.g0.within(self.sq))\n\n def test_is_valid(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_valid', expected, self.g1)\n\n def test_is_empty(self):\n expected = Series(np.array([False] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_empty', expected, self.g1)\n\n def test_is_ring(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_ring', expected, self.g1)\n\n def test_is_simple(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real('is_simple', expected, self.g1)\n\n def test_has_z(self):\n expected = Series([False, True], self.g_3d.index)\n self._test_unary_real('has_z', expected, self.g_3d)\n\n def test_xy_points(self):\n expected_x = [-73.9847, -74.0446]\n expected_y = [40.7484, 40.6893]\n\n assert_array_dtype_equal(expected_x, self.landmarks.geometry.x)\n assert_array_dtype_equal(expected_y, self.landmarks.geometry.y)\n\n def test_xy_polygons(self):\n # accessing x attribute in polygon geoseries should raise an error\n with pytest.raises(ValueError):\n _ = self.gdf1.geometry.x\n # and same for accessing y attribute in polygon geoseries\n with pytest.raises(ValueError):\n _ = self.gdf1.geometry.y\n\n def test_centroid(self):\n polygon = Polygon([(-1, -1), (1, -1), (1, 1), (-1, 1)])\n point = Point(0, 0)\n polygons = GeoSeries([polygon for i in range(3)])\n points = GeoSeries([point for i in range(3)])\n assert_geoseries_equal(polygons.centroid, points)\n\n def test_convex_hull(self):\n # the convex hull of a square should be the same as the square\n squares = GeoSeries([self.sq for i in range(3)])\n assert_geoseries_equal(squares, squares.convex_hull)\n\n def test_exterior(self):\n exp_exterior = GeoSeries([LinearRing(p.boundary) for p in self.g3])\n for expected, computed in zip(exp_exterior, self.g3.exterior):\n assert computed.equals(expected)\n\n def 
test_interiors(self):\n original = GeoSeries([self.t1, self.nested_squares])\n\n # This is a polygon with no interior.\n expected = []\n assert original.interiors[0] == expected\n # This is a polygon with an interior.\n expected = LinearRing(self.inner_sq.boundary)\n assert original.interiors[1][0].equals(expected)\n\n def test_interpolate(self):\n expected = GeoSeries([Point(0.5, 1.0), Point(0.75, 1.0)])\n self._test_binary_topological('interpolate', expected, self.g5,\n 0.75, normalized=True)\n\n expected = GeoSeries([Point(0.5, 1.0), Point(1.0, 0.5)])\n self._test_binary_topological('interpolate', expected, self.g5,\n 1.5)\n\n def test_interpolate_distance_array(self):\n expected = GeoSeries([Point(0.0, 0.75), Point(1.0, 0.5)])\n self._test_binary_topological('interpolate', expected, self.g5,\n np.array([0.75, 1.5]))\n\n expected = GeoSeries([Point(0.5, 1.0), Point(0.0, 1.0)])\n self._test_binary_topological('interpolate', expected, self.g5,\n np.array([0.75, 1.5]), normalized=True)\n\n def test_interpolate_distance_wrong_length(self):\n distances = np.array([1, 2, 3])\n with pytest.raises(ValueError):\n self.g5.interpolate(distances)\n\n def test_interpolate_distance_wrong_index(self):\n distances = Series([1, 2], index=[99, 98])\n with pytest.raises(ValueError):\n self.g5.interpolate(distances)\n\n def test_project(self):\n expected = Series([2.0, 1.5], index=self.g5.index)\n p = Point(1.0, 0.5)\n self._test_binary_real('project', expected, self.g5, p)\n\n expected = Series([1.0, 0.5], index=self.g5.index)\n self._test_binary_real('project', expected, self.g5, p,\n normalized=True)\n\n def test_translate_tuple(self):\n trans = self.sol.x - self.esb.x, self.sol.y - self.esb.y\n assert self.landmarks.translate(*trans)[0].equals(self.sol)\n\n res = self.gdf1.set_geometry(self.landmarks).translate(*trans)[0]\n assert res.equals(self.sol)\n\n def test_rotate(self):\n angle = 98\n expected = self.g4\n\n o = Point(0, 0)\n res = self.g4.rotate(angle, origin=o).rotate(-angle, origin=o)\n assert geom_almost_equals(self.g4, res)\n\n res = self.gdf1.set_geometry(self.g4).rotate(angle, origin=Point(0, 0))\n assert geom_almost_equals(expected, res.rotate(-angle, origin=o))\n\n def test_scale(self):\n expected = self.g4\n\n scale = 2., 1.\n inv = tuple(1./i for i in scale)\n\n o = Point(0, 0)\n res = self.g4.scale(*scale, origin=o).scale(*inv, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).scale(*scale, origin=o)\n res = res.scale(*inv, origin=o)\n assert geom_almost_equals(expected, res)\n\n def test_skew(self):\n expected = self.g4\n\n skew = 45.\n o = Point(0, 0)\n\n # Test xs\n res = self.g4.skew(xs=skew, origin=o).skew(xs=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).skew(xs=skew, origin=o)\n res = res.skew(xs=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n # Test ys\n res = self.g4.skew(ys=skew, origin=o).skew(ys=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).skew(ys=skew, origin=o)\n res = res.skew(ys=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n def test_buffer(self):\n original = GeoSeries([Point(0, 0)])\n expected = GeoSeries([Polygon(((5, 0), (0, -5), (-5, 0), (0, 5),\n (5, 0)))])\n calculated = original.buffer(5, resolution=1)\n assert geom_almost_equals(expected, calculated)\n\n def test_buffer_args(self):\n args = dict(cap_style=3, join_style=2, mitre_limit=2.5)\n calculated_series = 
self.g0.buffer(10, **args)\n for original, calculated in zip(self.g0, calculated_series):\n expected = original.buffer(10, **args)\n assert calculated.equals(expected)\n\n def test_buffer_distance_array(self):\n original = GeoSeries([self.p0, self.p0])\n expected = GeoSeries(\n [Polygon(((6, 5), (5, 4), (4, 5), (5, 6), (6, 5))),\n Polygon(((10, 5), (5, 0), (0, 5), (5, 10), (10, 5))),\n ])\n calculated = original.buffer(np.array([1, 5]), resolution=1)\n assert_geoseries_equal(calculated, expected, check_less_precise=True)\n\n def test_buffer_distance_wrong_length(self):\n original = GeoSeries([self.p0, self.p0])\n distances = np.array([1, 2, 3])\n with pytest.raises(ValueError):\n original.buffer(distances)\n\n def test_buffer_distance_wrong_index(self):\n original = GeoSeries([self.p0, self.p0], index=[0, 1])\n distances = Series(data=[1, 2], index=[99, 98])\n with pytest.raises(ValueError):\n original.buffer(distances)\n\n def test_envelope(self):\n e = self.g3.envelope\n assert np.all(e.geom_equals(self.sq))\n assert isinstance(e, GeoSeries)\n assert self.g3.crs == e.crs\n\n def test_total_bounds(self):\n bbox = self.sol.x, self.sol.y, self.esb.x, self.esb.y\n assert isinstance(self.landmarks.total_bounds, np.ndarray)\n assert tuple(self.landmarks.total_bounds) == bbox\n\n df = GeoDataFrame({'geometry': self.landmarks,\n 'col1': range(len(self.landmarks))})\n assert tuple(df.total_bounds) == bbox\n\n def test_explode_geoseries(self):\n s = GeoSeries([MultiPoint([(0, 0), (1, 1)]),\n MultiPoint([(2, 2), (3, 3), (4, 4)])])\n s.index.name = 'test_index_name'\n expected_index_name = ['test_index_name', None]\n index = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 2)]\n expected = GeoSeries([Point(0, 0), Point(1, 1), Point(2, 2),\n Point(3, 3), Point(4, 4)],\n index=MultiIndex.from_tuples(\n index, names=expected_index_name))\n assert_geoseries_equal(expected, s.explode())\n\n @pytest.mark.parametrize(\"index_name\", [None, 'test'])\n def test_explode_geodataframe(self, index_name):\n s = GeoSeries([MultiPoint([Point(1, 2), Point(2, 3)]), Point(5, 5)])\n df = GeoDataFrame({'col': [1, 2], 'geometry': s})\n df.index.name = index_name\n\n test_df = df.explode()\n\n expected_s = GeoSeries([Point(1, 2), Point(2, 3), Point(5, 5)])\n expected_df = GeoDataFrame({'col': [1, 1, 2], 'geometry': expected_s})\n expected_index = MultiIndex([[0, 1], [0, 1]], # levels\n [[0, 0, 1], [0, 1, 0]], # labels/codes\n names=[index_name, None])\n expected_df = expected_df.set_index(expected_index)\n assert_frame_equal(test_df, expected_df)\n\n #\n # Test '&', '|', '^', and '-'\n # The left can only be a GeoSeries. The right hand side can be a\n # GeoSeries, GeoDataFrame or Shapely geometry\n #\n def test_intersection_operator(self):\n self._test_binary_operator('__and__', self.t1, self.g1, self.g2)\n\n def test_union_operator(self):\n self._test_binary_operator('__or__', self.sq, self.g1, self.g2)\n\n def test_union_operator_polygon(self):\n self._test_binary_operator('__or__', self.sq, self.g1, self.t2)\n\n def test_symmetric_difference_operator(self):\n self._test_binary_operator('__xor__', self.sq, self.g3, self.g4)\n\n def test_difference_series2(self):\n expected = GeoSeries([GeometryCollection(), self.t2])\n self._test_binary_operator('__sub__', expected, self.g1, self.g2)\n\n def test_difference_poly2(self):\n expected = GeoSeries([self.t1, self.t1])\n self._test_binary_operator('__sub__', expected, self.g1, self.t2)\n" ]
[ [ "pandas.MultiIndex", "numpy.sqrt", "pandas.Series", "pandas.DataFrame", "numpy.testing.assert_array_equal", "numpy.asanyarray", "pandas.MultiIndex.from_tuples", "numpy.array", "pandas.util.testing.assert_frame_equal" ] ]
terryjx/FunFact
[ "595c3b68bac7cc92c802969f207f060c1242a88b" ]
[ "funfact/legacy/rbf/_base.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom collections import namedtuple\nimport dill\nimport numpy as np\nimport pycuda.driver as cuda\nfrom funfact.cuda import context_manager, ManagedArray\n\n\nclass RBFExpansionBasePyCUDA:\n\n def __init__(self):\n context_manager.autoinit()\n\n @staticmethod\n def as_namedtuple(name, **kwargs):\n return namedtuple(name, list(kwargs.keys()))(*kwargs.values())\n\n @staticmethod\n def _as_cuda_array(arr, dtype=None, order=None):\n if (isinstance(arr, np.ndarray) and\n isinstance(arr.base, cuda.ManagedAllocation) and\n arr.dtype == dtype and\n ((order is None) or\n (order == 'C' and arr.flags.c_contiguous) or\n (order == 'F' and arr.flags.f_contiguous))):\n return arr\n else:\n return ManagedArray.copy(arr, dtype, order)\n\n @staticmethod\n def _zero_cuda_array(arr):\n assert isinstance(arr.base, cuda.ManagedAllocation)\n cuda.memset_d32(\n arr.base.get_device_pointer(),\n 0,\n arr.dtype.itemsize // 4 * np.prod(arr.shape).item()\n )\n\n def to_pickle(self, file):\n state = self.__dict__.copy()\n open(file, 'wb').write(dill.dumps(state))\n\n @classmethod\n def from_pickle(cls, file):\n fac = cls()\n for key, val in dill.loads(open(file, 'rb').read()).items():\n setattr(fac, key, val)\n return fac\n\n @property\n def config(self):\n return {\n key: self.__dict__[key] for key in self.__dict__\n if not key.startswith('_')\n }\n\n @property\n def report(self):\n return self._report\n\n @report.setter\n def report(self, report_dict):\n self._report = self.as_namedtuple('report', **report_dict)\n\n @property\n def optimum(self):\n return self._optimum\n\n class Model:\n '''An approximation of a dense matrix as a sum of RBF over distance\n matrices.\n '''\n\n def __init__(\n self, f, x, x_names=None\n ):\n for w in x:\n assert w.shape[-1] == x[0].shape[-1],\\\n \"Inconsisent component size.\"\n self.f = f\n self.x = x\n self.x_names = x_names\n\n def __repr__(self):\n xns = ', '.join(self.x_names)\n return f'<ensemble of {len(self)} RBF expansions [x_names = {xns}]>'\n\n def __len__(self):\n return len(self.x[-1])\n\n def __call__(\n self, runs=None, components=None, device=None\n ):\n x = self.x\n if components is not None:\n components = np.array(components)\n if components.ndim == 0:\n components = np.expand_dims(components, 0)\n x = [w[..., components, :] if w.ndim >= 2 else w for w in x]\n if runs is not None:\n x = [w[..., runs] for w in x]\n return self.f(*x)\n\n @property\n def funrank(self):\n return len(self.x[-2])\n\n def __getattr__(self, a):\n return self.x[self.x_names.index(a)]\n\n def __getstate__(self):\n return vars(self)\n\n def __setstate__(self, state):\n vars(self).update(state)\n" ]
[ [ "numpy.array", "numpy.expand_dims", "numpy.prod" ] ]
houzeyu2683/IRRHW
[ "c44298ad14c468eff36bc75ebc63abdc9ba24d55" ]
[ "HW/2/script/download.py" ]
[ "\n\n##\n## The packages.\nfrom selenium import webdriver\nimport pandas, os, tqdm, time\n\n\n##\n## The arguments.\n# keyword = [\"Covid-19\", \"Stroke\", \"Myocardial Infarction\", \"influenza\", \"asthma\", \"chest cavity\"]\nkeyword = [\"chest cavity\"]\nfor k in keyword:\n\n platform = \"pubmed\"\n site = \"https://pubmed.ncbi.nlm.nih.gov/\"\n number = 100\n folder = \"resource/csv/{}\".format(k)\n os.makedirs(folder) if not os.path.isdir(folder) else None\n\n\n ##\n ##\n option = webdriver.chrome.options.Options()\n option.binary_location = \"/usr/bin/google-chrome\"\n # option.add_argument('--no-sandbox')\n driver = webdriver.Chrome(options=option, executable_path='driver/chrome')\n page = range(1, number+1, 1)\n group = {\n \"link\":[],\n \"title\":[],\n \"abstract\":[],\n \"tag\":[],\n \"author\":[]\n }\n for p in page:\n \n try:\n\n driver.get(\"{}?term={}&filter=simsearch1.fha&page={}\".format(site, k, p))\n group['link'] += [i.get_attribute(\"href\") for i in driver.find_elements_by_css_selector(\".docsum-title\")]\n pass\n\n except:\n\n continue\n\n pass\n\n link = pandas.DataFrame({\"link\":group['link']})\n link.to_csv(os.path.join(folder, \"link.csv\"), index=False)\n\n def remove(x, what=\"\"):\n\n output = []\n for i in x:\n\n if(i==what):\n\n continue\n\n else:\n\n output += [i]\n pass\n\n pass\n \n return(output)\n\n for l in tqdm.tqdm(group['link'], total=len(group['link'])):\n\n try:\n\n driver.get(l)\n pass\n\n except:\n\n group['title'] += [None]\n group['abstract'] += [None]\n group['tag'] += [None]\n group['author'] += [None] \n continue\n\n try:\n \n title = driver.find_element_by_css_selector(\".heading-title\").text\n pass\n\n except:\n\n title = None\n pass\n\n try:\n \n abstract = driver.find_element_by_css_selector(\"#enc-abstract p\").text\n pass\n\n except:\n\n abstract = None\n pass\n\n try:\n\n tag = driver.find_element_by_css_selector(\"#enc-abstract+ p\").text.split(\": \")[-1]\n pass\n\n except:\n\n tag = None\n pass\n\n try:\n \n author = \";\".join(remove([i.text for i in driver.find_elements_by_css_selector(\".full-name\")], what=''))\n pass\n\n except:\n\n author = None\n pass\n\n group['title'] += [title]\n group['abstract'] += [abstract]\n group['tag'] += [tag]\n group['author'] += [author]\n time.sleep(1)\n pass\n\n table = pandas.DataFrame(group)\n table.to_csv(os.path.join(folder, \"{}.csv\".format(k)), index=False)\n driver.close()\n pass\n\n\n## \n## Merge all table together.\npath, folder = 'resource/csv', ['asthma', 'Covid-19', \"influenza\", \"Myocardial Infarction\", 'Stroke', \"chest cavity\"]\ngroup = []\nfor f in folder:\n\n p = os.path.join(path, f, '{}.csv'.format(f))\n t = pandas.read_csv(p)\n t['keyword'] = f\n t = t.dropna(subset=['title'])\n group += [t]\n pass\n\ndata = pandas.concat(group).reset_index(drop=True)\ndata.to_csv(os.path.join(path, \"data.csv\"), index=False)\n\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.concat" ] ]
eraofelix/PV-RCNN
[ "6361ec99cc1c92120263ef56b2c2b003c2cd7264" ]
[ "pvrcnn/inference.py" ]
[ "import copy\nimport os\nimport os.path as osp\nimport numpy as np\nimport torch\nimport sys\nsys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\nsys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')\nimport cv2\nfrom tqdm import tqdm\nimport time\nfrom pvrcnn.core import cfg, Preprocessor\nfrom pvrcnn.detector import PV_RCNN, Second\nfrom pvrcnn.ops import nms_rotated, box_iou_rotated\nfrom pvrcnn.core import cfg, AnchorGenerator\nfrom viz.gen_bev import gen_bev_map, draw_bev_box\n\nos.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\n\n\ndef to_device(item):\n for key in ['points', 'features', 'coordinates', 'occupancy']:\n item[key] = item[key].cuda()\n return item\n\ndef inference(out, anchors, cfg):\n cls_map, reg_map = out['P_cls'].squeeze(0), out['P_reg'].squeeze(0)\n score_map = cls_map.sigmoid()\n top_scores, class_idx = score_map.view(cfg.NUM_CLASSES, -1).max(0)\n top_scores, anchor_idx = top_scores.topk(k=cfg.PROPOSAL.TOPK)\n class_idx = class_idx[anchor_idx]\n # import pdb;pdb.set_trace()\n top_anchors = anchors.view(cfg.NUM_CLASSES, -1, cfg.BOX_DOF)[class_idx, anchor_idx]\n top_boxes = reg_map.reshape(cfg.NUM_CLASSES, -1, cfg.BOX_DOF)[class_idx, anchor_idx]\n\n P_xyz, P_wlh, P_yaw = top_boxes.split([3, 3, 1], dim=1)\n A_xyz, A_wlh, A_yaw = top_anchors.split([3, 3, 1], dim=1)\n\n A_wl, A_h = A_wlh.split([2, 1], -1)\n A_norm = A_wl.norm(dim=-1, keepdim=True).expand(-1, 2)\n A_norm = torch.cat((A_norm, A_h), dim=-1)\n\n top_boxes = torch.cat((\n (P_xyz * A_norm + A_xyz),\n (torch.exp(P_wlh) * A_wlh),\n (P_yaw + A_yaw)), dim=1\n )\n\n nms_idx = nms_rotated(top_boxes[:, [0, 1, 3, 4, 6]], top_scores, iou_threshold=0.1)\n top_boxes = top_boxes[nms_idx]\n top_scores = top_scores[nms_idx]\n top_classes = class_idx[nms_idx]\n return top_boxes, top_scores\n\n\nclass Inference():\n def __init__(self,):\n self.cfg = cfg\n self.cfg.merge_from_file('../configs/second/car.yaml')\n self.preprocessor = Preprocessor(cfg)\n self.anchors = AnchorGenerator(cfg).anchors.cuda()\n self.net = PV_RCNN(cfg).cuda().eval()\n # self.net = Second(cfg).cuda().eval()\n ckpt = torch.load('./ckpts/epoch_49.pth')\n self.net.load_state_dict(ckpt['state_dict'])\n pass\n\n def inference_bin_to_img(self, bin_path):\n pc = np.fromfile(bin_path, np.float32).reshape(-1, 4)\n item = dict(points=[pc])\n with torch.no_grad():\n item = to_device(self.preprocessor(item))\n out = self.net(item)\n top_boxes, top_scores= inference(out, self.anchors, self.cfg)\n rgb = draw_bev_box(pc, top_boxes.cpu().numpy())\n return rgb\n\n def inference_bins_to_video(self, bins_dir, vid_path):\n writer = cv2.VideoWriter(vid_path, cv2.VideoWriter_fourcc(*'MJPG'), 10, (2000,1000))\n bin_names = os.listdir(bins_dir)\n bin_names.sort()\n bin_paths = [os.path.join(bins_dir, p) for p in bin_names if '.bin' in p]\n for bin_path in tqdm(bin_paths[:200]):\n rgb = self.inference_bin_to_img(bin_path).astype(np.uint8)\n writer.write(rgb)\n\n\n\nif __name__ == '__main__':\n \n basedir = osp.join(cfg.DATA.ROOTDIR, 'velodyne_reduced/')\n bin_path = osp.join(basedir, '1544426448586.bin')\n bins_dir = '/home/kun.fan/mnt/output/lidar_baseline_20200228/20200227-154819_262'\n png_path = os.path.expanduser('~/mnt/output/1544426448586.png')\n vid_path = os.path.expanduser('~/mnt/output/test.avi')\n\n infer = Inference()\n rgb = infer.inference_bin_to_img(bin_path)\n cv2.imwrite(png_path, rgb)\n infer.inference_bins_to_video(bins_dir, vid_path)\n" ]
[ [ "numpy.fromfile", "torch.load", "torch.no_grad", "torch.exp", "torch.cat" ] ]
luizapozzobon/myo_project
[ "ce35149c444dee5a13dc7d1f1915046066ba47e2" ]
[ "primeiros_models/dense_features.py" ]
[ "from sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\ndef load_compilado(arquivo):\n path = '/home/luiza/UFSM/Myo/myo_project/datasets/oficial/' + arquivo\n df = pd.read_csv(path)\n return df\n\ndf = load_compilado('features_balanced.csv')\ndf_labels = df['Label']\ndf = df.drop(columns=['Label'])\n\nx_train, x_test, y_train, y_test = train_test_split(df.values, df_labels.values, test_size=0.3, random_state=0)\n\nprint('All Data:')\nprint(y_train)\nprint(y_test)\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.optimizers import Adam\n\ninput_shape = (x_train.shape[1])\nprint(input_shape, y_train.shape, x_train.shape)\n\noptimizer = Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.01, amsgrad=False)\n\nclassifier = Sequential()\nclassifier.add(Dense(32, input_dim=x_train.shape[1]))\nclassifier.add(Activation('relu'))\nclassifier.add(Dense(units = 64))\nclassifier.add(Activation('relu'))\nclassifier.add(Dense(units = 128))\nclassifier.add(Activation('relu'))\nclassifier.add(Dense(units = 1, activation=\"softmax\"))\n\nclassifier.summary()\n\nx_train = x_train.reshape(x_train.shape[0], x_train.shape[1])\n\nclassifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics=[\"accuracy\"])\n\nclassifier.fit(x_train, y_train, epochs = 10000, batch_size = 43, verbose=1)\n\n# Save\nclassifier.save(\"model_cross_splited_data.h5\")\nprint(\"Saved model to disk\")\n\n###############################################\n\nfrom tensorflow import keras\n\n# # Load Model\n# model = keras.models.load_model('model_cross_splited_data.h5')\n# model.summary()\n\ndef evaluateModel(prediction, y):\n good = 0\n for i in range(len(y)):\n if (prediction[i] == np.argmax(y[i])):\n good = good +1\n return (good/len(y)) * 100.0\n\n# result_test = classifier.predict_classes(X_test)\n# print(\"Correct classification rate on test data\")\n# print(evaluateModel(result_test, y_test))\n\nresult_train = classifier.predict_classes(x_train)\nprint(\"Correct classification rate on train data\")\nprint(evaluateModel(result_train, y_train))\n" ]
[ [ "tensorflow.keras.models.Sequential", "tensorflow.keras.optimizers.Adam", "pandas.read_csv", "tensorflow.keras.layers.Activation", "numpy.argmax", "tensorflow.keras.layers.Dense", "sklearn.model_selection.train_test_split" ] ]
alexborio/Projects
[ "a85ad4aab370b009de14e3696e06aad92ca4859f" ]
[ "VAE/vae.py" ]
[ "\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nencoder_layers = []\ndecoder_layers = []\n\nNormal = tf.contrib.distributions.Normal\nBernoulli = tf.contrib.distributions.Bernoulli\n\nclass Layer(object):\n def __init__(self, n, m, f=tf.nn.relu):\n self.W = tf.Variable(tf.random_normal(shape=(n, m))*2 / np.sqrt(n), dtype=tf.float32)\n self.W = tf.cast(self.W, dtype=tf.float32)\n self.c = tf.Variable(tf.zeros(m), dtype=tf.float32)\n self.c = tf.cast(self.c, dtype=tf.float32)\n self.f = f\n\n def forward(self, X):\n return self.f(tf.matmul(X, self.W) + self.c)\n\n\ndef KLDivergence(mu, sigma):\n KL1 = tf.log(sigma)\n KL2 = (1 + tf.pow((-mu), 2))/2/tf.pow(sigma, 2) - 0.5\n KL = KL1 + KL2\n return tf.reduce_sum(KL, axis=1)\n\n\nmnist = input_data.read_data_sets('../data/MNIST_data', one_hot=True)\nX = mnist.train.images\n\nn_input = 28*28\n\nhidden_layer_sizes = [200, 150, 100, 20, 2]\nX_in = tf.placeholder(dtype=tf.float32, shape=(None, n_input))\nZ = X_in\nn = n_input\n\nfor m in hidden_layer_sizes[:-1]:\n\n layer = Layer(n, m)\n Z = layer.forward(Z)\n encoder_layers.append(layer)\n n = m\n\n\nm_latent = hidden_layer_sizes[-1] * 2\n\nlayer = Layer(n, m_latent, lambda x: x)\n\nZ = layer.forward(Z)\nencoder_layers.append(layer)\n\nmu = Z[:, :(m_latent // 2)]\nsigma = tf.exp(Z[:, (m_latent // 2):])\n\nE = tf.placeholder(dtype=tf.float32, shape=(None, hidden_layer_sizes[-1]))\n\nn = m_latent // 2\n\nZ = E*sigma + mu\n\nfor m in reversed(hidden_layer_sizes[:-1]):\n layer = Layer(n, m)\n Z = layer.forward(Z)\n decoder_layers.append(layer)\n n = m\n\nlayer = Layer(n, n_input, lambda x: x)\nZ = layer.forward(Z)\ndecoder_layers.append(layer)\n\nkl = -tf.log(sigma) + 0.5 * (sigma ** 2 + mu ** 2) - 0.5\nkl = tf.reduce_sum(kl, axis=1)\n\n#kl = KLDivergence(mu, sigma)\n\nprobs = tf.contrib.distributions.Bernoulli(logits=Z).log_prob(X_in)\ncost = tf.reduce_sum(tf.reduce_sum(probs, 1) - kl)\ntrain_op = tf.train.RMSPropOptimizer(0.001).minimize(-cost)\nsess = tf.InteractiveSession()\ninit_op = tf.global_variables_initializer()\n\nsess.run(init_op)\n\nN = (X.shape)[0]\ncosts = []\nn_batch_sz = 100\nepochs = 50\nn_batches = N // n_batch_sz\nX = (X > 0.5).astype(np.float32)\nfor epoch in range(epochs):\n\n np.random.shuffle(X)\n\n for i in range(n_batches):\n\n dict1 = {X_in: X[i*n_batch_sz:(i + 1)*n_batch_sz, :]}\n dict2 = {E: np.reshape(np.random.randn(m_latent // 2), (1, m_latent // 2))}\n dict1.update(dict2)\n _, c = sess.run((train_op, cost), feed_dict=dict1)\n c /= n_batch_sz\n costs.append(c)\n print(c)\n\n\ndone = False\n\nZ_in = tf.placeholder(dtype=tf.float32, shape=(None, hidden_layer_sizes[-1]))\nZ_sim = Z_in\nfor layer in decoder_layers:\n Z_sim = layer.forward(Z_sim)\n\nZ_sim_out = tf.nn.sigmoid(Z_sim)\n\nwhile not done:\n feed = {Z_in: np.reshape(np.random.randn(m_latent // 2), (1, m_latent // 2))}\n X_sim = sess.run(Z_sim_out, feed_dict=feed)\n\n im_X_sim = X_sim.reshape(28, 28)\n plt.imshow(im_X_sim, cmap='gray')\n plt.show()\n\n ans = input(\"Generate another?\")\n if ans and ans[0] in ('n' or 'N'):\n done = True" ]
[ [ "tensorflow.contrib.distributions.Bernoulli", "tensorflow.matmul", "tensorflow.InteractiveSession", "matplotlib.pyplot.imshow", "tensorflow.random_normal", "tensorflow.reduce_sum", "tensorflow.global_variables_initializer", "tensorflow.train.RMSPropOptimizer", "tensorflow.pow", "tensorflow.cast", "tensorflow.nn.sigmoid", "tensorflow.placeholder", "numpy.random.shuffle", "tensorflow.zeros", "numpy.random.randn", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.exp", "matplotlib.pyplot.show", "numpy.sqrt", "tensorflow.log" ] ]
bhgedigital/probability
[ "df70fe68491f839df438628fa79cb3378888039e" ]
[ "tensorflow_probability/python/distributions/inverse_gamma.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The InverseGamma distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\n\n\n__all__ = [\n \"InverseGamma\",\n \"InverseGammaWithSoftplusConcentrationRate\",\n]\n\n\nclass InverseGamma(distribution.Distribution):\n \"\"\"InverseGamma distribution.\n\n The `InverseGamma` distribution is defined over positive real numbers using\n parameters `concentration` (aka \"alpha\") and `scale` (aka \"beta\").\n\n #### Mathematical Details\n\n The probability density function (pdf) is,\n\n ```none\n pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z\n Z = Gamma(alpha) beta**-alpha\n ```\n\n where:\n\n * `concentration = alpha`,\n * `scale = beta`,\n * `Z` is the normalizing constant, and,\n * `Gamma` is the [gamma function](\n https://en.wikipedia.org/wiki/Gamma_function).\n\n The cumulative density function (cdf) is,\n\n ```none\n cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha)\n ```\n\n where `GammaInc` is the [upper incomplete Gamma function](\n https://en.wikipedia.org/wiki/Incomplete_gamma_function).\n\n The parameters can be intuited via their relationship to mean and variance\n when these moments exist,\n\n ```none\n mean = beta / (alpha - 1) when alpha > 1\n variance = beta**2 / (alpha - 1)**2 / (alpha - 2) when alpha > 2\n ```\n\n i.e., under the same conditions:\n\n ```none\n alpha = mean**2 / variance + 2\n beta = mean * (mean**2 / variance + 1)\n ```\n\n Distribution parameters are automatically broadcast in all functions; see\n examples for details.\n\n Samples of this distribution are reparameterized (pathwise differentiable).\n The derivatives are computed using the approach described in the paper\n\n [Michael Figurnov, Shakir Mohamed, Andriy Mnih.\n Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n dist = tfd.InverseGamma(concentration=3.0, scale=2.0)\n dist2 = tfd.InverseGamma(concentration=[3.0, 4.0], scale=[2.0, 3.0])\n ```\n\n Compute the gradients of samples w.r.t. 
the parameters:\n\n ```python\n tfd = tfp.distributions\n concentration = tf.constant(3.0)\n scale = tf.constant(2.0)\n dist = tfd.InverseGamma(concentration, scale)\n samples = dist.sample(5) # Shape [5]\n loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function\n # Unbiased stochastic gradients of the loss function\n grads = tf.gradients(loss, [concentration, scale])\n ```\n\n \"\"\"\n\n @deprecation.deprecated_args(\n \"2019-05-08\", \"The `rate` parameter is deprecated. Use `scale` instead.\"\n \"The `rate` parameter was always interpreted as a `scale` parameter, \"\n \"but erroneously misnamed.\", \"rate\")\n def __init__(self,\n concentration,\n scale=None,\n validate_args=False,\n allow_nan_stats=True,\n rate=None,\n name=\"InverseGamma\"):\n \"\"\"Construct InverseGamma with `concentration` and `scale` parameters.\n\n The parameters `concentration` and `scale` must be shaped in a way that\n supports broadcasting (e.g. `concentration + scale` is a valid operation).\n\n Args:\n concentration: Floating point tensor, the concentration params of the\n distribution(s). Must contain only positive values.\n scale: Floating point tensor, the scale params of the distribution(s).\n Must contain only positive values.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n rate: Deprecated (mis-named) alias for `scale`.\n name: Python `str` name prefixed to Ops created by this class.\n\n\n Raises:\n TypeError: if `concentration` and `scale` are different dtypes.\n \"\"\"\n if rate is not None:\n scale = rate\n parameters = dict(locals())\n with tf.compat.v1.name_scope(name, values=[concentration, scale]) as name:\n dtype = dtype_util.common_dtype([concentration, scale],\n preferred_dtype=tf.float32)\n concentration = tf.convert_to_tensor(\n value=concentration, name=\"concentration\", dtype=dtype)\n scale = tf.convert_to_tensor(value=scale, name=\"scale\", dtype=dtype)\n with tf.control_dependencies([\n tf.compat.v1.assert_positive(\n concentration, message=\"Concentration must be positive.\"),\n tf.compat.v1\n .assert_positive(scale, message=\"Scale must be positive.\"),\n ] if validate_args else []):\n self._concentration = tf.identity(concentration, name=\"concentration\")\n self._scale = tf.identity(scale, name=\"scale\")\n tf.debugging.assert_same_float_dtype([self._concentration, self._scale])\n\n super(InverseGamma, self).__init__(\n dtype=self._concentration.dtype,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n parameters=parameters,\n graph_parents=[self._concentration, self._scale],\n name=name)\n\n @staticmethod\n def _param_shapes(sample_shape):\n return dict(\n zip((\"concentration\", \"scale\"),\n ([tf.convert_to_tensor(value=sample_shape, dtype=tf.int32)] * 2)))\n\n def _params_event_ndims(self):\n return dict(concentration=0, rate=0, scale=0)\n\n @property\n def concentration(self):\n \"\"\"Concentration parameter.\"\"\"\n return self._concentration\n\n @property\n @deprecation.deprecated(\n \"2019-05-08\", \"The `rate` parameter is 
deprecated. Use `scale` instead.\"\n \"The `rate` parameter was always interpreted as a `scale`parameter, but \"\n \"erroneously misnamed.\")\n def rate(self):\n \"\"\"Scale parameter.\"\"\"\n return self._scale\n\n @property\n def scale(self):\n \"\"\"Scale parameter.\"\"\"\n return self._scale\n\n def _batch_shape_tensor(self):\n return tf.broadcast_dynamic_shape(\n tf.shape(input=self.concentration), tf.shape(input=self.scale))\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(self.concentration.shape,\n self.scale.shape)\n\n def _event_shape_tensor(self):\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n @distribution_util.AppendDocstring(\n \"\"\"Note: See `tf.random_gamma` docstring for sampling details and\n caveats.\"\"\")\n def _sample_n(self, n, seed=None):\n return 1. / tf.random.gamma(\n shape=[n],\n alpha=self.concentration,\n beta=self.scale,\n dtype=self.dtype,\n seed=seed)\n\n def _log_prob(self, x):\n return self._log_unnormalized_prob(x) - self._log_normalization()\n\n def _cdf(self, x):\n x = self._maybe_assert_valid_sample(x)\n # Note that igammac returns the upper regularized incomplete gamma\n # function Q(a, x), which is what we want for the CDF.\n return tf.math.igammac(self.concentration, self.scale / x)\n\n def _log_unnormalized_prob(self, x):\n x = self._maybe_assert_valid_sample(x)\n return -(1. + self.concentration) * tf.math.log(x) - self.scale / x\n\n def _log_normalization(self):\n return (tf.math.lgamma(self.concentration) -\n self.concentration * tf.math.log(self.scale))\n\n def _entropy(self):\n return (self.concentration + tf.math.log(self.scale) +\n tf.math.lgamma(self.concentration) -\n ((1. + self.concentration) * tf.math.digamma(self.concentration)))\n\n @distribution_util.AppendDocstring(\n \"\"\"The mean of an inverse gamma distribution is\n `scale / (concentration - 1)`, when `concentration > 1`, and `NaN`\n otherwise. If `self.allow_nan_stats` is `False`, an exception will be\n raised rather than returning `NaN`\"\"\")\n def _mean(self):\n mean = self.scale / (self.concentration - 1.)\n if self.allow_nan_stats:\n nan = tf.fill(\n self.batch_shape_tensor(),\n np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),\n name=\"nan\")\n return tf.where(self.concentration > 1., mean, nan)\n else:\n return distribution_util.with_dependencies([\n tf.compat.v1.assert_less(\n tf.ones([], self.dtype),\n self.concentration,\n message=\"mean undefined when any concentration <= 1\"),\n ], mean)\n\n @distribution_util.AppendDocstring(\n \"\"\"Variance for inverse gamma is defined only for `concentration > 2`. If\n `self.allow_nan_stats` is `False`, an exception will be raised rather\n than returning `NaN`.\"\"\")\n def _variance(self):\n var = (\n tf.square(self.scale) / tf.square(self.concentration - 1.) /\n (self.concentration - 2.))\n if self.allow_nan_stats:\n nan = tf.fill(\n self.batch_shape_tensor(),\n np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),\n name=\"nan\")\n return tf.where(self.concentration > 2., var, nan)\n else:\n return distribution_util.with_dependencies([\n tf.compat.v1.assert_less(\n tf.constant(2., dtype=self.dtype),\n self.concentration,\n message=\"variance undefined when any concentration <= 2\"),\n ], var)\n\n @distribution_util.AppendDocstring(\n \"\"\"The mode of an inverse gamma distribution is `scale / (concentration +\n 1)`.\"\"\")\n def _mode(self):\n return self.scale / (1. 
+ self.concentration)\n\n def _maybe_assert_valid_sample(self, x):\n tf.debugging.assert_same_float_dtype(tensors=[x], dtype=self.dtype)\n if not self.validate_args:\n return x\n return distribution_util.with_dependencies([\n tf.compat.v1.assert_positive(x),\n ], x)\n\n\nclass _InverseGammaWithSoftplusConcentrationScale(InverseGamma):\n \"\"\"`InverseGamma` with softplus of `concentration` and `scale`.\"\"\"\n\n @deprecation.deprecated_args(\n \"2019-05-08\", \"The `rate` parameter is deprecated. Use `scale` instead.\"\n \"The `rate` parameter was always interpreted as a `scale`parameter, but \"\n \"erroneously misnamed.\", \"rate\")\n def __init__(self,\n concentration,\n scale=None,\n validate_args=False,\n allow_nan_stats=True,\n rate=None,\n name=\"InverseGammaWithSoftplusConcentrationScale\"):\n if rate is not None:\n scale = rate\n parameters = dict(locals())\n with tf.compat.v1.name_scope(name, values=[concentration, scale]) as name:\n dtype = dtype_util.common_dtype([concentration, scale])\n concentration = tf.convert_to_tensor(\n value=concentration, name=\"softplus_concentration\", dtype=dtype)\n scale = tf.convert_to_tensor(\n value=scale, name=\"softplus_scale\", dtype=dtype)\n super(_InverseGammaWithSoftplusConcentrationScale, self).__init__(\n concentration=tf.nn.softplus(\n concentration, name=\"softplus_concentration\"),\n scale=tf.nn.softplus(scale, name=\"softplus_scale\"),\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n name=name)\n self._parameters = parameters\n\n\n_rate_deprecator = deprecation.deprecated(\n \"2019-06-05\",\n \"InverseGammaWithSoftplusConcentrationRate is deprecated, use \"\n \"InverseGamma(concentration=tf.nn.softplus(concentration), \"\n \"scale=tf.nn.softplus(scale)) instead.\",\n warn_once=True)\n# pylint: disable=invalid-name\nInverseGammaWithSoftplusConcentrationRate = _rate_deprecator(\n _InverseGammaWithSoftplusConcentrationScale)\n\n_scale_deprecator = deprecation.deprecated(\n \"2019-06-05\",\n \"InverseGammaWithSoftplusConcentrationScale is deprecated, use \"\n \"InverseGamma(concentration=tf.nn.softplus(concentration), \"\n \"scale=tf.nn.softplus(scale)) instead.\",\n warn_once=True)\nInverseGammaWithSoftplusConcentrationScale = _scale_deprecator(\n _InverseGammaWithSoftplusConcentrationScale)\n" ]
[ [ "tensorflow.compat.v1.assert_positive", "tensorflow.python.util.deprecation.deprecated", "tensorflow.ones", "tensorflow.convert_to_tensor", "tensorflow.identity", "tensorflow.math.log", "tensorflow.random.gamma", "tensorflow.constant", "tensorflow.nn.softplus", "tensorflow.shape", "tensorflow.math.igammac", "tensorflow.TensorShape", "tensorflow.broadcast_static_shape", "tensorflow.math.lgamma", "tensorflow.debugging.assert_same_float_dtype", "tensorflow.where", "tensorflow.python.util.deprecation.deprecated_args", "tensorflow.square", "tensorflow.math.digamma", "tensorflow.compat.v1.name_scope" ] ]
brynhayder/metapop
[ "2a5f25a904cba7133c398c9ce7fff6ad7a5d8705" ]
[ "src/plot.py" ]
[ "from argparse import ArgumentParser\nimport os\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\nfilenames = \"susceptible.csv exposed.csv infected.csv recovered.csv\".split()\n\n\ndef title(ax, region):\n return ax.set_title(region, x=0.95, y=0.9, ha=\"right\", va=\"top\")\n\n\ndef legend(fig, ax):\n lins, labs = ax.get_legend_handles_labels()\n return fig.legend(\n lins, labs, ncol=len(labs), bbox_to_anchor=(0.5, 0.05), loc=\"center\"\n )\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\"Plot Results\")\n parser.add_argument(\n \"folder\",\n type=str,\n help=(\n \"Path to csv file to plot.\"\n \" Must contain files susceptible.csv, exposed.csv,\"\n \" infected.csv and recovered.csv.\"\n \" These files should be of csv type, comma delimited\"\n \" and with the same number of columns (the regions).\"\n \" The first row will be read as region names.\"\n \" We will assume that there is no index column.\"\n ),\n )\n parser.add_argument(\n \"--rt\",\n type=str,\n help=(\n \"Path to rt csv used for simulation.\"\n \" If given we will plot the R_t timeseries.\"\n ),\n default=None,\n )\n args = parser.parse_args()\n\n outputs = pd.concat(\n {\n k.replace(\".csv\", \"\"): pd.read_csv(\n os.path.join(args.folder, k), header=None\n )\n for k in filenames\n },\n axis=1,\n ).swaplevel(axis=1)\n\n regions = outputs.columns.levels[0]\n\n if args.rt is not None:\n rt = pd.read_csv(os.path.join(args.rt), header=None)\n npop = outputs.groupby(level=0, axis=1).sum()\n rts = rt * outputs.swaplevel(axis=1)[\"susceptible\"] / npop\n xaxis = outputs.index\n fig, axarr = plt.subplots(len(regions), 1, sharex=True, squeeze=False)\n for ax, region in zip(axarr.flat, regions):\n ax.plot(xaxis, rts[region], label=\"R_t\", zorder=100)\n ax.plot(xaxis[:-1], rt[region], label=\"R_0\", alpha=0.5)\n ax.axhline(1, ls=\"--\", alpha=0.5, label=\"R_t=1\", color=\"k\")\n ax.set_ylabel(\"Reproduction\")\n ax.set_xlabel(\"Days\")\n ax.grid(alpha=0.25)\n title(ax, region)\n legend(fig, ax)\n plt.tight_layout()\n plt.subplots_adjust(hspace=0.1)\n\n fig, axarr = plt.subplots(len(regions), 1, sharex=True, sharey=False, squeeze=False)\n for ax, region in zip(axarr.flat, regions):\n title(ax, region)\n outputs[region]['infected'].plot(ax=ax, legend=False)\n ax.set_ylabel(\"Population\")\n ax.grid(alpha=0.2)\n ax.set_xlabel(\"Timesteps\")\n legend(fig, ax)\n plt.subplots_adjust(hspace=0.05)\n\n plt.show()\n" ]
[ [ "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "matplotlib.pyplot.tight_layout" ] ]
dequadras/pandas
[ "8a7fbbeb8e3a88f8e355093eb1b68f361e65b6aa", "8a7fbbeb8e3a88f8e355093eb1b68f361e65b6aa" ]
[ "pandas/tests/io/generate_legacy_storage_files.py", "pandas/tests/indexing/test_floats.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"\nself-contained to write legacy storage pickle files\n\nTo use this script. Create an environment where you want\ngenerate pickles, say its for 0.20.3, with your pandas clone\nin ~/pandas\n\n. activate pandas_0.20.3\ncd ~/\n\n$ python pandas/pandas/tests/io/generate_legacy_storage_files.py \\\n pandas/pandas/tests/io/data/legacy_pickle/0.20.3/ pickle\n\nThis script generates a storage file for the current arch, system,\nand python version\n pandas version: 0.20.3\n output dir : pandas/pandas/tests/io/data/legacy_pickle/0.20.3/\n storage format: pickle\ncreated pickle file: 0.20.3_x86_64_darwin_3.5.2.pickle\n\nThe idea here is you are using the *current* version of the\ngenerate_legacy_storage_files with an *older* version of pandas to\ngenerate a pickle file. We will then check this file into a current\nbranch, and test using test_pickle.py. This will load the *older*\npickles and test versus the current data that is generated\n(with master). These are then compared.\n\nIf we have cases where we changed the signature (e.g. we renamed\noffset -> freq in Timestamp). Then we have to conditionally execute\nin the generate_legacy_storage_files.py to make it\nrun under the older AND the newer version.\n\n\"\"\"\n\nfrom datetime import timedelta\nfrom distutils.version import LooseVersion\nimport os\nimport pickle\nimport platform as pl\nimport sys\n\nimport numpy as np\n\nimport pandas\nfrom pandas import (\n Categorical,\n DataFrame,\n Index,\n MultiIndex,\n NaT,\n Period,\n RangeIndex,\n Series,\n Timestamp,\n bdate_range,\n date_range,\n period_range,\n timedelta_range,\n)\n\nfrom pandas.tseries.offsets import (\n FY5253,\n BusinessDay,\n BusinessHour,\n CustomBusinessDay,\n DateOffset,\n Day,\n Easter,\n Hour,\n LastWeekOfMonth,\n Minute,\n MonthBegin,\n MonthEnd,\n QuarterBegin,\n QuarterEnd,\n SemiMonthBegin,\n SemiMonthEnd,\n Week,\n WeekOfMonth,\n YearBegin,\n YearEnd,\n)\n\ntry:\n # TODO: remove try/except when 0.24.0 is the legacy version.\n from pandas.arrays import SparseArray\nexcept ImportError:\n from pandas.core.sparse.api import SparseArray\n\n\n_loose_version = LooseVersion(pandas.__version__)\n\n\ndef _create_sp_series():\n nan = np.nan\n\n # nan-based\n arr = np.arange(15, dtype=np.float64)\n arr[7:12] = nan\n arr[-1:] = nan\n\n bseries = Series(SparseArray(arr, kind=\"block\"))\n bseries.name = \"bseries\"\n return bseries\n\n\ndef _create_sp_tsseries():\n nan = np.nan\n\n # nan-based\n arr = np.arange(15, dtype=np.float64)\n arr[7:12] = nan\n arr[-1:] = nan\n\n date_index = bdate_range(\"1/1/2011\", periods=len(arr))\n bseries = Series(SparseArray(arr, kind=\"block\"), index=date_index)\n bseries.name = \"btsseries\"\n return bseries\n\n\ndef _create_sp_frame():\n nan = np.nan\n\n data = {\n \"A\": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],\n \"B\": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],\n \"C\": np.arange(10).astype(np.int64),\n \"D\": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],\n }\n\n dates = bdate_range(\"1/1/2011\", periods=10)\n return DataFrame(data, index=dates).apply(SparseArray)\n\n\ndef create_data():\n \"\"\" create the pickle data \"\"\"\n data = {\n \"A\": [0.0, 1.0, 2.0, 3.0, np.nan],\n \"B\": [0, 1, 0, 1, 0],\n \"C\": [\"foo1\", \"foo2\", \"foo3\", \"foo4\", \"foo5\"],\n \"D\": date_range(\"1/1/2009\", periods=5),\n \"E\": [0.0, 1, Timestamp(\"20100101\"), \"foo\", 2.0],\n }\n\n scalars = dict(timestamp=Timestamp(\"20130101\"), period=Period(\"2012\", \"M\"))\n\n index = dict(\n int=Index(np.arange(10)),\n 
date=date_range(\"20130101\", periods=10),\n period=period_range(\"2013-01-01\", freq=\"M\", periods=10),\n float=Index(np.arange(10, dtype=np.float64)),\n uint=Index(np.arange(10, dtype=np.uint64)),\n timedelta=timedelta_range(\"00:00:00\", freq=\"30T\", periods=10),\n )\n\n index[\"range\"] = RangeIndex(10)\n\n if _loose_version >= LooseVersion(\"0.21\"):\n from pandas import interval_range\n\n index[\"interval\"] = interval_range(0, periods=10)\n\n mi = dict(\n reg2=MultiIndex.from_tuples(\n tuple(\n zip(\n *[\n [\"bar\", \"bar\", \"baz\", \"baz\", \"foo\", \"foo\", \"qux\", \"qux\"],\n [\"one\", \"two\", \"one\", \"two\", \"one\", \"two\", \"one\", \"two\"],\n ]\n )\n ),\n names=[\"first\", \"second\"],\n )\n )\n\n series = dict(\n float=Series(data[\"A\"]),\n int=Series(data[\"B\"]),\n mixed=Series(data[\"E\"]),\n ts=Series(\n np.arange(10).astype(np.int64), index=date_range(\"20130101\", periods=10)\n ),\n mi=Series(\n np.arange(5).astype(np.float64),\n index=MultiIndex.from_tuples(\n tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=[\"one\", \"two\"]\n ),\n ),\n dup=Series(np.arange(5).astype(np.float64), index=[\"A\", \"B\", \"C\", \"D\", \"A\"]),\n cat=Series(Categorical([\"foo\", \"bar\", \"baz\"])),\n dt=Series(date_range(\"20130101\", periods=5)),\n dt_tz=Series(date_range(\"20130101\", periods=5, tz=\"US/Eastern\")),\n period=Series([Period(\"2000Q1\")] * 5),\n )\n\n mixed_dup_df = DataFrame(data)\n mixed_dup_df.columns = list(\"ABCDA\")\n frame = dict(\n float=DataFrame({\"A\": series[\"float\"], \"B\": series[\"float\"] + 1}),\n int=DataFrame({\"A\": series[\"int\"], \"B\": series[\"int\"] + 1}),\n mixed=DataFrame({k: data[k] for k in [\"A\", \"B\", \"C\", \"D\"]}),\n mi=DataFrame(\n {\"A\": np.arange(5).astype(np.float64), \"B\": np.arange(5).astype(np.int64)},\n index=MultiIndex.from_tuples(\n tuple(\n zip(\n *[\n [\"bar\", \"bar\", \"baz\", \"baz\", \"baz\"],\n [\"one\", \"two\", \"one\", \"two\", \"three\"],\n ]\n )\n ),\n names=[\"first\", \"second\"],\n ),\n ),\n dup=DataFrame(\n np.arange(15).reshape(5, 3).astype(np.float64), columns=[\"A\", \"B\", \"A\"]\n ),\n cat_onecol=DataFrame({\"A\": Categorical([\"foo\", \"bar\"])}),\n cat_and_float=DataFrame(\n {\n \"A\": Categorical([\"foo\", \"bar\", \"baz\"]),\n \"B\": np.arange(3).astype(np.int64),\n }\n ),\n mixed_dup=mixed_dup_df,\n dt_mixed_tzs=DataFrame(\n {\n \"A\": Timestamp(\"20130102\", tz=\"US/Eastern\"),\n \"B\": Timestamp(\"20130603\", tz=\"CET\"),\n },\n index=range(5),\n ),\n dt_mixed2_tzs=DataFrame(\n {\n \"A\": Timestamp(\"20130102\", tz=\"US/Eastern\"),\n \"B\": Timestamp(\"20130603\", tz=\"CET\"),\n \"C\": Timestamp(\"20130603\", tz=\"UTC\"),\n },\n index=range(5),\n ),\n )\n\n cat = dict(\n int8=Categorical(list(\"abcdefg\")),\n int16=Categorical(np.arange(1000)),\n int32=Categorical(np.arange(10000)),\n )\n\n timestamp = dict(\n normal=Timestamp(\"2011-01-01\"),\n nat=NaT,\n tz=Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n )\n\n timestamp[\"freq\"] = Timestamp(\"2011-01-01\", freq=\"D\")\n timestamp[\"both\"] = Timestamp(\"2011-01-01\", tz=\"Asia/Tokyo\", freq=\"M\")\n\n off = {\n \"DateOffset\": DateOffset(years=1),\n \"DateOffset_h_ns\": DateOffset(hour=6, nanoseconds=5824),\n \"BusinessDay\": BusinessDay(offset=timedelta(seconds=9)),\n \"BusinessHour\": BusinessHour(normalize=True, n=6, end=\"15:14\"),\n \"CustomBusinessDay\": CustomBusinessDay(weekmask=\"Mon Fri\"),\n \"SemiMonthBegin\": SemiMonthBegin(day_of_month=9),\n \"SemiMonthEnd\": SemiMonthEnd(day_of_month=24),\n \"MonthBegin\": 
MonthBegin(1),\n \"MonthEnd\": MonthEnd(1),\n \"QuarterBegin\": QuarterBegin(1),\n \"QuarterEnd\": QuarterEnd(1),\n \"Day\": Day(1),\n \"YearBegin\": YearBegin(1),\n \"YearEnd\": YearEnd(1),\n \"Week\": Week(1),\n \"Week_Tues\": Week(2, normalize=False, weekday=1),\n \"WeekOfMonth\": WeekOfMonth(week=3, weekday=4),\n \"LastWeekOfMonth\": LastWeekOfMonth(n=1, weekday=3),\n \"FY5253\": FY5253(n=2, weekday=6, startingMonth=7, variation=\"last\"),\n \"Easter\": Easter(),\n \"Hour\": Hour(1),\n \"Minute\": Minute(1),\n }\n\n return dict(\n series=series,\n frame=frame,\n index=index,\n scalars=scalars,\n mi=mi,\n sp_series=dict(float=_create_sp_series(), ts=_create_sp_tsseries()),\n sp_frame=dict(float=_create_sp_frame()),\n cat=cat,\n timestamp=timestamp,\n offsets=off,\n )\n\n\ndef create_pickle_data():\n data = create_data()\n\n return data\n\n\ndef platform_name():\n return \"_\".join(\n [\n str(pandas.__version__),\n str(pl.machine()),\n str(pl.system().lower()),\n str(pl.python_version()),\n ]\n )\n\n\ndef write_legacy_pickles(output_dir):\n\n version = pandas.__version__\n\n print(\n \"This script generates a storage file for the current arch, system, \"\n \"and python version\"\n )\n print(\" pandas version: {0}\".format(version))\n print(\" output dir : {0}\".format(output_dir))\n print(\" storage format: pickle\")\n\n pth = \"{0}.pickle\".format(platform_name())\n\n fh = open(os.path.join(output_dir, pth), \"wb\")\n pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL)\n fh.close()\n\n print(\"created pickle file: {pth}\".format(pth=pth))\n\n\ndef write_legacy_file():\n # force our cwd to be the first searched\n sys.path.insert(0, \".\")\n\n if not (3 <= len(sys.argv) <= 4):\n exit(\n \"Specify output directory and storage type: generate_legacy_\"\n \"storage_files.py <output_dir> <storage_type> \"\n )\n\n output_dir = str(sys.argv[1])\n storage_type = str(sys.argv[2])\n\n if storage_type == \"pickle\":\n write_legacy_pickles(output_dir=output_dir)\n else:\n exit(\"storage_type must be one of {'pickle'}\")\n\n\nif __name__ == \"__main__\":\n write_legacy_file()\n", "import numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series\nimport pandas._testing as tm\n\n\nclass TestFloatIndexers:\n def check(self, result, original, indexer, getitem):\n \"\"\"\n comparator for results\n we need to take care if we are indexing on a\n Series or a frame\n \"\"\"\n if isinstance(original, Series):\n expected = original.iloc[indexer]\n else:\n if getitem:\n expected = original.iloc[:, indexer]\n else:\n expected = original.iloc[indexer]\n\n tm.assert_almost_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"index_func\",\n [\n tm.makeStringIndex,\n tm.makeUnicodeIndex,\n tm.makeCategoricalIndex,\n tm.makeDateIndex,\n tm.makeTimedeltaIndex,\n tm.makePeriodIndex,\n tm.makeIntIndex,\n tm.makeRangeIndex,\n ],\n )\n def test_scalar_error(self, index_func):\n\n # GH 4892\n # float_indexers should raise exceptions\n # on appropriate Index types & accessors\n # this duplicates the code below\n # but is specifically testing for the error\n # message\n\n i = index_func(5)\n\n s = Series(np.arange(len(i)), index=i)\n\n msg = \"Cannot index by location index\"\n with pytest.raises(TypeError, match=msg):\n s.iloc[3.0]\n\n msg = (\n \"cannot do positional indexing on {klass} with these \"\n r\"indexers \\[3\\.0\\] of type float\".format(klass=type(i).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s.iloc[3.0] = 0\n\n 
@pytest.mark.parametrize(\n \"index_func\",\n [\n tm.makeStringIndex,\n tm.makeUnicodeIndex,\n tm.makeCategoricalIndex,\n tm.makeDateIndex,\n tm.makeTimedeltaIndex,\n tm.makePeriodIndex,\n ],\n )\n def test_scalar_non_numeric(self, index_func):\n\n # GH 4892\n # float_indexers should raise exceptions\n # on appropriate Index types & accessors\n\n i = index_func(5)\n\n for s in [\n Series(np.arange(len(i)), index=i),\n DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i),\n ]:\n\n # getting\n for idxr, getitem in [(lambda x: x.iloc, False), (lambda x: x, True)]:\n\n # gettitem on a DataFrame is a KeyError as it is indexing\n # via labels on the columns\n if getitem and isinstance(s, DataFrame):\n error = KeyError\n msg = r\"^3(\\.0)?$\"\n else:\n error = TypeError\n msg = (\n r\"cannot do (label|positional) indexing \"\n r\"on {klass} with these indexers \\[3\\.0\\] of \"\n r\"type float|\"\n \"Cannot index by location index with a \"\n \"non-integer key\".format(klass=type(i).__name__)\n )\n with pytest.raises(error, match=msg):\n idxr(s)[3.0]\n\n # label based can be a TypeError or KeyError\n if s.index.inferred_type in {\n \"categorical\",\n \"string\",\n \"unicode\",\n \"mixed\",\n }:\n error = KeyError\n msg = r\"^3\\.0$\"\n else:\n error = TypeError\n msg = (\n r\"cannot do (label|positional) indexing \"\n r\"on {klass} with these indexers \\[3\\.0\\] of \"\n r\"type float\".format(klass=type(i).__name__)\n )\n with pytest.raises(error, match=msg):\n s.loc[3.0]\n\n # contains\n assert 3.0 not in s\n\n # setting with a float fails with iloc\n msg = (\n r\"cannot do (label|positional) indexing \"\n r\"on {klass} with these indexers \\[3\\.0\\] of \"\n r\"type float\".format(klass=type(i).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s.iloc[3.0] = 0\n\n # setting with an indexer\n if s.index.inferred_type in [\"categorical\"]:\n # Value or Type Error\n pass\n elif s.index.inferred_type in [\"datetime64\", \"timedelta64\", \"period\"]:\n\n # these should prob work\n # and are inconsistent between series/dataframe ATM\n # for idxr in [lambda x: x]:\n # s2 = s.copy()\n #\n # with pytest.raises(TypeError):\n # idxr(s2)[3.0] = 0\n pass\n\n else:\n\n s2 = s.copy()\n s2.loc[3.0] = 10\n assert s2.index.is_object()\n\n for idxr in [lambda x: x]:\n s2 = s.copy()\n idxr(s2)[3.0] = 0\n assert s2.index.is_object()\n\n # fallsback to position selection, series only\n s = Series(np.arange(len(i)), index=i)\n s[3]\n msg = (\n r\"cannot do (label|positional) indexing \"\n r\"on {klass} with these indexers \\[3\\.0\\] of \"\n r\"type float\".format(klass=type(i).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[3.0]\n\n def test_scalar_with_mixed(self):\n\n s2 = Series([1, 2, 3], index=[\"a\", \"b\", \"c\"])\n s3 = Series([1, 2, 3], index=[\"a\", \"b\", 1.5])\n\n # lookup in a pure stringstr\n # with an invalid indexer\n for idxr in [lambda x: x, lambda x: x.iloc]:\n\n msg = (\n r\"cannot do label indexing \"\n r\"on {klass} with these indexers \\[1\\.0\\] of \"\n r\"type float|\"\n \"Cannot index by location index with a non-integer key\".format(\n klass=Index.__name__\n )\n )\n with pytest.raises(TypeError, match=msg):\n idxr(s2)[1.0]\n\n with pytest.raises(KeyError, match=r\"^1\\.0$\"):\n s2.loc[1.0]\n\n result = s2.loc[\"b\"]\n expected = 2\n assert result == expected\n\n # mixed index so we have label\n # indexing\n for idxr in [lambda x: x]:\n\n msg = (\n r\"cannot do label indexing \"\n r\"on {klass} with these indexers \\[1\\.0\\] of \"\n r\"type 
float\".format(klass=Index.__name__)\n )\n with pytest.raises(TypeError, match=msg):\n idxr(s3)[1.0]\n\n result = idxr(s3)[1]\n expected = 2\n assert result == expected\n\n msg = \"Cannot index by location index with a non-integer key\"\n with pytest.raises(TypeError, match=msg):\n s3.iloc[1.0]\n with pytest.raises(KeyError, match=r\"^1\\.0$\"):\n s3.loc[1.0]\n\n result = s3.loc[1.5]\n expected = 3\n assert result == expected\n\n @pytest.mark.parametrize(\n \"index_func\", [tm.makeIntIndex, tm.makeRangeIndex],\n )\n @pytest.mark.parametrize(\"klass\", [Series, DataFrame])\n def test_scalar_integer(self, index_func, klass):\n\n # test how scalar float indexers work on int indexes\n\n # integer index\n i = index_func(5)\n\n if klass is Series:\n obj = Series(np.arange(len(i)))\n else:\n obj = DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i)\n\n # coerce to equal int\n for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:\n\n result = idxr(obj)[3.0]\n self.check(result, obj, 3, getitem)\n\n # coerce to equal int\n for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:\n\n if isinstance(obj, Series):\n\n def compare(x, y):\n assert x == y\n\n expected = 100\n else:\n compare = tm.assert_series_equal\n if getitem:\n expected = Series(100, index=range(len(obj)), name=3)\n else:\n expected = Series(100.0, index=range(len(obj)), name=3)\n\n s2 = obj.copy()\n idxr(s2)[3.0] = 100\n\n result = idxr(s2)[3.0]\n compare(result, expected)\n\n result = idxr(s2)[3]\n compare(result, expected)\n\n # contains\n # coerce to equal int\n assert 3.0 in obj\n\n def test_scalar_float(self):\n\n # scalar float indexers work on a float index\n index = Index(np.arange(5.0))\n for s in [\n Series(np.arange(len(index)), index=index),\n DataFrame(\n np.random.randn(len(index), len(index)), index=index, columns=index\n ),\n ]:\n\n # assert all operations except for iloc are ok\n indexer = index[3]\n for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:\n\n # getting\n result = idxr(s)[indexer]\n self.check(result, s, 3, getitem)\n\n # setting\n s2 = s.copy()\n\n result = idxr(s2)[indexer]\n self.check(result, s, 3, getitem)\n\n # random integer is a KeyError\n with pytest.raises(KeyError, match=r\"^3\\.5$\"):\n idxr(s)[3.5]\n\n # contains\n assert 3.0 in s\n\n # iloc succeeds with an integer\n expected = s.iloc[3]\n s2 = s.copy()\n\n s2.iloc[3] = expected\n result = s2.iloc[3]\n self.check(result, s, 3, False)\n\n # iloc raises with a float\n msg = \"Cannot index by location index with a non-integer key\"\n with pytest.raises(TypeError, match=msg):\n s.iloc[3.0]\n\n msg = (\n r\"cannot do positional indexing \"\n r\"on {klass} with these indexers \\[3\\.0\\] of \"\n r\"type float\".format(klass=Float64Index.__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s2.iloc[3.0] = 0\n\n @pytest.mark.parametrize(\n \"index_func\",\n [\n tm.makeStringIndex,\n tm.makeUnicodeIndex,\n tm.makeDateIndex,\n tm.makeTimedeltaIndex,\n tm.makePeriodIndex,\n ],\n )\n def test_slice_non_numeric(self, index_func):\n\n # GH 4892\n # float_indexers should raise exceptions\n # on appropriate Index types & accessors\n\n index = index_func(5)\n for s in [\n Series(range(5), index=index),\n DataFrame(np.random.randn(5, 2), index=index),\n ]:\n\n # getitem\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:\n\n msg = (\n \"cannot do positional indexing \"\n r\"on {klass} with these indexers \\[(3|4)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with 
pytest.raises(TypeError, match=msg):\n s.iloc[l]\n\n for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:\n\n msg = (\n \"cannot do (slice|positional) indexing \"\n r\"on {klass} with these indexers \"\n r\"\\[(3|4)(\\.0)?\\] \"\n r\"of type (float|int)\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n idxr(s)[l]\n\n # setitem\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:\n\n msg = (\n \"cannot do positional indexing \"\n r\"on {klass} with these indexers \\[(3|4)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s.iloc[l] = 0\n\n for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:\n msg = (\n \"cannot do (slice|positional) indexing \"\n r\"on {klass} with these indexers \"\n r\"\\[(3|4)(\\.0)?\\] \"\n r\"of type (float|int)\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n idxr(s)[l] = 0\n\n def test_slice_integer(self):\n\n # same as above, but for Integer based indexes\n # these coerce to a like integer\n # oob indicates if we are out of bounds\n # of positional indexing\n for index, oob in [\n (Int64Index(range(5)), False),\n (RangeIndex(5), False),\n (Int64Index(range(5)) + 10, True),\n ]:\n\n # s is an in-range index\n s = Series(range(5), index=index)\n\n # getitem\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:\n\n for idxr in [lambda x: x.loc]:\n\n result = idxr(s)[l]\n\n # these are all label indexing\n # except getitem which is positional\n # empty\n if oob:\n indexer = slice(0, 0)\n else:\n indexer = slice(3, 5)\n self.check(result, s, indexer, False)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[(3|4)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l]\n\n # getitem out-of-bounds\n for l in [slice(-6, 6), slice(-6.0, 6.0)]:\n\n for idxr in [lambda x: x.loc]:\n result = idxr(s)[l]\n\n # these are all label indexing\n # except getitem which is positional\n # empty\n if oob:\n indexer = slice(0, 0)\n else:\n indexer = slice(-6, 6)\n self.check(result, s, indexer, False)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[-6\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[slice(-6.0, 6.0)]\n\n # getitem odd floats\n for l, res1 in [\n (slice(2.5, 4), slice(3, 5)),\n (slice(2, 3.5), slice(2, 4)),\n (slice(2.5, 3.5), slice(3, 4)),\n ]:\n\n for idxr in [lambda x: x.loc]:\n\n result = idxr(s)[l]\n if oob:\n res = slice(0, 0)\n else:\n res = res1\n\n self.check(result, s, res, False)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[(2|3)\\.5\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l]\n\n # setitem\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:\n\n for idxr in [lambda x: x.loc]:\n sc = s.copy()\n idxr(sc)[l] = 0\n result = idxr(sc)[l].values.ravel()\n assert (result == 0).all()\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[(3|4)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l] = 0\n\n def test_integer_positional_indexing(self):\n \"\"\" make sure that we are raising on positional 
indexing\n w.r.t. an integer index\n \"\"\"\n s = Series(range(2, 6), index=range(2, 6))\n\n result = s[2:4]\n expected = s.iloc[2:4]\n tm.assert_series_equal(result, expected)\n\n for idxr in [lambda x: x, lambda x: x.iloc]:\n\n for l in [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]:\n\n klass = RangeIndex\n msg = (\n \"cannot do (slice|positional) indexing \"\n r\"on {klass} with these indexers \\[(2|4)\\.0\\] of \"\n \"type float\".format(klass=klass.__name__)\n )\n with pytest.raises(TypeError, match=msg):\n idxr(s)[l]\n\n @pytest.mark.parametrize(\n \"index_func\", [tm.makeIntIndex, tm.makeRangeIndex],\n )\n def test_slice_integer_frame_getitem(self, index_func):\n\n # similar to above, but on the getitem dim (of a DataFrame)\n index = index_func(5)\n\n s = DataFrame(np.random.randn(5, 2), index=index)\n\n def f(idxr):\n\n # getitem\n for l in [slice(0.0, 1), slice(0, 1.0), slice(0.0, 1.0)]:\n\n result = idxr(s)[l]\n indexer = slice(0, 2)\n self.check(result, s, indexer, False)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[(0|1)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l]\n\n # getitem out-of-bounds\n for l in [slice(-10, 10), slice(-10.0, 10.0)]:\n\n result = idxr(s)[l]\n self.check(result, s, slice(-10, 10), True)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[-10\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[slice(-10.0, 10.0)]\n\n # getitem odd floats\n for l, res in [\n (slice(0.5, 1), slice(1, 2)),\n (slice(0, 0.5), slice(0, 1)),\n (slice(0.5, 1.5), slice(1, 2)),\n ]:\n\n result = idxr(s)[l]\n self.check(result, s, res, False)\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[0\\.5\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l]\n\n # setitem\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:\n\n sc = s.copy()\n idxr(sc)[l] = 0\n result = idxr(sc)[l].values.ravel()\n assert (result == 0).all()\n\n # positional indexing\n msg = (\n \"cannot do slice indexing \"\n r\"on {klass} with these indexers \\[(3|4)\\.0\\] of \"\n \"type float\".format(klass=type(index).__name__)\n )\n with pytest.raises(TypeError, match=msg):\n s[l] = 0\n\n f(lambda x: x.loc)\n\n def test_slice_float(self):\n\n # same as above, but for floats\n index = Index(np.arange(5.0)) + 0.1\n for s in [\n Series(range(5), index=index),\n DataFrame(np.random.randn(5, 2), index=index),\n ]:\n\n for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:\n\n expected = s.iloc[3:4]\n for idxr in [lambda x: x.loc, lambda x: x]:\n\n # getitem\n result = idxr(s)[l]\n if isinstance(s, Series):\n tm.assert_series_equal(result, expected)\n else:\n tm.assert_frame_equal(result, expected)\n # setitem\n s2 = s.copy()\n idxr(s2)[l] = 0\n result = idxr(s2)[l].values.ravel()\n assert (result == 0).all()\n\n def test_floating_index_doc_example(self):\n\n index = Index([1.5, 2, 3, 4.5, 5])\n s = Series(range(5), index=index)\n assert s[3] == 2\n assert s.loc[3] == 2\n assert s.loc[3] == 2\n assert s.iloc[3] == 3\n\n def test_floating_misc(self):\n\n # related 236\n # scalar/slicing of a float index\n s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64)\n\n # label based slicing\n result1 = s[1.0:3.0]\n result2 = 
s.loc[1.0:3.0]\n result3 = s.loc[1.0:3.0]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n\n # exact indexing when found\n result1 = s[5.0]\n result2 = s.loc[5.0]\n result3 = s.loc[5.0]\n assert result1 == result2\n assert result1 == result3\n\n result1 = s[5]\n result2 = s.loc[5]\n result3 = s.loc[5]\n assert result1 == result2\n assert result1 == result3\n\n assert s[5.0] == s[5]\n\n # value not found (and no fallbacking at all)\n\n # scalar integers\n with pytest.raises(KeyError, match=r\"^4$\"):\n s.loc[4]\n with pytest.raises(KeyError, match=r\"^4$\"):\n s.loc[4]\n with pytest.raises(KeyError, match=r\"^4$\"):\n s[4]\n\n # fancy floats/integers create the correct entry (as nan)\n # fancy tests\n expected = Series([2, 0], index=Float64Index([5.0, 0.0]))\n for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float\n tm.assert_series_equal(s[fancy_idx], expected)\n tm.assert_series_equal(s.loc[fancy_idx], expected)\n tm.assert_series_equal(s.loc[fancy_idx], expected)\n\n expected = Series([2, 0], index=Index([5, 0], dtype=\"int64\"))\n for fancy_idx in [[5, 0], np.array([5, 0])]: # int\n tm.assert_series_equal(s[fancy_idx], expected)\n tm.assert_series_equal(s.loc[fancy_idx], expected)\n tm.assert_series_equal(s.loc[fancy_idx], expected)\n\n # all should return the same as we are slicing 'the same'\n result1 = s.loc[2:5]\n result2 = s.loc[2.0:5.0]\n result3 = s.loc[2.0:5]\n result4 = s.loc[2.1:5]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n tm.assert_series_equal(result1, result4)\n\n # previously this did fallback indexing\n result1 = s[2:5]\n result2 = s[2.0:5.0]\n result3 = s[2.0:5]\n result4 = s[2.1:5]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n tm.assert_series_equal(result1, result4)\n\n result1 = s.loc[2:5]\n result2 = s.loc[2.0:5.0]\n result3 = s.loc[2.0:5]\n result4 = s.loc[2.1:5]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n tm.assert_series_equal(result1, result4)\n\n # combined test\n result1 = s.loc[2:5]\n result2 = s.loc[2:5]\n result3 = s[2:5]\n\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n\n # list selection\n result1 = s[[0.0, 5, 10]]\n result2 = s.loc[[0.0, 5, 10]]\n result3 = s.loc[[0.0, 5, 10]]\n result4 = s.iloc[[0, 2, 4]]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n tm.assert_series_equal(result1, result4)\n\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s[[1.6, 5, 10]]\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s.loc[[1.6, 5, 10]]\n\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s[[0, 1, 2]]\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s.loc[[0, 1, 2]]\n\n result1 = s.loc[[2.5, 5]]\n result2 = s.loc[[2.5, 5]]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, Series([1, 2], index=[2.5, 5.0]))\n\n result1 = s[[2.5]]\n result2 = s.loc[[2.5]]\n result3 = s.loc[[2.5]]\n tm.assert_series_equal(result1, result2)\n tm.assert_series_equal(result1, result3)\n tm.assert_series_equal(result1, Series([1], index=[2.5]))\n\n def test_floating_tuples(self):\n # see gh-13509\n s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.1, 0.2], name=\"foo\")\n\n result = s[0.0]\n assert result == (1, 1)\n\n expected = Series([(1, 1), (2, 2)], index=[0.0, 0.0], name=\"foo\")\n s = Series([(1, 1), (2, 2), (3, 3)], 
index=[0.0, 0.0, 0.2], name=\"foo\")\n\n result = s[0.0]\n tm.assert_series_equal(result, expected)\n\n def test_float64index_slicing_bug(self):\n # GH 5557, related to slicing a float index\n ser = {\n 256: 2321.0,\n 1: 78.0,\n 2: 2716.0,\n 3: 0.0,\n 4: 369.0,\n 5: 0.0,\n 6: 269.0,\n 7: 0.0,\n 8: 0.0,\n 9: 0.0,\n 10: 3536.0,\n 11: 0.0,\n 12: 24.0,\n 13: 0.0,\n 14: 931.0,\n 15: 0.0,\n 16: 101.0,\n 17: 78.0,\n 18: 9643.0,\n 19: 0.0,\n 20: 0.0,\n 21: 0.0,\n 22: 63761.0,\n 23: 0.0,\n 24: 446.0,\n 25: 0.0,\n 26: 34773.0,\n 27: 0.0,\n 28: 729.0,\n 29: 78.0,\n 30: 0.0,\n 31: 0.0,\n 32: 3374.0,\n 33: 0.0,\n 34: 1391.0,\n 35: 0.0,\n 36: 361.0,\n 37: 0.0,\n 38: 61808.0,\n 39: 0.0,\n 40: 0.0,\n 41: 0.0,\n 42: 6677.0,\n 43: 0.0,\n 44: 802.0,\n 45: 0.0,\n 46: 2691.0,\n 47: 0.0,\n 48: 3582.0,\n 49: 0.0,\n 50: 734.0,\n 51: 0.0,\n 52: 627.0,\n 53: 70.0,\n 54: 2584.0,\n 55: 0.0,\n 56: 324.0,\n 57: 0.0,\n 58: 605.0,\n 59: 0.0,\n 60: 0.0,\n 61: 0.0,\n 62: 3989.0,\n 63: 10.0,\n 64: 42.0,\n 65: 0.0,\n 66: 904.0,\n 67: 0.0,\n 68: 88.0,\n 69: 70.0,\n 70: 8172.0,\n 71: 0.0,\n 72: 0.0,\n 73: 0.0,\n 74: 64902.0,\n 75: 0.0,\n 76: 347.0,\n 77: 0.0,\n 78: 36605.0,\n 79: 0.0,\n 80: 379.0,\n 81: 70.0,\n 82: 0.0,\n 83: 0.0,\n 84: 3001.0,\n 85: 0.0,\n 86: 1630.0,\n 87: 7.0,\n 88: 364.0,\n 89: 0.0,\n 90: 67404.0,\n 91: 9.0,\n 92: 0.0,\n 93: 0.0,\n 94: 7685.0,\n 95: 0.0,\n 96: 1017.0,\n 97: 0.0,\n 98: 2831.0,\n 99: 0.0,\n 100: 2963.0,\n 101: 0.0,\n 102: 854.0,\n 103: 0.0,\n 104: 0.0,\n 105: 0.0,\n 106: 0.0,\n 107: 0.0,\n 108: 0.0,\n 109: 0.0,\n 110: 0.0,\n 111: 0.0,\n 112: 0.0,\n 113: 0.0,\n 114: 0.0,\n 115: 0.0,\n 116: 0.0,\n 117: 0.0,\n 118: 0.0,\n 119: 0.0,\n 120: 0.0,\n 121: 0.0,\n 122: 0.0,\n 123: 0.0,\n 124: 0.0,\n 125: 0.0,\n 126: 67744.0,\n 127: 22.0,\n 128: 264.0,\n 129: 0.0,\n 260: 197.0,\n 268: 0.0,\n 265: 0.0,\n 269: 0.0,\n 261: 0.0,\n 266: 1198.0,\n 267: 0.0,\n 262: 2629.0,\n 258: 775.0,\n 257: 0.0,\n 263: 0.0,\n 259: 0.0,\n 264: 163.0,\n 250: 10326.0,\n 251: 0.0,\n 252: 1228.0,\n 253: 0.0,\n 254: 2769.0,\n 255: 0.0,\n }\n\n # smoke test for the repr\n s = Series(ser)\n result = s.value_counts()\n str(result)\n" ]
[ [ "pandas.timedelta_range", "pandas.tseries.offsets.MonthEnd", "pandas.Series", "pandas.core.sparse.api.SparseArray", "pandas.tseries.offsets.Hour", "pandas.bdate_range", "pandas.Period", "pandas.tseries.offsets.FY5253", "pandas.Categorical", "pandas.RangeIndex", "pandas.tseries.offsets.QuarterBegin", "pandas.tseries.offsets.DateOffset", "pandas.tseries.offsets.SemiMonthEnd", "pandas.period_range", "pandas.tseries.offsets.CustomBusinessDay", "pandas.tseries.offsets.YearBegin", "pandas.tseries.offsets.Minute", "pandas.tseries.offsets.WeekOfMonth", "pandas.tseries.offsets.SemiMonthBegin", "pandas.Timestamp", "pandas.tseries.offsets.Week", "pandas.tseries.offsets.LastWeekOfMonth", "pandas.date_range", "pandas.tseries.offsets.MonthBegin", "numpy.arange", "pandas.interval_range", "pandas.tseries.offsets.Day", "pandas.tseries.offsets.Easter", "pandas.tseries.offsets.QuarterEnd", "pandas.DataFrame", "pandas.tseries.offsets.BusinessHour", "pandas.tseries.offsets.YearEnd" ], [ "pandas.Series", "numpy.random.randn", "pandas.Float64Index", "pandas._testing.assert_frame_equal", "numpy.arange", "pandas._testing.assert_almost_equal", "pandas._testing.assert_series_equal", "pandas.RangeIndex", "numpy.array", "pandas.Index" ] ]
egryaznov/nlpmovies
[ "4f2c5ffbcff12f279dc2622471e1b19175607f67" ]
[ "Assignment 2/Second Part/russian_classifier.py" ]
[ "# IMDB Movie Review Sentiment Classification\n# Second Assignment Solution\n# NLP Course, Innopolis University, Spring 2017\n# Author: Evgeny Gryaznov\n\nimport numpy\nimport ru_otzyv as ru\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\n\n\ndef compile_model(topn=20000, max_review_length=300, embedding_vector_length=300, dropout_value=0.3):\n \"\"\" Builds, compiles and trains the LSTM model for russian moive review\n classification problem.\n Keyword arguments:\n params -- a dictionary of parameters for the model. Currently the\n following entries are suported:\n 'top_words' -- the maximal length of a vocabulary\n 'max_review_length' -- the maximal length of a review\n 'embedding_vector_length' -- the length of the input vectors after\n applying `embedding` techique.\n 'dropout_value' -- the percentage of units that will be dropped.\n Returns:\n A tuple: [model, history], where `model` is created model and `history`\n its history of epoches.\"\"\"\n# Fix random seed for reproducibility...\n numpy.random.seed(7)\n# Compiling the model...\n model = Sequential()\n model.add(Embedding(topn, embedding_vector_length, input_length=max_review_length))\n model.add(LSTM(100, dropout_W=dropout_value, dropout_U=dropout_value))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\ndef train_model(model, x_train, y_train, nb_epochs=5):\n history = model.fit(x_train, y_train, validation_split=0.33, epochs=nb_epochs, batch_size=64)\n return history\n\n\n# Final evaluation of the model\ndef evaluate(model, x_test, y_test):\n \"\"\" Evaluates model on the given test data and returns specified metrics.\n Keyword arguments:\n model -- trained LSTM model.\n x_test -- padded and cooked test review data.\n y_test -- padded and cooked test rating data.\n Returns:\n A tuple of scores.\n \"\"\"\n scores = model.evaluate(x_test, y_test, verbose=0)\n return scores\n\n\ndef predict(model, review_filename, vocab):\n \"\"\" Predicts the rating of the given review.\n Keyword arguments:\n model -- trained LSTM model that will do the prediction.\n rivew_filename -- a name of the file where the text of the review\n is stored.\n vocab -- a compiled vocabulary of Russian tokens extracted from the\n dataset.\n Returns:\n The predicted rating of the review.\n \"\"\"\n review = ''\n with open('sample-reviews/' + review_filename, 'r') as f:\n review = f.read()\n x = sequence.pad_sequences([ru.digitize(review, vocab)], maxlen=300)\n predicted_rating = model.predict(x)\n return predicted_rating\n\n\ndef build_and_evaluate(topn=20000, max_review_length=300):\n \"\"\" Run this function to compile, train, evaluate and assess our LSTM\n model in one shot!\n Returns:\n Completed LSTM that you can play with.\n \"\"\"\n# Load the dataset but only keep the top n words, discarding others\n print('Preparing the dataset...')\n x_train, y_train, x_test, y_test = ru.cook_data(topn=topn)\n print(' Padding sequences...')\n# truncate and pad input sequences so they can fit into LSTM layer\n x_test = sequence.pad_sequences(x_test, maxlen=max_review_length)\n x_train = sequence.pad_sequences(x_train, maxlen=max_review_length)\n# Compile and train our LSTM\n print('Dataset preparation complete!\\nCompiling the model...')\n my_lstm = compile_model(topn=topn, 
max_review_length=max_review_length)\n    print('Model compilation complete!\\nTraining the model...')\n    history = train_model(my_lstm, x_train, y_train, nb_epochs=4)\n# Plot the history of training\n    print('Model training complete!\\nEvaluating performance...')\n    plot_loss(history)\n    plot_accuracy(history)\n# Evaluate the accuracy of our model\n    scores = evaluate(my_lstm, x_test, y_test)\n    print(\"Final Test Data Accuracy: %.2f%%\" % (scores[1] * 100))\n    return my_lstm\n\n\ndef plot_loss(history):\n    \"\"\" Plots the values of a loss function through training time (epochs). \"\"\"\n    plt.plot(history.history['loss'])\n    plt.plot(history.history['val_loss'])\n    plt.title('Loss of the Model')\n    plt.ylabel('Loss')\n    plt.xlabel('Epoch')\n    plt.legend(['Train', 'Test'], loc='upper left')\n    plt.show()\n\n\ndef plot_accuracy(history):\n    \"\"\" Plots the accuracy of a model through training time (epochs). \"\"\"\n    plt.plot(history.history['acc'])\n    plt.plot(history.history['val_acc'])\n    plt.title('Accuracy of the Model')\n    plt.ylabel('Accuracy')\n    plt.xlabel('Epoch')\n    plt.legend(['Train', 'Test'], loc='upper left')\n    plt.show()\n\n\nmy_lstm = build_and_evaluate(topn=20000)\nprint('-' * 30)\nprint('Loading vocabulary...')\nvocab = ru.load('ru-vocab.json')\n# Play with the model a little...\nreview_filename = 'positive_review0.txt'\nprint('Starting prediction...')\npredicted_rating = predict(my_lstm, review_filename, vocab)\nprint('Predicted rating for this review is: ' + str(predicted_rating))\n\n\n# batch normalization -- ??\n# check the data distribution -- DONE\n# data balancing: duplicating samples -- DONE, Acc + 2%\n# validation set -- ??\n# voting of algorithms (ensemble) -- will not do\n# TODO -- less train, more test: 70 to 30\n# seaborn -- OK\n# return subsequences true -- ??\n# TODO -- softmax -- categorical crossentropy\n# TODO -- RMSprop\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.random.seed", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
remtav/SpaceNet7_Multi-Temporal_Solutions
[ "ee535c61fc22bffa45331519239c6d1b044b1514" ]
[ "1-lxastro0/code_local/utils/create_dataset.py" ]
[ "import collections\nimport logging\nimport os\nimport warnings\nfrom pathlib import Path\nfrom typing import List, Union\n\nimport h5py\nfrom torch.utils.data import Dataset\nimport numpy as np\n\nimport models.coordconv\nfrom utils.utils import get_key_def, ordereddict_eval, compare_config_yamls\nfrom utils.geoutils import get_key_recursive\n\n# These two import statements prevent exception when using eval(metadata) in SegmentationDataset()'s __init__()\nfrom rasterio.crs import CRS\nfrom affine import Affine\n\nlogging.getLogger(__name__)\n\n\ndef append_to_dataset(dataset, sample):\n \"\"\"\n Append a new sample to a provided dataset. The dataset has to be expanded before we can add value to it.\n :param dataset:\n :param sample: data to append\n :return: Index of the newly added sample.\n \"\"\"\n old_size = dataset.shape[0] # this function always appends samples on the first axis\n dataset.resize(old_size + 1, axis=0)\n dataset[old_size, ...] = sample\n return old_size\n\n\ndef create_files_and_datasets(samples_size: int, number_of_bands: int, meta_map, samples_folder: Union[str, Path], params):\n \"\"\"\n Function to create the hdfs files (trn, val and tst).\n :param samples_size: size of individual hdf5 samples to be created\n :param number_of_bands: number of bands in imagery\n :param meta_map:\n :param samples_folder: (str) Path to the output folder.\n :param params: (dict) Parameters found in the yaml config file.\n :return: (hdf5 datasets) trn, val ant tst datasets.\n \"\"\"\n real_num_bands = number_of_bands - MetaSegmentationDataset.get_meta_layer_count(meta_map)\n assert real_num_bands > 0, \"invalid number of bands when accounting for meta layers\"\n hdf5_files = []\n for subset in [\"trn\", \"val\", \"tst\"]:\n hdf5_file = h5py.File(os.path.join(samples_folder, f\"{subset}_samples.hdf5\"), \"w\")\n hdf5_file.create_dataset(\"sat_img\", (0, samples_size, samples_size, real_num_bands), np.uint16,\n maxshape=(None, samples_size, samples_size, real_num_bands))\n hdf5_file.create_dataset(\"map_img\", (0, samples_size, samples_size), np.int16,\n maxshape=(None, samples_size, samples_size))\n hdf5_file.create_dataset(\"meta_idx\", (0, 1), dtype=np.int16, maxshape=(None, 1))\n try:\n hdf5_file.create_dataset(\"metadata\", (0, 1), dtype=h5py.string_dtype(), maxshape=(None, 1))\n hdf5_file.create_dataset(\"sample_metadata\", (0, 1), dtype=h5py.string_dtype(), maxshape=(None, 1))\n hdf5_file.create_dataset(\"params\", (0, 1), dtype=h5py.string_dtype(), maxshape=(None, 1))\n append_to_dataset(hdf5_file[\"params\"], repr(params))\n except AttributeError:\n logging.exception(f'Update h5py to version 2.10 or higher')\n raise\n hdf5_files.append(hdf5_file)\n return hdf5_files\n\n\nclass SegmentationDataset(Dataset):\n \"\"\"Semantic segmentation dataset based on HDF5 parsing.\"\"\"\n\n def __init__(self, work_folder,\n dataset_type,\n num_bands,\n max_sample_count=None,\n dontcare=None,\n radiom_transform=None,\n geom_transform=None,\n totensor_transform=None,\n params=None,\n debug=False):\n # note: if 'max_sample_count' is None, then it will be read from the dataset at runtime\n self.work_folder = work_folder\n self.max_sample_count = max_sample_count\n self.dataset_type = dataset_type\n self.num_bands = num_bands\n self.metadata = []\n self.radiom_transform = radiom_transform\n self.geom_transform = geom_transform\n self.totensor_transform = totensor_transform\n self.debug = debug\n self.dontcare = dontcare\n self.hdf5_path = os.path.join(self.work_folder, self.dataset_type + 
\"_samples.hdf5\")\n with h5py.File(self.hdf5_path, \"r\") as hdf5_file:\n for i in range(hdf5_file[\"metadata\"].shape[0]):\n metadata = hdf5_file[\"metadata\"][i, ...]\n if isinstance(metadata, np.ndarray) and len(metadata) == 1:\n metadata = metadata[0]\n metadata = ordereddict_eval(metadata)\n self.metadata.append(metadata)\n if self.max_sample_count is None:\n self.max_sample_count = hdf5_file[\"sat_img\"].shape[0]\n\n # load yaml used to generate samples\n hdf5_params = hdf5_file['params'][0, 0]\n hdf5_params = ordereddict_eval(hdf5_params)\n\n if dataset_type == 'trn' and isinstance(hdf5_params, dict) and isinstance(metadata, dict):\n # check match between current yaml and sample yaml for crucial parameters\n try:\n compare_config_yamls(hdf5_params, params)\n except TypeError:\n logging.exception(\"Couldn't compare current yaml with hdf5 yaml\")\n\n def __len__(self):\n return self.max_sample_count\n\n def _remap_labels(self, map_img):\n # note: will do nothing if 'dontcare' is not set in constructor, or set to non-zero value # TODO: seems like a temporary patch... dontcare should never be == 0, right ?\n if self.dontcare is None or self.dontcare != 0:\n return map_img\n # for now, the current implementation only handles the original 'dontcare' value as zero\n # to keep the impl simple, we just reduce all indices by one so that 'dontcare' becomes -1\n assert map_img.dtype == np.int8 or map_img.dtype == np.int16 or map_img.dtype == np.int32\n map_img -= 1\n return map_img\n\n def __getitem__(self, index):\n with h5py.File(self.hdf5_path, \"r\") as hdf5_file:\n sat_img = np.float32(hdf5_file[\"sat_img\"][index, ...])\n assert self.num_bands <= sat_img.shape[-1]\n map_img = self._remap_labels(hdf5_file[\"map_img\"][index, ...])\n meta_idx = int(hdf5_file[\"meta_idx\"][index])\n metadata = self.metadata[meta_idx]\n sample_metadata = hdf5_file[\"sample_metadata\"][index, ...][0]\n sample_metadata = eval(sample_metadata.decode('UTF-8'))\n if isinstance(metadata, np.ndarray) and len(metadata) == 1:\n metadata = metadata[0]\n elif isinstance(metadata, bytes):\n metadata = metadata.decode('UTF-8')\n try:\n metadata = eval(metadata)\n metadata.update(sample_metadata)\n except TypeError:\n pass # FI\n # where bandwise array has no data values, set as np.nan\n # sat_img[sat_img == metadata['nodata']] = np.nan # TODO: problem with lack of dynamic range. See: https://rasterio.readthedocs.io/en/latest/topics/masks.html\n\n sample = {\"sat_img\": sat_img, \"map_img\": map_img, \"metadata\": metadata,\n \"hdf5_path\": self.hdf5_path}\n\n if self.radiom_transform: # radiometric transforms should always precede geometric ones\n sample = self.radiom_transform(sample)\n if self.geom_transform: # rotation, geometric scaling, flip and crop. Will also put channels first and convert to torch tensor from numpy.\n sample = self.geom_transform(sample)\n\n sample = self.totensor_transform(sample)\n\n if self.debug:\n # assert no new class values in map_img\n initial_class_ids = set(np.unique(map_img))\n if self.dontcare is not None:\n initial_class_ids.add(self.dontcare)\n final_class_ids = set(np.unique(sample['map_img'].numpy()))\n if not final_class_ids.issubset(initial_class_ids):\n logging.debug(f\"WARNING: Class ids for label before and after augmentations don't match. 
\"\n f\"Ignore if overwritting ignore_index in ToTensorTarget\")\n sample['index'] = index\n return sample\n\n\nclass MetaSegmentationDataset(SegmentationDataset):\n \"\"\"Semantic segmentation dataset interface that appends metadata under new tensor layers.\"\"\"\n\n metadata_handling_modes = [\"const_channel\", \"scaled_channel\"]\n\n def __init__(self, work_folder,\n dataset_type,\n num_bands,\n meta_map,\n max_sample_count=None,\n dontcare=None,\n radiom_transform=None,\n geom_transform=True,\n totensor_transform=True,\n debug=False):\n assert meta_map is None or isinstance(meta_map, dict), \"unexpected metadata mapping object type\"\n assert meta_map is None or all([isinstance(k, str) and v in self.metadata_handling_modes for k, v in meta_map.items()]), \\\n \"unexpected metadata key type or value handling mode\"\n super().__init__(work_folder=work_folder, dataset_type=dataset_type, num_bands=num_bands,\n max_sample_count=max_sample_count,\n dontcare=dontcare,\n radiom_transform=radiom_transform,\n geom_transform=geom_transform,\n totensor_transform=totensor_transform,\n debug=debug)\n assert all([isinstance(m, (dict, collections.OrderedDict)) for m in self.metadata]), \\\n \"cannot use provided metadata object type with meta-mapping dataset interface\"\n self.meta_map = meta_map\n\n @staticmethod\n def append_meta_layers(tensor, meta_map, metadata):\n if meta_map:\n assert isinstance(metadata, (dict, collections.OrderedDict)), \"unexpected metadata type\"\n for meta_key, mode in meta_map.items():\n meta_val = get_key_recursive(meta_key, metadata)\n if mode == \"const_channel\":\n assert np.isscalar(meta_val), \"constant channel-wise assignment requires scalar value\"\n layer = np.full(tensor.shape[0:2], meta_val, dtype=np.float32)\n tensor = np.insert(tensor, tensor.shape[2], layer, axis=2)\n elif mode == \"scaled_channel\":\n assert np.isscalar(meta_val), \"scaled channel-wise coords assignment requires scalar value\"\n layers = models.coordconv.get_coords_map(tensor.shape[0], tensor.shape[1]) * meta_val\n tensor = np.insert(tensor, tensor.shape[2], layers, axis=2)\n # else...\n return tensor\n\n @staticmethod\n def get_meta_layer_count(meta_map):\n meta_layers = 0\n if meta_map:\n for meta_key, mode in meta_map.items():\n if mode == \"const_channel\":\n meta_layers += 1\n elif mode == \"scaled_channel\":\n meta_layers += 2\n return meta_layers\n\n def __getitem__(self, index):\n # put metadata layer in util func for inf script?\n with h5py.File(self.hdf5_path, \"r\") as hdf5_file:\n sat_img = hdf5_file[\"sat_img\"][index, ...]\n assert self.num_bands <= sat_img.shape[-1]\n map_img = self._remap_labels(hdf5_file[\"map_img\"][index, ...])\n meta_idx = int(hdf5_file[\"meta_idx\"][index])\n metadata = self.metadata[meta_idx]\n sample_metadata = hdf5_file[\"sample_metadata\"][index, ...]\n if isinstance(metadata, np.ndarray) and len(metadata) == 1:\n metadata = metadata[0]\n sample_metadata = sample_metadata[0]\n if isinstance(metadata, str):\n metadata = eval(metadata)\n sample_metadata = eval(sample_metadata)\n metadata.update(sample_metadata)\n assert meta_idx != -1, f\"metadata unvailable in sample #{index}\"\n sat_img = self.append_meta_layers(sat_img, self.meta_map, self.metadata[meta_idx])\n sample = {\"sat_img\": sat_img, \"map_img\": map_img, \"metadata\": metadata}\n if self.radiom_transform: # radiometric transforms should always precede geometric ones\n sample = self.radiom_transform(sample) # TODO: test this for MetaSegmentationDataset\n sample[\"sat_img\"] = 
self.append_meta_layers(sat_img, self.meta_map, metadata) # Overwrite sat_img with sat_img with metalayers\n if self.geom_transform:\n sample = self.geom_transform(sample) # rotation, geometric scaling, flip and crop. Will also put channels first and convert to torch tensor from numpy.\n sample = self.totensor_transform(sample) # TODO: test this for MetaSegmentationDataset\n return sample" ]
[ [ "numpy.float32", "numpy.insert", "numpy.full", "numpy.isscalar", "numpy.unique" ] ]
artek0chumak/hivemind
[ "c6b2b2d84ccfc890314a2bfece8eef238372d410" ]
[ "hivemind/compression/quantization.py" ]
[ "import math\nimport os\nfrom abc import ABC, abstractmethod\nfrom concurrent.futures import ThreadPoolExecutor\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\n\nfrom hivemind.compression.base import CompressionBase, CompressionInfo\nfrom hivemind.proto import runtime_pb2\n\nEXECUTOR = ThreadPoolExecutor(max_workers=int(os.environ.get(\"QUANTIZATION_THREADS\", 128)))\n\n\nclass Quantization(CompressionBase, ABC):\n codebook_dtype, indices_dtype = np.float32, np.uint8\n\n @abstractmethod\n def quantize(self, tensor: torch.Tensor, allow_inplace: bool = False) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Convert tensor into a pair of (indices, codebook)\"\"\"\n ...\n\n def compress(self, tensor: torch.Tensor, info: CompressionInfo, allow_inplace: bool = False) -> runtime_pb2.Tensor:\n quantized, codebook = self.quantize(tensor.detach(), allow_inplace=allow_inplace)\n return runtime_pb2.Tensor(\n compression=self.compression_type,\n buffer=b\"\".join((np.int64(len(codebook)).tobytes(), codebook.tobytes(), quantized.tobytes())),\n size=tensor.shape,\n dtype=tensor.numpy().dtype.name,\n requires_grad=tensor.requires_grad,\n )\n\n def extract(self, serialized_tensor: runtime_pb2.Tensor) -> torch.Tensor:\n codebook_size = int(np.frombuffer(serialized_tensor.buffer, count=1, dtype=np.int64))\n codebook = np.frombuffer(serialized_tensor.buffer, offset=8, count=codebook_size, dtype=self.codebook_dtype)\n quantized = np.frombuffer(serialized_tensor.buffer, offset=8 + codebook.nbytes, dtype=self.indices_dtype)\n quantized = torch.as_tensor(quantized, dtype=torch.int64).reshape(tuple(serialized_tensor.size))\n codebook = torch.as_tensor(np.asarray(codebook, dtype=serialized_tensor.dtype))\n return codebook[quantized]\n\n def estimate_compression_ratio(self, info: CompressionInfo) -> float:\n return self.n_bits / torch.finfo(info.descriptor.dtype).bits\n\n @property\n def n_bits(self):\n return self.indices_dtype(1).itemsize * 8\n\n @property\n def n_bins(self):\n return 2**self.n_bits\n\n\nclass Uniform8BitQuantization(Quantization):\n RANGE_IN_SIGMAS: int = 6\n compression_type = runtime_pb2.UNIFORM_8BIT\n\n def quantize(self, tensor: torch.Tensor, allow_inplace: bool = False) -> Tuple[np.ndarray, np.ndarray]:\n offset = self.n_bins // 2\n shift = tensor.mean()\n centered_tensor = tensor.sub_(shift) if allow_inplace else tensor - shift\n std_unbiased = centered_tensor.norm() / math.sqrt(centered_tensor.numel() - 1)\n scale = self.RANGE_IN_SIGMAS * std_unbiased / self.n_bins\n quantized = torch.quantize_per_tensor(centered_tensor, scale, offset, torch.quint8).int_repr()\n lookup = average_buckets(tensor, quantized, self.n_bins)\n return np.asarray(quantized, dtype=self.indices_dtype), np.asarray(lookup, dtype=self.codebook_dtype)\n\n\nclass Quantile8BitQuantization(Quantization):\n compression_type = runtime_pb2.QUANTILE_8BIT\n\n def quantize(self, tensor: torch.Tensor, allow_inplace: bool = False) -> Tuple[np.ndarray, np.ndarray]:\n tensor = tensor.detach().float()\n borders = torch.as_tensor(quantile_qq_approximation(tensor.numpy(), self.n_bins + 1)[1:-1])\n quantized = torch.clamp_(torch.bucketize(tensor, borders), 0, self.n_bins - 1)\n codebook = average_buckets(tensor, quantized, self.n_bins)\n return quantized.numpy().astype(np.uint8), codebook.numpy()\n\n\ndef average_buckets(tensor: torch.Tensor, quant_weight: torch.Tensor, n_bins: int):\n \"\"\"Return the average value in each bucket\"\"\"\n bin_sums = torch.zeros(n_bins).scatter_add_(0, quant_weight.flatten().long(), 
tensor.flatten())\n bin_counts = torch.clamp_min_(torch.bincount(quant_weight.flatten(), minlength=n_bins), 1)\n lookup = bin_sums / bin_counts\n return lookup\n\n\ndef get_chunk_size(num_elements: int, min_chunk_size: int) -> int:\n \"\"\"Adjust chunk_size to minimize imbalance between chunk sizes\"\"\"\n if min_chunk_size >= num_elements:\n return min_chunk_size\n leftover_elements = num_elements % min_chunk_size\n num_chunks = num_elements // min_chunk_size\n return min_chunk_size + (leftover_elements - 1) // num_chunks + 1\n\n\ndef quantile_qq_approximation(array: np.ndarray, n_quantiles: int, min_chunk_size: int = 10**5) -> np.ndarray:\n \"\"\"Estimate uniform quantiles of data using quantile-of-quantiles. Runs in parallel.\"\"\"\n if not array.data.c_contiguous and array.data.f_contiguous:\n array = array.T\n array = np.ascontiguousarray(array.reshape(-1))\n quantiles = np.linspace(0.0, 1.0, num=n_quantiles, dtype=array.dtype)\n chunk_size = get_chunk_size(len(array), min_chunk_size)\n num_chunks = (len(array) - 1) // chunk_size + 1\n partition_quantiles = np.empty((num_chunks, len(quantiles)), dtype=array.dtype)\n\n jobs = []\n for i in range(num_chunks):\n chunk = slice(chunk_size * i, chunk_size * (i + 1))\n jobs.append(EXECUTOR.submit(np.quantile, array[chunk], quantiles, out=partition_quantiles[i]))\n\n for job in jobs:\n job.result()\n return np.quantile(partition_quantiles, quantiles)\n" ]
[ [ "torch.bucketize", "numpy.quantile", "torch.as_tensor", "numpy.asarray", "torch.finfo", "torch.quantize_per_tensor", "torch.zeros", "numpy.linspace", "numpy.frombuffer" ] ]
PangYunsheng8/CGIPool
[ "2cf22019bad510804021f768c6a0d76bf79b62f6" ]
[ "train.py" ]
[ "import os\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport csv\r\nimport glob\r\nimport argparse\r\n\r\nfrom datasets.dataloader import build_loader\r\n\r\n\r\nparser = argparse.ArgumentParser(description=\"Graph Pooling\")\r\nparser.add_argument('--model', type=str, default=\"SAGNet\", help='model name')\r\nparser.add_argument('--seed', type=int, default=777, help='seed')\r\nparser.add_argument('--batch_size', type=int, default=128, help='batch size')\r\nparser.add_argument('--lr', type=float, default=0.001, help='learning rate')\r\nparser.add_argument('--weight_decay', type=float, default=0.001, help='weight decay')\r\nparser.add_argument('--dataset', type=str, default='IMDB-MULTI', help='DD/NCI1/NCI109/Mutagenicity/PROTEINS')\r\nparser.add_argument('--epochs', type=int, default=1000, help='maximum number of epochs')\r\nparser.add_argument('--patience', type=int, default=100, help='path to save result')\r\n\r\nargs = parser.parse_args()\r\n\r\n\r\ndef train(args, model, train_loader, val_loader):\r\n model.train()\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\r\n\r\n min_loss = 1e10\r\n max_acc = 0\r\n patience_cnt = 0\r\n best_epoch = 0\r\n val_loss_values = []\r\n for epoch in range(args.epochs):\r\n loss_train = 0.0\r\n loss_dis = 0.0\r\n correct = 0\r\n for i, data in enumerate(train_loader):\r\n optimizer.zero_grad()\r\n data = data.to(args.device)\r\n output = model(data)\r\n cls_loss = F.nll_loss(output, data.y)\r\n dis_loss = model.compute_disentangle_loss()\r\n loss = cls_loss + 0.001 * dis_loss \r\n loss.backward()\r\n optimizer.step()\r\n loss_train += cls_loss.item()\r\n loss_dis += dis_loss.item() \r\n pred = output.max(dim=1)[1]\r\n correct += pred.eq(data.y).sum().item()\r\n acc_train = correct / len(train_loader.dataset)\r\n val_acc, val_loss = test(args, model, val_loader)\r\n if val_acc > max_acc:\r\n max_acc = val_acc\r\n print('Epoch: {:04d}'.format(epoch + 1), 'loss_train: {:.6f}'.format(loss_train), 'loss_dis: {:.6f}'.format(loss_dis),\r\n 'acc_train: {:.6f}'.format(acc_train), 'loss_val: {:.6f}'.format(val_loss),\r\n 'acc_val: {:.6f}'.format(val_acc), 'max_acc: {:.6f}'.format(max_acc))\r\n\r\n val_loss_values.append(val_loss)\r\n if val_loss_values[-1] < min_loss:\r\n min_loss = val_loss_values[-1]\r\n best_epoch = epoch\r\n patience_cnt = 0\r\n torch.save(model.state_dict(), 'save/' + args.dataset + '/' + str(args.seed) + '.pth')\r\n else:\r\n patience_cnt += 1\r\n\r\n if patience_cnt == args.patience:\r\n break\r\n\r\n print('Optimization Finished!')\r\n\r\n\r\ndef test(args, model, loader):\r\n model.eval()\r\n correct = 0.\r\n loss = 0.\r\n for data in loader:\r\n data = data.to(args.device)\r\n output = model(data)\r\n pred = output.max(dim=1)[1]\r\n correct += pred.eq(data.y).sum().item()\r\n loss += F.nll_loss(output, data.y, reduction='sum').item()\r\n return correct / len(loader.dataset),loss / len(loader.dataset)\r\n\r\n\r\ndef main():\r\n torch.manual_seed(args.seed)\r\n\r\n if torch.cuda.is_available():\r\n torch.cuda.manual_seed(args.seed)\r\n args.device = \"cuda\"\r\n print('cuda')\r\n else:\r\n args.device = \"cpu\"\r\n\r\n train_loader, val_loader, test_loader = build_loader(args)\r\n \r\n if args.model == \"SAGNet\":\r\n from models.SAGNet import SAGNet, Config\r\n config = Config()\r\n model = SAGNet(config, args)\r\n elif args.model == \"GSANet\":\r\n from models.GSANet import GSANet, Config\r\n config = Config()\r\n model = GSANet(config, args)\r\n elif args.model == 
\"HGPSLNet\":\r\n from models.HGPSLNet import HGPSLNet, Config\r\n config = Config()\r\n model = HGPSLNet(config, args)\r\n elif args.model == \"ASAPNet\":\r\n from models.ASAPNet import ASAPNet, Config\r\n config = Config()\r\n model = ASAPNet(config, args)\r\n model.to(args.device)\r\n\r\n train(args, model, train_loader, val_loader)\r\n\r\n model.load_state_dict(torch.load('save/' + args.dataset + '/' + str(args.seed) + '.pth'))\r\n test_acc, test_loss = test(args, model, test_loader)\r\n print('Test set results, loss = {:.6f}, accuracy = {:.6f}'.format(test_loss, test_acc))\r\n \r\n with open('result.txt', 'a') as f:\r\n f.write(str(test_acc) + '\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "torch.cuda.manual_seed", "torch.nn.functional.nll_loss", "torch.manual_seed", "torch.cuda.is_available" ] ]
rwilliams251/taichi
[ "442710331be55baf5af17f9667db650c19cbb0b2" ]
[ "python/taichi/examples/simulation/mpm3d.py" ]
[ "export_file = '' # use '/tmp/mpm3d.ply' for exporting result to disk\n\nimport numpy as np\n\nimport taichi as ti\n\nti.init(arch=ti.gpu)\n\n#dim, n_grid, steps, dt = 2, 128, 20, 2e-4\n#dim, n_grid, steps, dt = 2, 256, 32, 1e-4\ndim, n_grid, steps, dt = 3, 32, 25, 4e-4\n#dim, n_grid, steps, dt = 3, 64, 25, 2e-4\n#dim, n_grid, steps, dt = 3, 128, 25, 8e-5\n\nn_particles = n_grid**dim // 2**(dim - 1)\ndx = 1 / n_grid\n\np_rho = 1\np_vol = (dx * 0.5)**2\np_mass = p_vol * p_rho\ngravity = 9.8\nbound = 3\nE = 400\n\nx = ti.Vector.field(dim, float, n_particles)\nv = ti.Vector.field(dim, float, n_particles)\nC = ti.Matrix.field(dim, dim, float, n_particles)\nJ = ti.field(float, n_particles)\n\ngrid_v = ti.Vector.field(dim, float, (n_grid, ) * dim)\ngrid_m = ti.field(float, (n_grid, ) * dim)\n\nneighbour = (3, ) * dim\n\n\[email protected]\ndef substep():\n for I in ti.grouped(grid_m):\n grid_v[I] = ti.zero(grid_v[I])\n grid_m[I] = 0\n ti.block_dim(n_grid)\n for p in x:\n Xp = x[p] / dx\n base = int(Xp - 0.5)\n fx = Xp - base\n w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]\n stress = -dt * 4 * E * p_vol * (J[p] - 1) / dx**2\n affine = ti.Matrix.identity(float, dim) * stress + p_mass * C[p]\n for offset in ti.static(ti.grouped(ti.ndrange(*neighbour))):\n dpos = (offset - fx) * dx\n weight = 1.0\n for i in ti.static(range(dim)):\n weight *= w[offset[i]][i]\n grid_v[base + offset] += weight * (p_mass * v[p] + affine @ dpos)\n grid_m[base + offset] += weight * p_mass\n for I in ti.grouped(grid_m):\n if grid_m[I] > 0:\n grid_v[I] /= grid_m[I]\n grid_v[I][1] -= dt * gravity\n cond = (I < bound) & (grid_v[I] < 0) | \\\n (I > n_grid - bound) & (grid_v[I] > 0)\n grid_v[I] = 0 if cond else grid_v[I]\n ti.block_dim(n_grid)\n for p in x:\n Xp = x[p] / dx\n base = int(Xp - 0.5)\n fx = Xp - base\n w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]\n new_v = ti.zero(v[p])\n new_C = ti.zero(C[p])\n for offset in ti.static(ti.grouped(ti.ndrange(*neighbour))):\n dpos = (offset - fx) * dx\n weight = 1.0\n for i in ti.static(range(dim)):\n weight *= w[offset[i]][i]\n g_v = grid_v[base + offset]\n new_v += weight * g_v\n new_C += 4 * weight * g_v.outer_product(dpos) / dx**2\n v[p] = new_v\n x[p] += dt * v[p]\n J[p] *= 1 + dt * new_C.trace()\n C[p] = new_C\n\n\[email protected]\ndef init():\n for i in range(n_particles):\n x[i] = ti.Vector([ti.random() for i in range(dim)]) * 0.4 + 0.15\n J[i] = 1\n\n\ndef T(a):\n if dim == 2:\n return a\n\n phi, theta = np.radians(28), np.radians(32)\n\n a = a - 0.5\n x, y, z = a[:, 0], a[:, 1], a[:, 2]\n c, s = np.cos(phi), np.sin(phi)\n C, S = np.cos(theta), np.sin(theta)\n x, z = x * c + z * s, z * c - x * s\n u, v = x, y * C + z * S\n return np.array([u, v]).swapaxes(0, 1) + 0.5\n\n\ninit()\ngui = ti.GUI('MPM3D', background_color=0x112F41)\nwhile gui.running and not gui.get_event(gui.ESCAPE):\n for s in range(steps):\n substep()\n pos = x.to_numpy()\n if export_file:\n writer = ti.PLYWriter(num_vertices=n_particles)\n writer.add_vertex_pos(pos[:, 0], pos[:, 1], pos[:, 2])\n writer.export_frame(gui.frame, export_file)\n gui.circles(T(pos), radius=1.5, color=0x66ccff)\n gui.show()\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.radians", "numpy.cos" ] ]
deno750/VRP_Optimization
[ "653c950b59acb3a1cd96d1e96bb334c90655eaa2" ]
[ "other_codes/perfProf.py" ]
[ "#!/usr/bin/env python2\n\nfrom __future__ import print_function\nimport numpy as np\nimport matplotlib\n#matplotlib.use('PDF')\nimport matplotlib.pyplot as plt\nimport sys\n\nfrom optparse import OptionParser\n\n# parameters\ndefLW = 1.2 # default line width\ndefMS = 7 # default marker size\ndashes = ['-', # solid line\n\t'--', # dashed line\n\t'-.', # dash-dot line\n\t':', # dotted line\n\t'-',\n\t'--']\n\nmarkers = ['+', 'x', 's', '^', 'o', 'd']\ncolors = ['r', 'b', 'y', 'g', 'm', 'c']\n\n\nclass CmdLineParser(object):\n\tdef __init__(self):\n\t\tself.parser = OptionParser(usage='usage: python2 perfprof.py [options] cvsfile.csv outputfile.pdf')\n\t\t# default options\n\t\tself.parser.add_option(\"-D\", \"--delimiter\", dest=\"delimiter\", default=None, help=\"delimiter for input files\")\n\t\tself.parser.add_option(\"-M\", \"--maxratio\", dest=\"maxratio\", default=4, type=float, help=\"maxratio for perf. profile\")\n\t\tself.parser.add_option(\"-S\", \"--shift\", dest=\"shift\", default=0, type=float, help=\"shift for data\")\n\t\tself.parser.add_option(\"-L\", \"--logplot\", dest=\"logplot\", action=\"store_true\", default=False, help=\"log scale for x\")\n\t\tself.parser.add_option(\"-T\", \"--timelimit\", dest=\"timelimit\", default=1e99, type=float, help=\"time limit for runs\")\n\t\tself.parser.add_option(\"-P\", \"--plot-title\", dest=\"plottitle\", default=None, help=\"plot title\")\n\t\tself.parser.add_option(\"-X\", \"--x-label\", dest=\"xlabel\", default='Time Ratio', help=\"x axis label\")\n\t\tself.parser.add_option(\"-B\", \"--bw\", dest=\"bw\", action=\"store_true\", default=False, help=\"plot B/W\")\n\n\tdef addOption(self, *args, **kwargs):\n\t\tself.parser.add_option(*args, **kwargs)\n\n\tdef parseArgs(self):\n\t\t(options, args) = self.parser.parse_args()\n\t\toptions.input = args[0]\n\t\toptions.output = args[1]\n\t\treturn options\n\n\ndef readTable(fp, delimiter):\n\t\"\"\"\n\tread a CSV file with performance profile specification\n\tthe format is as follows:\n\tncols algo1 algo2 ...\n\tnome_istanza tempo(algo1) tempo(algo2) ...\n\t...\n\t\"\"\"\n\tfirstline = fp.readline().strip().split(delimiter)\n\tncols = int(firstline[0])\n\tassert(ncols <= len(markers))\n\tcnames = firstline[1:]\n\trnames = []\n\trows = []\n\tfor row in fp:\n\t\trow = row.strip().split(delimiter)\n\t\trnames.append(row[0])\n\t\trdata = np.empty(ncols)\n\t\tfor j in range(ncols):\n\t\t\trdata[j] = float(row[j + 1])\n\t\trows.append(rdata)\n\tdata = np.array(rows)\n\treturn (rnames, cnames, data)\n\n\ndef main():\n\tparser = CmdLineParser()\n\topt = parser.parseArgs()\n\tprint(opt)\n\t# read data\n\trnames, cnames, data = readTable(open(opt.input, 'r'), opt.delimiter)\n\tnrows, ncols = data.shape\n\t# add shift\n\tdata = data + opt.shift\n\t# compute ratios\n\tminima = data.min(axis=1)\n\tratio = data\n\tfor j in range(ncols):\n\t\tratio[:, j] = data[:, j] / minima\n\t# compute maxratio\n\tif opt.maxratio == -1:\n\t\topt.maxratio = ratio.max()\n\t# any time >= timelimit will count as maxratio + bigM (so that it does not show up in plots)\n\tfor i in range(nrows):\n\t\tfor j in range(ncols):\n\t\t\tif data[i,j] >= opt.timelimit:\n\t\t\t\tratio[i,j] = opt.maxratio + 1e6\n\t# sort ratios\n\tratio.sort(axis=0)\n\t# plot first\n\ty = np.arange(nrows, dtype=np.float64) / nrows\n\tfor j in range(ncols):\n\t\toptions = dict(label=cnames[j],\n\t\t\t\tlinewidth=defLW, linestyle='steps-post' + dashes[j],\n\t\t\t\tmarker=markers[j], markeredgewidth=defLW, markersize=defMS)\n\t\t# plt.step(ratio[:,j], y, 
label=cnames[j], linewidth=defLW, marker=markers[j], markersize=defMS)\n\t\tif opt.bw:\n\t\t\toptions['markerfacecolor'] = 'w'\n\t\t\toptions['markeredgecolor'] = 'k'\n\t\t\toptions['color'] = 'k'\n\t\telse:\n\t\t\toptions['color'] = colors[j]\n\t\tif opt.logplot:\n\t\t\tplt.semilogx(ratio[:, j], y, **options)\n\t\telse:\n\t\t\tplt.plot(ratio[:, j], y, **options)\n\tplt.axis([1, opt.maxratio, 0, 1])\n\t#plt.xlim([1,1.4])\t#Comment when not using Cost\n\tplt.legend(loc='lower right')\n\tif opt.plottitle is not None:\n\t\tplt.title(opt.plottitle)\n\tplt.xlabel(opt.xlabel)\n\tplt.savefig(opt.output,dpi=600)\n\nif __name__ == '__main__':\n\tmain()" ]
[ [ "matplotlib.pyplot.legend", "numpy.empty", "matplotlib.pyplot.semilogx", "matplotlib.pyplot.savefig", "matplotlib.pyplot.axis", "numpy.arange", "matplotlib.pyplot.title", "numpy.array", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
martonlanga/deepchem
[ "1c007fbae899a49fe3c40f41e7273ed21867fef9" ]
[ "deepchem/feat/base_classes.py" ]
[ "\"\"\"\nFeature calculations.\n\"\"\"\nimport logging\nimport types\nimport numpy as np\nimport multiprocessing\n\n__author__ = \"Steven Kearnes\"\n__copyright__ = \"Copyright 2014, Stanford University\"\n__license__ = \"BSD 3-clause\"\n\n\ndef _featurize_complex(featurizer, mol_pdb_file, protein_pdb_file, log_message):\n logging.info(log_message)\n return featurizer._featurize_complex(mol_pdb_file, protein_pdb_file)\n\n\nclass ComplexFeaturizer(object):\n \"\"\"\"\n Abstract class for calculating features for mol/protein complexes.\n \"\"\"\n\n def featurize_complexes(self, mol_files, protein_pdbs):\n \"\"\"\n Calculate features for mol/protein complexes.\n\n Parameters\n ----------\n mols: list\n List of PDB filenames for molecules.\n protein_pdbs: list\n List of PDB filenames for proteins.\n\n Returns\n -------\n features: np.array\n Array of features\n failures: list\n Indices of complexes that failed to featurize.\n \"\"\"\n pool = multiprocessing.Pool()\n results = []\n for i, (mol_file, protein_pdb) in enumerate(zip(mol_files, protein_pdbs)):\n log_message = \"Featurizing %d / %d\" % (i, len(mol_files))\n results.append(\n pool.apply_async(_featurize_complex,\n (self, mol_file, protein_pdb, log_message)))\n pool.close()\n features = []\n failures = []\n for ind, result in enumerate(results):\n new_features = result.get()\n # Handle loading failures which return None\n if new_features is not None:\n features.append(new_features)\n else:\n failures.append(ind)\n features = np.asarray(features)\n return features, failures\n\n def _featurize_complex(self, mol_pdb, complex_pdb):\n \"\"\"\n Calculate features for single mol/protein complex.\n\n Parameters\n ----------\n mol_pdb: list\n Should be a list of lines of the PDB file.\n complex_pdb: list\n Should be a list of lines of the PDB file.\n \"\"\"\n raise NotImplementedError('Featurizer is not defined.')\n\n\nclass Featurizer(object):\n \"\"\"\n Abstract class for calculating a set of features for a molecule.\n\n Child classes implement the _featurize method for calculating features\n for a single molecule.\n \"\"\"\n\n def featurize(self, mols, verbose=True, log_every_n=1000):\n \"\"\"\n Calculate features for molecules.\n\n Parameters\n ----------\n mols : iterable\n RDKit Mol objects.\n \"\"\"\n mols = list(mols)\n features = []\n for i, mol in enumerate(mols):\n if mol is not None:\n features.append(self._featurize(mol))\n else:\n features.append(np.array([]))\n\n features = np.asarray(features)\n return features\n\n def _featurize(self, mol):\n \"\"\"\n Calculate features for a single molecule.\n\n Parameters\n ----------\n mol : RDKit Mol\n Molecule.\n \"\"\"\n raise NotImplementedError('Featurizer is not defined.')\n\n def __call__(self, mols):\n \"\"\"\n Calculate features for molecules.\n\n Parameters\n ----------\n mols : iterable\n RDKit Mol objects.\n \"\"\"\n return self.featurize(mols)\n\n\nclass UserDefinedFeaturizer(Featurizer):\n \"\"\"Directs usage of user-computed featurizations.\"\"\"\n\n def __init__(self, feature_fields):\n \"\"\"Creates user-defined-featurizer.\"\"\"\n self.feature_fields = feature_fields\n" ]
[ [ "numpy.array", "numpy.asarray" ] ]
avelez93/tfx
[ "75fbb6a7d50e99138609be3ca4c3a204a13a2195" ]
[ "tfx/benchmarks/tft_benchmark_chicago_taxi.py" ]
[ "# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"TFT benchmark for Chicago Taxi dataset.\"\"\"\n\nfrom absl import flags\nfrom tfx.benchmarks import tft_benchmark_base\nfrom tfx.benchmarks.datasets.chicago_taxi import dataset\n\nfrom tensorflow.python.platform import test # pylint: disable=g-direct-tensorflow-import\n\nFLAGS = flags.FLAGS\nflags.DEFINE_integer(\"num_analyzers_wide\", 10,\n \"Number of analyzers in the TFT preprocessing function. \"\n \"Only used in `TFTBenchmarkChicagoTaxiWide`.\")\n\n\nclass TFTBenchmarkChicagoTaxi(tft_benchmark_base.TFTBenchmarkBase):\n\n def __init__(self, **kwargs):\n super().__init__(dataset=dataset.get_dataset(), **kwargs)\n\n\nclass TFTBenchmarkChicagoTaxiWide(tft_benchmark_base.TFTBenchmarkBase):\n\n def __init__(self, **kwargs):\n super().__init__(\n dataset=dataset.get_wide_dataset(num_analyzers=self._num_analyzers()),\n **kwargs)\n\n def _num_analyzers(self):\n return (FLAGS.num_analyzers_wide\n if FLAGS.is_parsed() else FLAGS[\"num_analyzers_wide\"].default)\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "tensorflow.python.platform.test.main" ] ]
parvex/residual-continual-learning-benchmark
[ "8eeb2e57ecf0711e075eb02e8ed06fc8e7b9f20d" ]
[ "dataloaders/wrapper.py" ]
[ "from os import path\nimport torch\nimport torch.utils.data as data\n\n\nclass CacheClassLabel(data.Dataset):\n \"\"\"\n A dataset wrapper that has a quick access to all labels of data.\n \"\"\"\n def __init__(self, dataset):\n super(CacheClassLabel, self).__init__()\n self.dataset = dataset\n self.labels = torch.LongTensor(len(dataset)).fill_(-1)\n label_cache_filename = path.join(dataset.root, dataset.__module__+'_'+str(len(dataset))+'.pth')\n if path.exists(label_cache_filename):\n self.labels = torch.load(label_cache_filename)\n else:\n for i, data in enumerate(dataset):\n self.labels[i] = data[1]\n torch.save(self.labels, label_cache_filename)\n self.number_classes = len(torch.unique(self.labels))\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n img,target = self.dataset[index]\n return img, target\n\n\nclass AppendName(data.Dataset):\n \"\"\"\n A dataset wrapper that also return the name of the dataset/task\n \"\"\"\n def __init__(self, dataset, name, first_class_ind=0):\n super(AppendName,self).__init__()\n self.dataset = dataset\n self.name = name\n self.first_class_ind = first_class_ind # For remapping the class index\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n img,target = self.dataset[index]\n target = target + self.first_class_ind\n return img, target, self.name\n\n\nclass Subclass(data.Dataset):\n \"\"\"\n A dataset wrapper that return the task name and remove the offset of labels (Let the labels start from 0)\n \"\"\"\n def __init__(self, dataset, class_list, remap=True):\n '''\n :param dataset: (CacheClassLabel)\n :param class_list: (list) A list of integers\n :param remap: (bool) Ex: remap class [2,4,6 ...] to [0,1,2 ...]\n '''\n super(Subclass,self).__init__()\n assert isinstance(dataset, CacheClassLabel), 'dataset must be wrapped by CacheClassLabel'\n self.dataset = dataset\n self.class_list = class_list\n self.remap = remap\n self.indices = []\n for c in class_list:\n self.indices.extend((dataset.labels==c).nonzero().flatten().tolist())\n if remap:\n self.class_mapping = {c: i for i, c in enumerate(class_list)}\n\n def __len__(self):\n return len(self.indices)\n\n def __getitem__(self, index):\n img,target = self.dataset[self.indices[index]]\n if self.remap:\n raw_target = target.item() if isinstance(target,torch.Tensor) else target\n target = self.class_mapping[raw_target]\n return img, target\n\n\nclass Permutation(data.Dataset):\n \"\"\"\n A dataset wrapper that permute the position of features\n \"\"\"\n def __init__(self, dataset, permute_idx):\n super(Permutation,self).__init__()\n self.dataset = dataset\n self.permute_idx = permute_idx\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n img,target = self.dataset[index]\n shape = img.size()\n img = img.view(-1)[self.permute_idx].view(shape)\n return img, target\n\n\nclass Storage(data.Subset):\n\n def reduce(self, m):\n self.indices = self.indices[:m]\n" ]
[ [ "torch.save", "torch.load", "torch.unique" ] ]
Davjes15/ardas_platform
[ "d962e7280ac4477fc3ee71280e90aeab58f74bd6" ]
[ "hs_logisticregression.py" ]
[ "# Implementation of Random Forest model to classify failures in a hydraulic process\n# Hydraulic system can be found : https://archive.ics.uci.edu/ml/datasets/Condition+monitoring+of+hydraulic+systems\n# The data set contains raw process sensor data (i.e. without feature extraction) which are structured as matrices (tab-delimited) \n# with rows representing the cycles and the columns the data points within a cycle. \n# The sensors involved are: \n\n# Sensor Physical quantity Unit Sampling rate\n# PS1 Pressure bar 100 Hz \n# PS2 Pressure bar 100 Hz \n# PS3 Pressure bar 100 Hz \n# PS4 Pressure bar 100 Hz \n# PS5 Pressure bar 100 Hz \n# PS6 Pressure bar 100 Hz \n# EPS1 Motor power W 100 Hz \n# FS1 Volume flow l/min 10 Hz \n# FS2 Volume flow l/min 10 Hz \n# TS1 Temperature °C 1 Hz \n# TS2 Temperature °C 1 Hz \n# TS3 Temperature °C 1 Hz \n# TS4 Temperature °C 1 Hz \n# VS1 Vibration mm/s 1 Hz \n# CE Cooling efficiency (virtual) % 1 Hz \n# CP Cooling power (virtual) kW 1 Hz \n# SE Efficiency factor % 1 Hz \n\n\n#**************** Python Version : Python 3.7.3 ************\n# Package Version\n# --------------- --------\n# aniso8601 7.0.0\n# certifi 2019.3.9\n# chardet 3.0.4\n# Click 7.0\n# configparser 3.7.4\n# cycler 0.10.0\n# databricks-cli 0.8.7\n# Flask 1.1.1\n# Flask-Cors 3.0.8\n# Flask-RESTful 0.3.7\n# idna 2.8\n# itsdangerous 1.1.0\n# Jinja2 2.10.1\n# joblib 0.13.2\n# jsonify 0.5\n# kiwisolver 1.1.0\n# MarkupSafe 1.1.1\n# matplotlib 3.1.1\n# numpy 1.17.0\n# pandas 0.25.1\n# pathlib 1.0.1\n# pip 19.2.3\n# pyparsing 2.4.2\n# python-dateutil 2.8.0\n# pytz 2019.1\n# requests 2.22.0\n# scikit-learn 0.21.3\n# scipy 1.3.1\n# seaborn 0.9.0\n# setuptools 40.8.0\n# six 1.12.0\n# sklearn 0.0\n# tabulate 0.8.3\n# urllib3 1.25.3\n# virtualenv 16.6.1\n# Werkzeug 0.15.4\n\n\n# Import libraries\n\nimport pandas as pd\nimport numpy as np\nimport os, sys\nimport pickle\nimport sklearn as sk\nfrom pathlib import Path\nfrom sklearn import preprocessing\nfrom sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\n\n# Define directories for reading and saving files\npath = os.path.abspath(os.path.dirname(sys.argv[0]))\npath_file= Path(path)\nml_model_dir = path_file / \"ml_model\"\nos.chdir(ml_model_dir) # os.chdir changes the directory so we can import the data from a different directory depending on the computer\nprint (os.getcwd())\n\n# Import feature extracted\n\ndf_features = pd.read_csv('feature_hs.csv')\nprint (df_features.head(10))\nprint (\"Features imported 100%\")\n# Import target conditions\nnames = ['cooler_condition', 'valve_condition', 'pump_leakage', 'hydraulic_accumulator', 'stable_flag']\nconditions = pd.read_csv('profile.txt',names = names, sep=\"\\t\")\nprint (conditions.head(10))\nprint (\"Target Conditions imported 100%\")\n\n\n# Define features\nX = df_features # features file\n\n\n# Save feature importance as csv file\ndef save_fi (data, path):\n df = pd.DataFrame(data,\n index = X_train.columns,\n columns=['Importance']).sort_values('Importance',ascending=False)\n data=df.T\n data = data.iloc[:,0:6]\n export_fi = data.to_csv (path, index = None, header=True)\n return (export_fi)\n\n# Trainnig a random forest algorithm \ndef train_lr (X_train, X_test, y_train, y_test, element):\n\t# Initialize model \n\tlr = LogisticRegression(multi_class = 'ovr', solver = 'liblinear')\n #Train the model on training data\n\tmodel_lr= lr.fit(X_train, 
y_train);\n\tprint (element + \" Model Training Ready\")\n\t# Use the LR's predict method on the test data\n\tpredictions = lr.predict(X_test)\n\tprint(element + ' Accuracy Condition: %.2f%%' % (accuracy_score(predictions,y_test)*100))\n\treturn (model_lr)\n\ndef save_model_object(model_object,model_name,model_params):\n file_name=model_name+\"_\"+str(model_params).replace('[',\"\").replace(']',\"\").replace(',',\"_\").replace(' ',\"_\")+\".obj\"\n with open(file_name,'wb') as handle:\n try:\n pickle.dump(model_object,handle)\n except:\n print(\"ERROR\")\n print(file_name,\" saved successfully\")\n# ---------------------------------------------------------------\n\n# Train model for cooler condition classification\nY = conditions[\"cooler_condition\"] # define target value\n# split the data into training and testing setssplit_data (X,Y)\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.3, random_state = 42)\nlr_cooler = train_lr (X_train, X_test, y_train, y_test, \"Cooler\")\n# X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.30,random_state = 42)\n# rf = RandomForestClassifier(n_estimators= 1000, random_state=42)\n# rf.fit(X_train, y_train);\n# print ('Model training 100%')\n# predictions = rf.predict(X_test)\n# print('Accuracy Cooler Condition: %.2f%%' % (accuracy_score(predictions,y_test)*100))\n\n#Save machine learning model\nsave_model_object(lr_cooler,\"logistic_regression\",\"c\")\n\n# Create a dataframe with feature importance\n#fic_path = path_file / 'static' / 'hs_database'/ 'lr_feature_cooler.csv'\n#fi_c=rf_cooler.feature_importances_\n#save_fi (fi_c, fic_path)\n\n#-----------------------------------------------------------------\n\n# Train model for valve condition classification\nYval = conditions[\"valve_condition\"]\nX_train, X_test, y_train, y_test = train_test_split(X, Yval, test_size = 0.3, random_state = 42)\nlr_valve = train_lr (X_train, X_test, y_train, y_test, \"Valve\")\nsave_model_object(lr_valve,\"logistic_regression\",\"v\")\n#fiv_path = path_file / 'static' / 'hs_database'/ 'feature_valve.csv'\n#fi_v=rf_valve.feature_importances_\n#save_fi (fi_v, fiv_path)\n#-----------------------------------------------------------------\n\n# Train model for pump condition classification\nYpum = conditions[\"pump_leakage\"]\nX_train, X_test, y_train, y_test = train_test_split(X, Ypum, test_size = 0.3, random_state = 42)\nlr_pump = train_lr (X_train, X_test, y_train, y_test, \"Pump\")\nsave_model_object(lr_pump,\"logistic_regression\",\"p\")\n#fip_path = path_file / 'static' / 'hs_database'/ 'feature_pump.csv'\n#fi_p=rf_pump.feature_importances_\n#save_fi (fi_p, fip_path)\n\n#-----------------------------------------------------------------\n\n# Train model for accumulator condition classification\n\nYacc = conditions[\"hydraulic_accumulator\"]\nX_train, X_test, y_train, y_test = train_test_split(X, Yacc, test_size = 0.3, random_state = 42)\nlr_acc = train_lr (X_train, X_test, y_train, y_test, \"Accumulator\")\nsave_model_object(lr_acc ,\"logistic_regression\",\"a\")\n#fia_path = path_file / 'static' / 'hs_database'/ 'feature_acc.csv'\n#fi_a=rf_acc.feature_importances_\n#save_fi (fi_a, fia_path)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "sklearn.metrics.accuracy_score", "sklearn.linear_model.LogisticRegression", "sklearn.model_selection.train_test_split" ] ]
NCIA-Diffusion/ScoreSDE
[ "b5a562908daf66e6dcf0b791beb83f1fcb61174b" ]
[ "run_lib.py" ]
[ "import os\nimport logging\nimport copy\nfrom tqdm import trange\nfrom datetime import datetime\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision.utils import save_image\n\nfrom utils import ema\nfrom lib.dataset import DataLooper \nfrom lib.sde import VPSDE\nfrom lib.model.ddpm import DDPM\nfrom lib.trainer import DiffusionTrainer\nfrom lib.sampler import DiffusionSampler\n\n\ndef train(config, logdir, resume=True):\n \"\"\"Running a training pipeline\"\"\"\n # Dataset setup\n datalooper = DataLooper(\n config,\n batch_size=config.train.batch_size,\n )\n\n # Model setup\n if config.model.name.lower() == 'ddpm':\n net_model = DDPM(\n config.dataset.ch,\n config.model.ch,\n config.model.ch_mult,\n config.model.attn,\n config.model.num_res_blocks,\n config.model.dropout,\n )\n else:\n raise ValueError\n\n ema_model = copy.deepcopy(net_model)\n\n if config.parallel:\n net_model = torch.nn.DataParallel(net_model)\n ema_model = torch.nn.DataParallel(ema_model)\n\n # SDE setup\n if config.sde.name == 'VPSDE':\n sde = VPSDE(\n config.sde.beta_min,\n config.sde.beta_max,\n config.sde.N,\n )\n else:\n raise ValueError\n\n # Trainer setup\n trainer = DiffusionTrainer(\n sde,\n net_model,\n config.model.pred_type,\n ).to(config.device)\n trainer.train()\n\n # Optimizer setup\n optim = torch.optim.Adam(\n net_model.parameters(),\n lr=config.train.lr,\n )\n warmup = config.train.warmup\n sched = torch.optim.lr_scheduler.LambdaLR(\n optim,\n lr_lambda=lambda step: min(step, warmup) / warmup,\n )\n\n # Sampler setup\n sampler = DiffusionSampler(\n sde,\n ema_model,\n config.model.pred_type,\n ).to(config.device)\n sampler.eval()\n \n # Log setup \n sample_dir = os.path.join(logdir, 'samples')\n os.makedirs(sample_dir, exist_ok=True)\n writer = SummaryWriter(logdir)\n\n # Show model size\n model_size = sum(p.numel() for p in net_model.parameters())\n logging.info(f'Model Params : {model_size / 1024 / 1024:.2f}M')\n\n # Load checkpoint (if exists)\n try:\n assert resume\n ckpt = torch.load(os.path.join(logdir, f'ckpt_latest.pt'))\n net_model.load_state_dict(ckpt['net_model'])\n ema_model.load_state_dict(ckpt['ema_model'])\n optim.load_state_dict(ckpt['optimizer'])\n sched.load_state_dict(ckpt['scheduler'])\n init_step = ckpt['step'] + 1\n logging.info(f'Checkpoint loaded! Re-start from step {init_step}.')\n except:\n init_step = 0\n logging.info(f'No checkpoint found. Start from step {init_step}.')\n\n # Start training\n with trange(init_step, config.train.total_steps, dynamic_ncols=True) as pbar:\n for step in pbar:\n # Train\n optim.zero_grad()\n x_0 = next(datalooper)\n x_0 = x_0.to(config.device)\n loss = trainer(x_0)\n loss = loss.mean()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(\n net_model.parameters(),\n config.train.grad_clip,\n )\n optim.step()\n sched.step()\n ema(net_model, ema_model, config.train.ema_decay)\n\n # Log\n writer.add_scalar('loss', loss, step)\n pbar.set_postfix(loss=f'{loss:.3f}')\n\n # Sample\n if config.train.sample_step > 0 and step % config.train.sample_step == 0:\n xs = []\n total_steps = config.eval.sample_size // config.eval.batch_size\n for i in range(0, config.eval.sample_size, config.eval.batch_size):\n x_T = torch.randn_like(x_0)\n with torch.no_grad():\n x = sampler(\n x_T,\n pbar,\n corrector_n_steps=1,\n corrector_langevin_snr=0.16,\n )\n xs.append((x.detach().cpu() + 1.) 
/ 2)\n pbar.set_postfix(option=f'({i+1}/{total_steps})')\n xs = torch.cat(xs, dim=0)\n save_image(\n xs[:64],\n os.path.join(sample_dir, f'sample_{step}.png'),\n nrow=8,\n )\n\n # Save\n if config.train.save_step > 0 and step % config.train.save_step == 0:\n ckpt = {\n 'net_model': net_model.state_dict(),\n 'ema_model': ema_model.state_dict(),\n 'optimizer': optim.state_dict(),\n 'scheduler': sched.state_dict(),\n 'step': step,\n }\n torch.save(ckpt, os.path.join(logdir, f'ckpt_latest.pt'))\n\n # Archive\n if config.train.archive_step > 0 and step % config.train.archive_step == 0:\n ckpt = {\n 'net_model': net_model.state_dict(),\n 'ema_model': ema_model.state_dict(),\n 'optimizer': optim.state_dict(),\n 'scheduler': sched.state_dict(),\n 'step': step,\n }\n torch.save(ckpt, os.path.join(logdir, f'ckpt_{step}.pt'))\n\n writer.close()\n\n\ndef eval(config, logdir):\n \"\"\"Running an evaluation pipeline\"\"\"\n # Datalooper setup\n eval_datalooper = DataLooper(\n config,\n batch_size=config.eval.batch_size,\n )\n sample_size = config.eval.sample_size\n batch_size = config.eval.batch_size\n\n # Model setup\n if config.model.name.lower() == 'ddpm':\n model = DDPM(\n config.dataset.ch,\n config.model.ch,\n config.model.ch_mult,\n config.model.attn,\n config.model.num_res_blocks,\n config.model.dropout,\n )\n else:\n raise ValueError\n\n if config.parallel:\n model = torch.nn.DataParallel(model)\n \n # SDE setup\n if config.sde.name == 'VPSDE':\n sde = VPSDE(\n config.sde.beta_min,\n config.sde.beta_max,\n config.sde.N,\n )\n else:\n raise ValueError\n\n # Sampler setup\n sampler = DiffusionSampler(\n sde,\n model,\n config.model.pred_type,\n ).to(config.device)\n sampler.eval()\n\n # Show model size\n model_size = sum(p.numel() for p in model.parameters())\n logging.info(f'Model Params : {model_size / 1024 / 1024:.2f}M')\n\n # Load checkpoint\n ckpt = torch.load(\n os.path.join(logdir, f'ckpt_latest.pt'),\n map_location=config.device\n )\n logging.info(f'Checkpoint step : {ckpt[\"step\"]}')\n model.load_state_dict(ckpt['ema_model'])\n\n # Directory setup\n eval_dir = os.path.join(logdir, 'eval')\n sample_dir = os.path.join(eval_dir, 'samples')\n os.makedirs(eval_dir, exist_ok=True)\n os.makedirs(sample_dir, exist_ok=True)\n\n xs = []\n x_0 = next(eval_datalooper).to(config.device)\n with trange(0, sample_size, batch_size, dynamic_ncols=True) as pbar:\n for _ in pbar:\n x_T = torch.randn_like(x_0)\n with torch.no_grad():\n x = sampler(\n x_T,\n pbar,\n corrector_n_steps=3,\n corrector_langevin_snr=0.16,\n )\n xs.append((x.detach().cpu() + 1.) / 2)\n xs = torch.cat(xs, dim=0)\n now = datetime.now()\n save_image(\n xs[:64],\n os.path.join(sample_dir, f'samples_{now}.png'),\n nrow=8,\n )" ]
[ [ "torch.randn_like", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.nn.DataParallel", "torch.cat" ] ]
joshfp/fastai
[ "794365cd7f734b5e1027d7e19c99e648fbb9a12b" ]
[ "tests/test_core.py" ]
[ "import pytest, torch\nimport numpy as np\nfrom fastai import *\nfrom tempfile import TemporaryDirectory\n\ndef test_cpus(): assert num_cpus() >= 1\n\[email protected](\"p, q, expected\", [\n (5 , 1 , [5]),\n (5 , [1,1], [5, 5]),\n ([5], 1 , [5]),\n ([5], [1,1], [5, 5]),\n (\"ab\" , \"cd\" , [\"a\", \"b\"]),\n (\"ab\" , [\"cd\", \"ef\"], [\"a\", \"b\"]),\n ([\"ab\"], \"cd\" , [\"ab\", \"ab\"]),\n ([\"ab\"], [\"cd\", \"ef\"], [\"ab\", \"ab\"]),\n])\ndef test_listify(p, q, expected):\n assert listify(p, q) == expected\n\ndef test_ifnone():\n assert ifnone(None, 5) == 5\n assert ifnone(5, None) == 5\n assert ifnone(1, 5) == 1\n assert ifnone(0, 5) == 0\n\ndef test_uniqueify():\n assert uniqueify([1,1,3,3,5]) == [1,3,5]\n assert uniqueify([1,3,5]) == [1,3,5]\n assert uniqueify([1,1,1,3,5]) == [1,3,5]\n\ndef test_listy():\n assert is_listy([1,1,3,3,5]) == True\n assert is_listy((1,1,3,3,5)) == True\n assert is_listy([1,\"2\",3,3,5]) == True\n assert is_listy((1,\"2\",3,3,5)) == True\n assert is_listy(1) == False\n assert is_listy(\"2\") == False\n assert is_listy({1, 2}) == False\n assert is_listy(set([1,1,3,3,5])) == False\n\ndef test_tuple():\n assert is_tuple((1,1,3,3,5)) == True\n assert is_tuple([1]) == False\n assert is_tuple(1) == False\n\ndef test_noop():\n assert noop(1) is 1\n\ndef test_to_int():\n assert to_int((\"1\",\"1\",\"3\",\"3\",\"5\")) == [1,1,3,3,5]\n assert to_int([1,\"2\",3.3,3,5]) == [1,2,3,3,5]\n assert to_int(1) == 1\n assert to_int(1.2) == 1\n assert to_int(\"1\") == 1\n\ndef test_partition_functionality():\n\n def test_partition(a, sz, ex):\n result = partition(a, sz)\n assert len(result) == len(ex)\n assert all([a == b for a, b in zip(result, ex)])\n\n a = [1,2,3,4,5]\n\n sz = 2\n ex = [[1,2],[3,4],[5]]\n test_partition(a, sz, ex)\n\n sz = 3\n ex = [[1,2,3],[4,5]]\n test_partition(a, sz, ex)\n\n sz = 1\n ex = [[1],[2],[3],[4],[5]]\n test_partition(a, sz, ex)\n\n sz = 6\n ex = [[1,2,3,4,5]]\n test_partition(a, sz, ex)\n\n sz = 3\n a = []\n result = partition(a, sz)\n assert len(result) == 0\n\ndef test_idx_dict():\n assert idx_dict(np.array([1,2,3]))=={1: 0, 2: 1, 3: 2}\n assert idx_dict([1, 2, 3])=={1: 0, 2: 1, 3: 2}\n assert idx_dict((1, 2, 3))=={1: 0, 2: 1, 3: 2}\n\ndef test_find_classes():\n path = Path('./classes_test').resolve()\n os.mkdir(path)\n classes = ['class_0', 'class_1', 'class_2']\n for class_num in classes:\n os.mkdir(path/class_num)\n try:\n assert find_classes(path)==[Path('./classes_test/class_0').resolve(),Path('./classes_test/class_1').resolve(),Path('./classes_test/class_2').resolve()]\n finally:\n shutil.rmtree(path)\n\ndef test_arrays_split():\n a = arrays_split([0,3],[1, 2, 3, 4, 5], ['a', 'b', 'c', 'd', 'e'])\n b = [(array([1, 4]),array(['a', 'd'])), (array([5, 2]),(array(['e','b'])))]\n np.testing.assert_array_equal(a,b)\n\n c = arrays_split([0,3],[1, 2, 3, 4, 5])\n d = [(array([1, 4]),), (array([5, 2]),)]\n np.testing.assert_array_equal(c,d)\n\n with pytest.raises(Exception): arrays_split([0,5],[1, 2, 3, 4, 5])\n with pytest.raises(Exception): arrays_split([0,3],[1, 2, 3, 4, 5], [1, 2, 3, 4])\n\ndef test_random_split():\n valid_pct = 0.4\n a = [len(arr) for arr in random_split(valid_pct, [1,2,3,4,5], ['a', 'b', 'c', 'd', 'e'])]\n b = [2, 2]\n assert a == b\n\n with pytest.raises(Exception): random_split(1.1, [1,2,3])\n with pytest.raises(Exception): random_split(0.1, [1,2,3], [1,2,3,4])\n\ndef test_camel2snake():\n a = camel2snake('someString')\n b = 'some_string'\n assert a == b\n\n c = camel2snake('some2String')\n d = 'some2_string'\n 
assert c == d\n\n e = camel2snake('longStringExmpl')\n f = 'long_string_exmpl'\n assert e == f\n\ndef test_even_mults():\n a = even_mults(start=1, stop=8, n=4)\n b = array([1.,2.,4.,8.])\n np.testing.assert_array_equal(a,b)\n\ndef test_series2cat():\n df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], 'col3':[5, 6]})\n cols = 'col1','col2'\n series2cat(df,*cols)\n for col in cols:\n assert (df[col].dtypes == 'category')\n assert (df['col3'].dtypes == 'int64')\n\ndef _write_file(path): f = open(path, 'w'); f.write(str(path.name)); f.close()\nclass TestMaybeCopy(object):\n def test_copies_if_does_not_exist(self):\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n _write_file(tmpdir/'src')\n maybe_copy([str(tmpdir/'src')], [str(tmpdir/'dst')]) # works with strings\n assert os.path.exists(tmpdir/'dst')\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n _write_file(tmpdir/'src')\n maybe_copy([tmpdir/'src'], [tmpdir/'dst']) # works with Paths\n assert os.path.exists(tmpdir/'dst')\n\n def test_copies_if_older(self):\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n _write_file(tmpdir/'first')\n _write_file(tmpdir/'second')\n os.utime(tmpdir/'first', (1,1))\n os.utime(tmpdir/'second', (2,2))\n maybe_copy([tmpdir/'second'], [tmpdir/'first'])\n assert open(tmpdir/'first').read() == 'second'\n\n def test_does_not_copy_if_newer(self):\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n _write_file(tmpdir/'first')\n _write_file(tmpdir/'second')\n os.utime(tmpdir/'first', (1,1))\n os.utime(tmpdir/'second', (2,2))\n maybe_copy([tmpdir/'first'], [tmpdir/'second'])\n assert open(tmpdir/'second').read() == 'second'\n\n def test_creates_dst_dir_if_does_not_exist(self):\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n _write_file(tmpdir/'file')\n maybe_copy([tmpdir/'file'], [tmpdir/'dir'/'file'])\n assert os.path.exists(tmpdir/'dir'/'file')\n" ]
[ [ "numpy.array", "numpy.testing.assert_array_equal" ] ]
bratao/-PySeqLab
[ "fea1c4bd4d43565b1bb20a789d78946e1022d0ff" ]
[ "pyseqlab/utilities.py" ]
[ "\"\"\"\n@author: ahmed allam <[email protected]>\n\"\"\"\nimport os\nimport pickle\nimport shutil\nfrom datetime import datetime\nfrom copy import deepcopy\nfrom itertools import combinations\nimport heapq\nimport numpy\n\n\nclass SequenceStruct(object):\n r\"\"\"class for representing each sequence/segment\n \n Args:\n Y: list containing the sequence of states/labels (i.e. ['P','O','O','L','L'])\n X: list containing dictionary elements of observation sequences and/or features of the input\n seg_other_symbol: string or None (default), if specified then the task is a segmentation problem \n where it represents the non-entity symbol else (None) then it is considered \n as sequence labeling problem\n \n Attributes:\n Y: list containing the sequence of states/labels (i.e. ['P','O','O','L','L'])\n X: list containing dictionary elements of observation sequences and/or features of the input\n seg_other_symbol: string or None(default), if specified then the task is a segmentation problem \n where it represents the non-entity symbol else (None) then it is considered \n as sequence labeling problem\n T: int, length of a sequence (i.e. len(X))\n seg_attr: dictionary comprising the extracted attributes per each boundary of a sequence\n L: int, longest length of an identified segment in the sequence\n flat_y: list of labels/tags \n y_sboundaries: sorted list of boundaries of the :attr:`Y` of the sequence\n y_range: range of the sequence\n \n \"\"\"\n\n def __init__(self, X, Y, seg_other_symbol=None):\n self.seg_attr = {}\n self.X = X\n self.Y = (Y, seg_other_symbol)\n\n @property\n def X(self):\n return self._X\n\n @X.setter\n def X(self, l):\n \"\"\"setup the observation sequence \n \n Args:\n l: a list of elements (i.e. ``X = [{'w':'Michael'}, {'w':'is'}, {'w':'in'}, {'w':'New'}, {'w':'Haven'}]``)\n \n \n Example::\n \n the output X becomes:\n {1:{'w':'Michael'},\n 2:{'w':'is'}, \n 3:{'w':'in'}, \n 4:{'w':'New'},\n 5:{'w':'Haven'}\n }\n \"\"\"\n self._X = {}\n T = len(l)\n for i in range(T):\n self._X[i + 1] = l[i]\n\n # new assignment clear seg_attr\n if self.seg_attr:\n self.seg_attr.clear()\n self.T = T\n\n @property\n def Y(self):\n return self._Y\n\n @Y.setter\n def Y(self, elmtup):\n \"\"\"setup the label sequence\n \n Args:\n elmtup: tuple consisting of:\n - **Y** a list of elements (i.e. ``Y = ['P','O','O','L','L']``) \n representing the labels of the elements in X\n - **non_entity_symbol** which represents the Other category (i.e. 
non entity element which is 'O' in above example)\n \n Example:\n \n Y after the transformation becomes ``{(1, 1): 'P', (2,2): 'O', (3, 3): 'O', (4, 5): 'L'}``\n \"\"\"\n try:\n Y_ref, non_entity_symb = elmtup\n except ValueError:\n raise ValueError(\"tuple containing Y and non-entity symbol must be passed\")\n else:\n self._Y = {}\n # length of longest entity in a segment\n L = 1\n if non_entity_symb:\n label_indices = {}\n for i in range(len(Y_ref)):\n label = Y_ref[i]\n if label in label_indices:\n label_indices[label].append(i + 1)\n else:\n label_indices[label] = [i + 1]\n\n for label, indices_list in label_indices.items():\n if label == non_entity_symb or len(indices_list) == 1:\n for indx in indices_list:\n boundary = (indx, indx)\n self._Y[boundary] = label\n\n else:\n indx_stack = []\n for indx in indices_list:\n if not indx_stack:\n indx_stack.append(indx)\n else:\n diff = indx - indx_stack[-1]\n if diff > 1:\n boundary = (indx_stack[0], indx_stack[-1])\n self._Y[boundary] = label\n l = indx_stack[-1] - indx_stack[0] + 1\n if l > L:\n L = l\n indx_stack = [indx]\n else:\n indx_stack.append(indx)\n if indx_stack:\n boundary = (indx_stack[0], indx_stack[-1])\n self._Y[boundary] = label\n l = indx_stack[-1] - indx_stack[0] + 1\n if l > L:\n L = l\n indx_stack = [indx]\n\n else:\n for i in range(len(Y_ref)):\n label = Y_ref[i]\n boundary = (i + 1, i + 1)\n self._Y[boundary] = label\n\n # store the length of longest entity\n self.L = L\n # keep a copy of Y in as flat list (i.e. ['P','O','O','L','L'])\n self.flat_y = Y_ref\n\n # construct a map from the yboundaries to the pos in the list\n y_sboundaries = self.get_y_boundaries()\n self.y_sboundaries = y_sboundaries\n\n self.y_boundpos_map = {}\n pos = 0\n for boundary in y_sboundaries:\n self.y_boundpos_map[boundary] = pos\n pos += 1\n self.y_range = set(range(0, pos))\n\n # def update_boundaries(self):\n # self.y_boundaries = self.get_y_boundaries()\n # self.x_boundaries = self.get_x_boundaries()\n\n def flatten_y(self, Y):\n r\"\"\"flatten the :attr:`Y` attribute \n \n Args:\n Y: dictionary of this form ``{(1, 1): 'P', (2,2): 'O', (3, 3): 'O', (4, 5): 'L'}``\n \n Example:\n \n flattened y becomes ``['P','O','O','L','L']``\n \"\"\"\n s_boundaries = sorted(Y)\n flat_y = []\n for u, v in s_boundaries:\n for _ in range(u, v + 1):\n flat_y.append(Y[(u, v)])\n return flat_y\n\n def get_y_boundaries(self):\n \"\"\"return the sorted boundaries of the labels of the sequence\"\"\"\n return sorted(self.Y.keys())\n\n def get_x_boundaries(self):\n \"\"\"return the boundaries of the observation sequence\"\"\"\n boundaries = []\n for u in self.X:\n boundaries.append((u, u))\n return boundaries\n\n def __str__(self):\n \"\"\"return string representation of the parsed sequence\"\"\"\n out_str = \"Y sequence:\\n {}\\nX sequence:\\n {}\\n{}\".format(\n self.flat_y, self.X, \"-\" * 40\n )\n return out_str\n\n\nclass DataFileParser(object):\n \"\"\"class to parse a data file comprising the training/testing data\n \n Attributes:\n seqs: list comprising of sequences that are instances of :class:`SequenceStruct` class\n header: list of attribute names read from the file\n\n \"\"\"\n\n def __init__(self):\n self.header = []\n\n def read_file(\n self, file_path, header, y_ref=True, seg_other_symbol=None, column_sep=\" \"\n ):\n r\"\"\"read and parse a file the contains the sequences following a predefined format\n \n the file should contain label and observation tracks each separated in a column \n \n .. 
note::\n \n label column is the **LAST** column in the file (i.e. X_a X_b Y)\n \n Args:\n file_path: string representing the file path to the data file\n header: specifies how the header is reported in the file containing the sequences\n options include:\n - 'main' -> one header in the beginning of the file\n - 'per_sequence' -> a header for every sequence\n - list of keywords as header (i.e. ['w', 'part_of_speech'])\n \n Keyword Arguments:\n y_ref: boolean specifying if the reference label column in the data file\n seg_other_sybmol: string or None(default), if specified then the task is a segmentation problem \n where `seg_other_symbol` represents the non-entity symbol. In this case semi-CRF models\n are used. Else (i.e. `seg_other_symbol` is not None) then it is considered \n as sequence labeling problem.\n column_sep: string, separator used between the columns in the file\n\n \"\"\"\n if y_ref:\n update_seq = self.update_XY\n else:\n update_seq = self.update_X\n\n with open(file_path) as file_obj:\n counter = 0\n X = []\n Y = []\n for line in file_obj:\n counter += 1\n line = line.rstrip()\n # print(line)\n if line:\n # print(line)\n if y_ref:\n *x_arg, y = line.split(column_sep)\n self._xarg = x_arg\n self._y = y\n else:\n x_arg = line.split(column_sep)\n self._xarg = x_arg\n\n # print(x_arg)\n # first line of a sequence\n if counter == 1:\n if header == \"main\":\n if self.header:\n update_seq(X, Y)\n # X.append(self.parse_line(x_arg))\n # Y.append(y)\n else:\n self.parse_header(x_arg)\n\n elif header == \"per_sequence\":\n if not self.header:\n self.parse_header(x_arg)\n else:\n if self.header:\n update_seq(X, Y)\n # X.append(self.parse_line(x_arg))\n # Y.append(y)\n else:\n self.parse_header(header)\n update_seq(X, Y)\n # X.append(self.parse_line(x_arg))\n # Y.append(y)\n else:\n update_seq(X, Y)\n # X.append(self.parse_line(x_arg))\n # Y.append(y)\n\n else:\n seq = SequenceStruct(X, Y, seg_other_symbol)\n # reset counter for filling new sequence\n counter = 0\n X = []\n Y = []\n self._xarg = None\n self._y = None\n yield seq\n\n if X and Y:\n seq = SequenceStruct(X, Y, seg_other_symbol)\n # reset counter for filling new sequence\n counter = 0\n X = []\n Y = []\n self._xarg = None\n self._y = None\n yield seq\n\n def update_XY(self, X, Y):\n \"\"\"update sequence observations and corresponding labels\"\"\"\n X.append(self.parse_line(self._xarg))\n Y.append(self._y)\n\n def update_X(self, X, Y):\n \"\"\"update sequence observations\"\"\"\n X.append(self.parse_line(self._xarg))\n\n def parse_line(self, x_arg):\n \"\"\"parse the read line\n \n Args:\n x_arg: tuple of observation columns\n \"\"\"\n # fill the sequences X and Y with observations and tags respectively\n header = self.header\n x = {}\n for i in range(len(x_arg)):\n x[header[i]] = x_arg[i]\n return x\n\n def parse_header(self, x_arg):\n \"\"\"parse header\n \n Args:\n x_arg: tuple of attribute/observation names \n \"\"\"\n seq_header = [input_src for input_src in x_arg]\n self.header = seq_header\n\n\nclass ReaderWriter(object):\n \"\"\"class for dumping, reading and logging data\"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def dump_data(data, file_name, mode=\"wb\"):\n \"\"\"dump data by pickling \n \n Args:\n data: data to be pickled\n file_name: file path where data will be dumped\n mode: specify writing options i.e. 
binary or unicode\n \"\"\"\n with open(file_name, mode) as f:\n pickle.dump(data, f, protocol=4)\n\n @staticmethod\n def read_data(file_name, mode=\"rb\"):\n \"\"\"read dumped/pickled data\n \n Args:\n file_name: file path where data will be dumped\n mode: specify writing options i.e. binary or unicode\n \"\"\"\n with open(file_name, mode) as f:\n data = pickle.load(f)\n return data\n\n @staticmethod\n def log_progress(line, outfile, mode=\"a\"):\n \"\"\"write data to a file\n \n Args:\n line: string representing data to be written out\n outfile: file path where data will be written/logged\n mode: specify writing options i.e. append, write\n \"\"\"\n with open(outfile, mode) as f:\n f.write(line)\n\n\nclass AStarNode(object):\n \"\"\"class representing A* node to be used with A* searcher and viterbi for generating k-decoded list\n \n Args:\n cost: float representing the score/unnormalized probability of a sequence up to given position\n position: integer representing the current position in the sequence\n pi_c: prefix or state code of the label\n label: label of the current position in a sequence\n frwdlink: a link to :class:`AStarNode` node\n \n Attributes:\n cost: float representing the score/unnormalized probability of a sequence up to given position\n position: integer representing the current position in the sequence\n pi_c: prefix or state code of the label\n label: label of the current position in a sequence\n frwdlink: a link to :class:`AStarNode` node\n \n \"\"\"\n\n def __init__(self, cost, position, pi_c, label, frwdlink):\n self.cost = cost\n self.position = position\n self.pi_c = pi_c\n self.label = label\n self.frwdlink = frwdlink\n\n def print_node(self):\n \"\"\"print the info about a node\"\"\"\n statement = \"cost: {}, position: {}, pi_code: {}, label: {}, \".format(\n self.cost, self.position, self.pi_c, self.label\n )\n if self.frwdlink:\n statement += \"forward_link: {}\".format(self.frwdlink)\n else:\n statement += \"forward_link: None\"\n print(statement)\n\n\nclass AStarAgenda(object):\n \"\"\"class containing a heap where instances of :class:`AStarNode` class will be pushed \n \n the push operation will use the score matrix (built using viterbi algorithm)\n representing the unnormalized probability of the sequences ending at every position \n with the different available prefixes/states\n \n Attributes:\n qagenda: queue where instances of :class:`AStarNode` are pushed\n entry_count: counter that keeps track of the entries and associate each entry(node)\n with a unique number. 
It is useful for resolving nodes with equal costs\n \n \"\"\"\n\n def __init__(self):\n self.qagenda = []\n self.entry_count = 0\n\n def push(self, astar_node, cost):\n \"\"\"push instance of :class:`AStarNode` with its associated cost to the heap\n \n Args:\n astar_node: instance of :class:`AStarNode` class\n cost: float representing the score/unnormalized probability of a sequence up to given position\n \"\"\"\n heapq.heappush(self.qagenda, (-cost, self.entry_count, astar_node))\n self.entry_count += 1\n\n def pop(self):\n \"\"\"pop nodes with highest score from the heap\n \"\"\"\n astar_node = heapq.heappop(self.qagenda)[-1]\n return astar_node\n\n\nclass FO_AStarSearcher(object):\n \"\"\"A* star searcher associated with first-order CRF model such as :class:`FirstOrderCRF`\n \n Args:\n Y_codebook_rev: a reversed version of dictionary comprising the set of states each assigned a unique code\n \n Attributes:\n Y_codebook_rev: a reversed version of dictionary comprising the set of states each assigned a unique code\n \"\"\"\n\n def __init__(self, Y_codebook_rev):\n self.Y_codebook_rev = Y_codebook_rev\n\n def infer_labels(self, top_node, back_track):\n \"\"\"decode sequence by inferring labels\n \n Args:\n top_node: instance of :class:`AStarNode` class\n back_track: dictionary containing back pointers built using dynamic programming algorithm\n \"\"\"\n Y_codebook_rev = self.Y_codebook_rev\n # decoding the sequence\n # print(\"we are decoding\")\n # top_node.print_node()\n y_c = top_node.pi_c\n pos = top_node.position\n Y_decoded = []\n Y_decoded.append(y_c)\n t = pos - 1\n while t > 0:\n y_c_tplus1 = Y_decoded[-1]\n y_c_t = back_track[t + 1, y_c_tplus1]\n Y_decoded.append(y_c_t)\n t -= 1\n Y_decoded.reverse()\n Y_decoded = [Y_codebook_rev[y_code] for y_code in Y_decoded]\n\n while top_node.frwdlink:\n y = top_node.frwdlink.label\n Y_decoded.append(y)\n top_node = top_node.frwdlink\n # print(Y_decoded)\n return Y_decoded\n\n def search(self, alpha, back_track, T, K):\n \"\"\"A* star searcher uses the score matrix (built using viterbi algorithm) to decode top-K list of sequences\n \n Args:\n alpha: score matrix build using the viterbi algorithm\n back_track: back_pointers dictionary tracking the best paths to every state\n T: last decoded position of a sequence (in this context, it is the alpha.shape[0])\n K: number of top decoded sequences to be returned\n \n Returns:\n topk_list: top-K list of decoded sequences\n \n \n \"\"\"\n # push the best astar nodes to the queue (i.e. the states at time T)\n q = AStarAgenda()\n r = set()\n c = 0\n Y_codebook_rev = self.Y_codebook_rev\n # create nodes from the states at time T\n for y_c in Y_codebook_rev:\n cost = alpha[T, y_c]\n pos = T\n frwdlink = None\n label = Y_codebook_rev[y_c]\n node = AStarNode(cost, pos, y_c, label, frwdlink)\n # node.print_node()\n q.push(node, cost)\n\n track = []\n topk_list = []\n try:\n while c < K:\n # print(\"heap size \", len(q.qagenda))\n top_node = q.pop()\n track.append(top_node)\n\n for i in reversed(range(2, top_node.position + 1)):\n # best previous state at pos = i-1\n curr_y_c = top_node.pi_c\n bestprev_y_c = back_track[i, curr_y_c]\n pos = i - 1\n for prev_y_c in Y_codebook_rev:\n # create a new astar node\n if prev_y_c != bestprev_y_c:\n label = Y_codebook_rev[prev_y_c]\n cost = alpha[pos, prev_y_c]\n s = AStarNode(cost, pos, prev_y_c, label, top_node)\n q.push(s, cost)\n\n # create the backlink of the previous top_node (i.e. 
create a node from the best_y_c)\n cost = alpha[pos, bestprev_y_c]\n label = Y_codebook_rev[bestprev_y_c]\n top_node = AStarNode(cost, pos, y_c, label, top_node)\n\n # decode and check if it is not saved already in topk list\n y_labels = self.infer_labels(track[-1], back_track)\n # print(y_labels)\n signature = \"\".join(y_labels)\n if signature not in r:\n r.add(signature)\n topk_list.append(y_labels)\n c += 1\n track.pop()\n except (KeyError, IndexError) as e:\n # consider logging the error\n print(e)\n\n finally:\n # print('r ', r)\n # print('topk ', topk_list)\n return topk_list\n\n\nclass HO_AStarSearcher(object):\n \"\"\"A* star searcher associated with higher-order CRF model such as :class:`HOCRFAD`\n \n Args:\n P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set\n e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``\n P_elems: dictionary comprising the composing elements of every prefix in the `P` set\n e.g. ``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``\n\n Attributes:\n P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set\n e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``\n P_elems: dictionary comprising the composing elements of every prefix in the `P` set\n e.g. ``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``\n \"\"\"\n\n def __init__(self, P_codebook_rev, P_elems):\n self.P_codebook_rev = P_codebook_rev\n self.P_elems = P_elems\n\n def get_node_label(self, pi_code):\n \"\"\"get the the label/state given a prefix code\n \n Args:\n pi_code: prefix code which is an element of :attr:`P_codebook_rev`\n \"\"\"\n\n pi = self.P_codebook_rev[pi_code]\n y = self.P_elems[pi][-1]\n return y\n\n def infer_labels(self, top_node, back_track):\n \"\"\"decode sequence by inferring labels\n \n Args:\n top_node: instance of :class:`AStarNode` class\n back_track: dictionary containing back pointers tracking the best paths to every state\n \"\"\"\n # decoding the sequence\n # print(\"we are decoding\")\n # top_node.print_node()\n y = top_node.label\n pi_c = top_node.pi_c\n pos = top_node.position\n Y_decoded = []\n Y_decoded.append((pi_c, y))\n # print(\"t={}, p_T_code={}, p_T={}, y_T ={}\".format(T, p_T_code, p_T, y_T))\n t = pos - 1\n while t > 0:\n p_tplus1_c = Y_decoded[-1][0]\n p_t_c, y_t = back_track[t + 1, p_tplus1_c]\n # print(\"t={}, (t+1, p_t_code)=({}, {})->({},{})\".format(t, t+1, P_codebook[p_tplus1], p_t, y_t))\n Y_decoded.append((p_t_c, y_t))\n t -= 1\n Y_decoded.reverse()\n Y_decoded = [y for (__, y) in Y_decoded]\n\n while top_node.frwdlink:\n y = top_node.frwdlink.label\n Y_decoded.append(y)\n top_node = top_node.frwdlink\n # print(Y_decoded)\n return Y_decoded\n\n def search(self, alpha, back_track, T, K):\n \"\"\"A* star searcher uses the score matrix (built using viterbi algorithm) to decode top-K list of sequences\n \n Args:\n alpha: score matrix build using the viterbi algorithm\n back_track: back_pointers dictionary tracking the best paths to every state\n T: last decoded position of a sequence (in this context, it is the alpha.shape[0])\n K: number of top decoded sequences to be returned\n \n Returns:\n topk_list: top-K list of decoded sequences\n \n \n \"\"\"\n # push the best astar nodes to the queue (i.e. 
the pi's at time T)\n q = AStarAgenda()\n r = set()\n c = 0\n P_codebook_rev = self.P_codebook_rev\n # create nodes from the pi's at time T\n for pi_c in P_codebook_rev:\n cost = alpha[T, pi_c]\n pos = T\n frwdlink = None\n label = self.get_node_label(pi_c)\n node = AStarNode(cost, pos, pi_c, label, frwdlink)\n # node.print_node()\n q.push(node, cost)\n\n track = []\n topk_list = []\n try:\n while c < K:\n # print(\"heap size \", len(q.qagenda))\n top_node = q.pop()\n track.append(top_node)\n\n for i in reversed(range(2, top_node.position + 1)):\n best_prev_pi_c, best_y = back_track[i, top_node.pi_c]\n pos = i - 1\n for prev_pi_c in P_codebook_rev:\n # create a new astar node\n if prev_pi_c != best_prev_pi_c:\n label = self.get_node_label(prev_pi_c)\n cost = alpha[pos, prev_pi_c]\n s = AStarNode(cost, pos, prev_pi_c, label, top_node)\n q.push(s, cost)\n\n # create the backlink of the top_node\n cost = alpha[pos, best_prev_pi_c]\n top_node = AStarNode(cost, pos, best_prev_pi_c, best_y, top_node)\n\n # decode and check if it is not saved already in topk list\n y_labels = self.infer_labels(track[-1], back_track)\n # print(y_labels)\n sig = \"\".join(y_labels)\n if sig not in r:\n r.add(sig)\n topk_list.append(y_labels)\n c += 1\n track.pop()\n except (KeyError, IndexError) as e:\n # consider logging the error\n print(e)\n\n finally:\n # print('r ', r)\n # print('topk ', topk_list)\n return topk_list\n\n\nclass HOSemi_AStarSearcher(object):\n \"\"\"A* star searcher associated with higher-order CRF model such as :class:`HOSemiCRFAD`\n \n Args:\n P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set\n e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``\n P_elems: dictionary comprising the composing elements of every prefix in the `P` set\n e.g. ``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``\n\n Attributes:\n P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set\n e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``\n P_elems: dictionary comprising the composing elements of every prefix in the `P` set\n e.g. 
``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``\n \"\"\"\n\n def __init__(self, P_codebook_rev, pi_elems):\n self.P_codebook_rev = P_codebook_rev\n self.pi_elems = pi_elems\n\n def get_node_label(self, pi_code):\n \"\"\"get the the label/state given a prefix code\n \n Args:\n pi_code: prefix code which is an element of :attr:`P_codebook_rev`\n \"\"\"\n pi = self.P_codebook_rev[pi_code]\n y = self.pi_elems[pi][-1]\n return y\n\n def infer_labels(self, top_node, back_track):\n \"\"\"decode sequence by inferring labels\n \n Args:\n top_node: instance of :class:`AStarNode` class\n back_track: dictionary containing back pointers tracking the best paths to every state\n \"\"\"\n # decoding the sequence\n # print(\"we are decoding\")\n # top_node.print_node()\n y = top_node.label\n pi_c = top_node.pi_c\n pos = top_node.position\n Y_decoded = []\n\n d, pt_c, yt = back_track[pos, pi_c]\n for _ in range(d + 1):\n Y_decoded.append(y)\n\n t = pos - d - 1\n while t > 0:\n new_d, new_pt_c, new_yt = back_track[t, pt_c]\n for _ in range(new_d + 1):\n Y_decoded.append(yt)\n t = t - new_d - 1\n pt_c = new_pt_c\n yt = new_yt\n Y_decoded.reverse()\n\n while top_node.frwdlink:\n y = top_node.frwdlink.label\n Y_decoded.append(y)\n top_node = top_node.frwdlink\n # print(Y_decoded)\n return Y_decoded\n\n def search(self, alpha, back_track, T, K):\n \"\"\"A* star searcher uses the score matrix (built using viterbi algorithm) to decode top-K list of sequences\n \n Args:\n alpha: score matrix build using the viterbi algorithm\n back_track: back_pointers dictionary tracking the best paths to every state\n T: last decoded position of a sequence (in this context, it is the alpha.shape[0])\n K: number of top decoded sequences to be returned\n \n Returns:\n topk_list: top-K list of decoded sequences\n \n \n \"\"\"\n # push the best astar nodes to the queue (i.e. 
the pi's at time T)\n q = AStarAgenda()\n r = set()\n c = 0\n P_codebook_rev = self.P_codebook_rev\n\n # create nodes from the pi's at time T\n for pi_c in P_codebook_rev:\n cost = alpha[T, pi_c]\n pos = T\n frwdlink = None\n label = self.get_node_label(pi_c)\n node = AStarNode(cost, pos, pi_c, label, frwdlink)\n # node.print_node()\n q.push(node, cost)\n\n track = []\n topk_list = []\n try:\n while c < K:\n # print(\"heap size \", len(q.qagenda))\n top_node = q.pop()\n track.append(top_node)\n while True:\n curr_pos = top_node.position\n if curr_pos == 1:\n break\n d, best_prev_pi_c, best_prev_y = back_track[curr_pos, top_node.pi_c]\n prev_pos = curr_pos - d - 1\n for prev_pi_c in P_codebook_rev:\n # create a new astar node\n if prev_pi_c != best_prev_pi_c:\n label = self.get_node_label(prev_pi_c)\n cost = alpha[prev_pos, prev_pi_c]\n s = AStarNode(cost, prev_pos, prev_pi_c, label, top_node)\n q.push(s, cost)\n\n # create the backlink of the top_node\n cost = alpha[prev_pos, best_prev_pi_c]\n top_node = AStarNode(\n cost, prev_pos, best_prev_pi_c, best_prev_y, top_node\n )\n\n # decode and check if it is not saved already in topk list\n y_labels = self.infer_labels(track[-1], back_track)\n # print(y_labels)\n sig = \"\".join(y_labels)\n if sig not in r:\n r.add(sig)\n topk_list.append(y_labels)\n c += 1\n track.pop()\n except (KeyError, IndexError) as e:\n # consider logging the error\n print(e)\n\n finally:\n # print('r ', r)\n # print('topk ', topk_list)\n return topk_list\n\n\nclass TemplateGenerator(object):\n \"\"\"template generator class for feature/function template generation\n \"\"\"\n\n def __init__(self):\n pass\n\n def generate_template_XY(self, attr_name, x_spec, y_spec, template):\n r\"\"\"generate template XY for the feature extraction\n \n Args:\n attr_name: string representing the attribute name of the atomic observations/tokens\n x_spec: tuple of the form (n-gram, range)\n that is we can specify the n-gram features required in a specific range/window\n for an observation token ``attr_name``\n y_spec: string specifying how to join/combine the features on the X observation level\n with labels on the Y level. \n \n Example of passed options would be:\n - one state (i.e. current state) by passing ``1-state`` or \n - two states (i.e. current and previous state) by passing ``2-states`` or\n - one and two states (i.e. mix/combine observation features with one state model and two states models)\n by passing ``1-state:2-states``. Higher order models support models with states > 2 such as ``3-states`` and above. \n template: dictionary that accumulates the generated feature template for all attributes\n \n Example:\n \n suppose we have `word` attribute referenced by 'w' and we need to use the current word\n with the current label (i.e. unigram of words with the current label) in a range of (0,1)\n \n ::\n \n templateXY = {}\n generate_template_XY('w', ('1-gram', range(0, 1)), '1-state', templateXY)\n \n we can also specify a two states/labels features at the Y level\n \n ::\n \n generate_template_XY('w', ('1-gram', range(0, 1)), '1-state:2-states', templateXY)\n \n .. 
note ::\n this can be applied for every attribute name and accumulated in the `template` dictionary\n \"\"\"\n ngram_options, wsize = x_spec\n templateX = self._traverse_x(attr_name, ngram_options, wsize)\n templateY = self.generate_template_Y(y_spec)\n templateXY = self._mix_template_XY(templateX, templateY)\n # update the template we are building\n self._update_template(template, templateXY)\n\n def _update_template(self, template, templateXY):\n \"\"\"update the accumulated template with the current generated templateXY\n \n Args:\n template: dictionary of the accumulated template for the different offsets\n and attribute names\n templateXY: dictionary of the form ``{attr_name:{x_offset:(y_offsets)}}``\n \"\"\"\n for attr_name in templateXY:\n if attr_name in template:\n for x_offset in templateXY[attr_name]:\n template[attr_name][x_offset] = templateXY[attr_name][x_offset]\n else:\n template[attr_name] = templateXY[attr_name]\n\n def _traverse_x(self, attr_name, ngram_options, wsize):\n \"\"\"generate template on the X observation level only\n \n Args:\n attr_name: string representing the attribute name of the atomic observations/tokens\n ngram_options: string specifying the n-grams (i.e. ``1-gram``) it also supports multiple\n specification such as ``1-gram:2-gram`` where each is separated by a colon\n wsize: a range specifying the window size where the template operates\n \n \"\"\"\n options = ngram_options.split(\":\")\n l = list(wsize)\n template = {attr_name: {}}\n for option in options:\n n = int(option.split(\"-\")[0])\n ngram_list = self.generate_ngram(l, n)\n for offset in ngram_list:\n template[attr_name][offset] = None\n return template\n\n def generate_template_Y(self, ngram_options):\n \"\"\"generate template on the Y labels level\n \n Args:\n ngram_options: string specifying the number of states to be use (i.e. ``1-state``).\n It also supports multiple specification such as ``1-state:2-states`` \n where each is separated by a colon\n \n \"\"\"\n template = {\"Y\": []}\n options = ngram_options.split(\":\")\n for option in options:\n max_order = int(option.split(\"-\")[0])\n template[\"Y\"] += self._traverse_y(max_order, accumulative=False)[\"Y\"]\n return template\n\n @staticmethod\n def _traverse_y(max_order, accumulative=True):\n \"\"\"generate the y template\"\"\"\n attr_name = \"Y\"\n template = {attr_name: []}\n if accumulative:\n for j in range(max_order):\n offsets_y = [-i for i in range(j + 1)]\n offsets_y = tuple(reversed(offsets_y))\n template[attr_name].append(offsets_y)\n else:\n offsets_y = [-i for i in range(max_order)]\n offsets_y = tuple(reversed(offsets_y))\n template[attr_name].append(offsets_y)\n\n return template\n\n @staticmethod\n def _mix_template_XY(templateX, templateY):\n \"\"\"mix and join the template on the X observation level with the Y level\n \n Args:\n templateX: dictionary of the form ``{attr_name:{x_offset:None}}``\n e.g. ``{'w': {(0,): None}}``\n templateY: dictionary of the form ``{'Y':[y_offset]}``\n e.g. ``{'Y': [(0,), (-1, 0)]}``\n .. 
note::\n \n - x_offset is a tuple of offsets representing the ngram options needed \n such as (0,) for unigram and (-1,0) for bigram\n \n - y_offset is a tuple of offsets representing the number of states options needed \n such as (0,) for 1-state and (-1,0) for 2-states and (-2,-1,0) for 3-states\n \"\"\"\n template_XY = deepcopy(templateX)\n for attr_name in template_XY:\n for offset_x in template_XY[attr_name]:\n template_XY[attr_name][offset_x] = tuple(templateY[\"Y\"])\n return template_XY\n\n @staticmethod\n def generate_ngram(l, n):\n \"\"\"n-gram generator based on the length of the window and the ngram option\n \n Args:\n l: list of positions of the range representing the window size (i.e. list(wsize))\n n: integer representing the n-gram option (i.e. 1 for unigram, 2 for bigram, etc..)\n \"\"\"\n ngram_list = []\n for i in range(0, len(l)):\n elem = tuple(l[i : i + n])\n if len(elem) != n:\n break\n ngram_list.append(elem)\n\n return ngram_list\n\n @staticmethod\n def generate_combinations(n):\n \"\"\"generates all possible combinations based on the maximum number of ngrams n\n \n Args:\n n: integer specifying the maximum/greatest ngram option\n \n \"\"\"\n option_names = []\n start = 1\n for i in range(start, n + 1):\n option_names.append(\"{}-gram\".format(i))\n\n config = {}\n for i in range(start, n + 1):\n config[i] = list(combinations(option_names, i))\n\n config_combinations = {}\n for c_list in config.values():\n for c_tup in c_list:\n key_name = \":\".join(c_tup)\n config_combinations[key_name] = set()\n elemkeys = config_combinations.keys()\n for option_i in config_combinations:\n s = config_combinations[option_i]\n for option_j in elemkeys:\n s.add(option_j)\n config_combinations[option_i] = s\n return config_combinations\n\n\nclass BoundNode(object):\n \"\"\"boundary entity class used when generating all possible partitions within specified constraint\n \n Args:\n parent: instance of :class:`BoundNode` \n boundary: tuple (u,v) representing the current boundary\n \"\"\"\n\n def __init__(self, parent, boundary):\n self.parent = parent\n self.boundary = boundary\n self.children = []\n\n def add_child(self, child):\n \"\"\"add link to the child nodes\"\"\"\n self.children.append(child)\n\n def get_child(self):\n \"\"\"retrieve child nodes\"\"\"\n return self.children.pop()\n\n def get_signature(self):\n \"\"\"retrieve the id of the node\"\"\"\n return id(self)\n\n\ndef generate_partitions(\n boundary, L, patt_len, bound_node_map, depth_node_map, parent_node, depth=1\n):\n \"\"\"generate all possible partitions within the range of segment length and model order\n \n it transforms the partitions into a tree of nodes starting from the root node\n that uses `boundary` argument in its construction\n \n Args:\n boundary: tuple (u,v) representing the current boundary in a sequence\n L: integer representing the maximum length a segment could be constructed\n patt_len: integer representing the maximum model order\n bound_node_map: dictionary that keeps track of all possible partitions represented as\n instances of :class:`BoundNode`\n depth_node_map: dictionary that arranges the generated nodes by their depth in the tree\n parent_node: instance of :class:`BoundNode` or None in case of the root node\n depth: integer representing the maximum depth of the tree to be reached before stopping \n \"\"\"\n if depth >= patt_len:\n return\n\n if parent_node:\n if boundary in bound_node_map:\n curr_node = bound_node_map[boundary]\n else:\n curr_node = BoundNode(parent_node, boundary)\n 
bound_node_map[boundary] = curr_node\n if depth in depth_node_map:\n depth_node_map[depth].append(curr_node)\n else:\n depth_node_map[depth] = [curr_node]\n else:\n # setup root node\n curr_node = BoundNode(None, boundary)\n bound_node_map[boundary] = curr_node\n depth_node_map[depth] = [curr_node]\n\n u = boundary[0] - 1\n v = u\n depth += 1\n\n for d in range(L):\n if u - d < 1:\n break\n upd_boundary = (u - d, v)\n if upd_boundary in bound_node_map:\n child = bound_node_map[upd_boundary]\n else:\n child = BoundNode(curr_node, upd_boundary)\n bound_node_map[upd_boundary] = child\n if depth in depth_node_map:\n depth_node_map[depth].append(child)\n else:\n depth_node_map[depth] = [child]\n curr_node.add_child(child)\n generate_partitions(\n upd_boundary, L, patt_len, bound_node_map, depth_node_map, child, depth\n )\n\n\ndef generate_partition_boundaries(depth_node_map):\n \"\"\"generate partitions of the boundaries generated in :func:`generate_partitions` function\n \n Args:\n depth_node_map: dictionary that arranges the generated nodes by their depth in the tree\n it is constructed using :func:`generate_partitions` function\n \"\"\"\n g = {}\n depths = sorted(depth_node_map, reverse=True)\n\n for depth in depths:\n g[depth] = []\n nodes = depth_node_map[depth]\n for curr_node in nodes:\n l = []\n l.append(curr_node.boundary)\n while True:\n curr_node = curr_node.parent\n if curr_node:\n l.append(curr_node.boundary)\n else:\n g[depth].append(l)\n break\n\n return g\n\n\ndef delete_directory(directory):\n if os.path.isdir(directory):\n shutil.rmtree(directory)\n\n\ndef delete_file(filepath):\n check = os.path.isfile(filepath)\n if check:\n os.remove(filepath)\n\n\ndef create_directory(folder_name, directory=\"current\"):\n \"\"\"create directory/folder (if it does not exist) and returns the path of the directory\n \n Args:\n folder_name: string representing the name of the folder to be created\n \n Keyword Arguments:\n directory: string representing the directory where to create the folder\n if `current` then the folder will be created in the current directory\n \"\"\"\n if directory == \"current\":\n path_current_dir = os.path.dirname(__file__)\n else:\n path_current_dir = directory\n path_new_dir = os.path.join(path_current_dir, folder_name)\n if not os.path.exists(path_new_dir):\n os.makedirs(path_new_dir)\n return path_new_dir\n\n\ndef generate_datetime_str():\n \"\"\"generate string composed of the date and time\"\"\"\n datetime_now = datetime.now()\n datetime_str = \"{}_{}_{}-{}_{}_{}_{}\".format(\n datetime_now.year,\n datetime_now.month,\n datetime_now.day,\n datetime_now.hour,\n datetime_now.minute,\n datetime_now.second,\n datetime_now.microsecond,\n )\n return datetime_str\n\n\n# def vectorized_logsumexp(vec):\n# \"\"\"vectorized version of log sum exponential operation\n#\n# Args:\n# vec: numpy vector where entries are in the log domain\n# \"\"\"\n# with numpy.errstate(invalid='warn'):\n# max_a = numpy.max(vec)\n# try:\n# res = max_a + numpy.log(numpy.sum(numpy.exp(vec - max_a)))\n# except Warning:\n# res = max_a\n# return(res)\n\n\ndef vectorized_logsumexp(vec):\n \"\"\"vectorized version of log sum exponential operation\n \n Args:\n vec: numpy vector where entries are in the log domain\n \"\"\"\n max_a = numpy.max(vec)\n if max_a != -numpy.inf:\n return max_a + numpy.log(numpy.sum(numpy.exp(vec - max_a)))\n # case where max_a == -numpy.inf\n return max_a\n\n\ndef generate_updated_model(\n modelparts_dir,\n modelrepr_class,\n model_class,\n aextractor_obj,\n 
fextractor_class,\n seqrepresenter_class,\n ascaler_class=None,\n):\n \"\"\"update/regenerate CRF models using the saved parts/components\n \n Args:\n modelparts_dir: string representing the directory where model parts are saved\n modelrepr_class: name of the model representation class to be used which has \n suffix `ModelRepresentation` such as :class:`HOCRFADModelRepresentation`\n model_class: name of the CRF model class such as :class:`HOCRFAD`\n aextractor_class: name of the attribute extractor class such as :class:`NERSegmentAttributeExtractor`\n fextractor_class: name of the feature extractor class used such as :class:`HOFeatureExtractor`\n seqrepresenter_class: name of the sequence representer class such as :class:`SeqsRepresenter`\n ascaler_class: name of the attribute scaler class such as :class:`AttributeScaler`\n \n .. note::\n \n This function is equivalent to :func:`generate_trained_model` function. However, this function\n uses explicit specification of the arguments (i.e. specifying explicitly the classes to be used)\n \n \n \"\"\"\n from pyseqlab.attributes_extraction import GenericAttributeExtractor\n\n ycodebook = ReaderWriter.read_data(os.path.join(modelparts_dir, \"MR_Ycodebook\"))\n mfeatures = ReaderWriter.read_data(os.path.join(modelparts_dir, \"MR_modelfeatures\"))\n mfeatures_codebook = ReaderWriter.read_data(\n os.path.join(modelparts_dir, \"MR_modelfeaturescodebook\")\n )\n L = ReaderWriter.read_data(os.path.join(modelparts_dir, \"MR_L\"))\n\n # generate model representation\n new_mrepr = modelrepr_class()\n new_mrepr.modelfeatures = mfeatures\n new_mrepr.modelfeatures_codebook = mfeatures_codebook\n new_mrepr.Y_codebook = ycodebook\n new_mrepr.L = L\n new_mrepr.generate_instance_properties()\n\n # generate attribute extractor\n if type(aextractor_obj) == type(GenericAttributeExtractor): # case it is a class\n new_attrextractor = aextractor_obj()\n else: # case it is an instance of a class\n new_attrextractor = aextractor_obj\n\n # generate feature extractor\n templateX = ReaderWriter.read_data(os.path.join(modelparts_dir, \"FE_templateX\"))\n templateY = ReaderWriter.read_data(os.path.join(modelparts_dir, \"FE_templateY\"))\n new_fextractor = fextractor_class(templateX, templateY, new_attrextractor.attr_desc)\n\n # generate sequence representer\n new_seqrepr = seqrepresenter_class(new_attrextractor, new_fextractor)\n\n # generate attribute scaler if applicable\n if ascaler_class:\n scaling_info = ReaderWriter.read_data(\n os.path.join(modelparts_dir, \"AS_scalinginfo\")\n )\n method = ReaderWriter.read_data(os.path.join(modelparts_dir, \"AS_method\"))\n new_attrscaler = ascaler_class(scaling_info, method)\n new_seqrepr.attr_scaler = new_attrscaler\n\n # generate crf instance\n new_crfmodel = model_class(new_mrepr, new_seqrepr, {})\n new_crfmodel.weights = ReaderWriter.read_data(\n os.path.join(modelparts_dir, \"weights\")\n )\n return new_crfmodel\n\n\ndef generate_trained_model(modelparts_dir, aextractor_obj):\n \"\"\"regenerate trained CRF models using the saved trained model parts/components\n \n Args:\n modelparts_dir: string representing the directory where model parts are saved\n aextractor_class: name of the attribute extractor class such as :class:`NERSegmentAttributeExtractor`\n\n \"\"\"\n # parse the class description file\n class_desc = []\n with open(os.path.join(modelparts_dir, \"class_desc.txt\"), \"r\") as f:\n for line in f:\n class_desc.append(line.strip())\n\n from pyseqlab.features_extraction import (\n HOFeatureExtractor,\n 
FOFeatureExtractor,\n SeqsRepresenter,\n )\n\n seqrepresenter_class = SeqsRepresenter\n if class_desc[1] == \"HOCRFAD\":\n from pyseqlab.ho_crf_ad import HOCRFAD, HOCRFADModelRepresentation\n\n modelrepr_class = HOCRFADModelRepresentation\n model_class = HOCRFAD\n fextractor_class = HOFeatureExtractor\n elif class_desc[1] == \"HOCRF\":\n from pyseqlab.ho_crf import HOCRF, HOCRFModelRepresentation\n\n modelrepr_class = HOCRFModelRepresentation\n model_class = HOCRF\n fextractor_class = HOFeatureExtractor\n elif class_desc[1] == \"HOSemiCRFAD\":\n from pyseqlab.hosemi_crf_ad import HOSemiCRFAD, HOSemiCRFADModelRepresentation\n\n modelrepr_class = HOSemiCRFADModelRepresentation\n model_class = HOSemiCRFAD\n fextractor_class = HOFeatureExtractor\n elif class_desc[1] == \"HOSemiCRF\":\n from pyseqlab.hosemi_crf import HOSemiCRF, HOSemiCRFModelRepresentation\n\n modelrepr_class = HOSemiCRFModelRepresentation\n model_class = HOSemiCRF\n fextractor_class = HOFeatureExtractor\n elif class_desc[1] == \"FirstOrderCRF\":\n from pyseqlab.fo_crf import FirstOrderCRF, FirstOrderCRFModelRepresentation\n\n modelrepr_class = FirstOrderCRFModelRepresentation\n model_class = FirstOrderCRF\n fextractor_class = FOFeatureExtractor\n\n # generate attribute scaler if applicable\n if class_desc[-1] != \"None\":\n from pyseqlab.attributes_extraction import AttributeScaler\n\n ascaler_class = AttributeScaler\n else:\n ascaler_class = None\n\n trained_model = generate_updated_model(\n modelparts_dir,\n modelrepr_class,\n model_class,\n aextractor_obj,\n fextractor_class,\n seqrepresenter_class,\n ascaler_class,\n )\n\n return trained_model\n\n\ndef split_data(seqs_id, options):\n r\"\"\"utility function for splitting dataset (i.e. training/testing and cross validation)\n \n Args:\n seqs_id: list of processed sequence ids\n options: dictionary comprising of the options on how to split data\n \n Example:\n To perform cross validation, we need to specify\n - cross-validation for the `method`\n - the number of folds for the `k_fold`\n \n ::\n \n options = {'method':'cross_validation',\n 'k_fold':number\n }\n \n To perform random splitting, we need to specify\n - random for the `method`\n - number of splits for the `num_splits`\n - size of the training set in percentage for the `trainset_size`\n \n ::\n \n options = {'method':'random',\n 'num_splits':number,\n 'trainset_size':percentage\n }\n \"\"\"\n N = len(seqs_id)\n data_split = {}\n method = options.get(\"method\")\n if method == None:\n method = \"cross_validation\"\n if method == \"cross_validation\":\n k_fold = options.get(\"k_fold\")\n if type(k_fold) != int:\n # use 10 fold cross validation\n k_fold = 10\n elif k_fold <= 0:\n k_fold = 10\n batch_size = int(numpy.ceil(N / k_fold))\n test_seqs = seqs_id.copy()\n seqs_len = len(test_seqs)\n # numpy.random.shuffle(test_seqs)\n indx = numpy.arange(0, seqs_len + 1, batch_size)\n if indx[-1] < seqs_len:\n indx = numpy.append(indx, [seqs_len])\n\n for i in range(len(indx) - 1):\n data_split[i] = {}\n current_test_seqs = test_seqs[indx[i] : indx[i + 1]]\n data_split[i][\"test\"] = current_test_seqs\n data_split[i][\"train\"] = list(set(seqs_id) - set(current_test_seqs))\n\n elif method == \"random\":\n num_splits = options.get(\"num_splits\")\n if type(num_splits) != int:\n num_splits = 5\n trainset_size = options.get(\"trainset_size\")\n if type(trainset_size) != int:\n # 80% of the data set is training and 20% for testing\n trainset_size = 80\n elif trainset_size <= 0 or trainset_size >= 100:\n trainset_size = 80\n for 
i in range(num_splits):\n data_split[i] = {}\n current_train_seqs = numpy.random.choice(\n seqs_id, int(N * trainset_size / 100), replace=False\n )\n data_split[i][\"train\"] = list(current_train_seqs)\n data_split[i][\"test\"] = list(set(seqs_id) - set(current_train_seqs))\n\n return data_split\n\n\n\"\"\"split data based on sequences length\n we need to execute the three functions in order:\n (1) :func:`group_seqs_by_length`\n (2) :func:`weighted_sample`\n (3) :func:`aggregate_weightedsample`\n\"\"\"\n\n\ndef group_seqs_by_length(seqs_info):\n \"\"\"group sequences by their length\n \n Args:\n seqs_info: dictionary comprsing info about the sequences\n it has this form {seq_id:{T:length of sequence}}\n \n .. note::\n \n sequences that are with unique sequence length are grouped together as singeltons\n \"\"\"\n grouped_seqs = {}\n for seq_id, seq_info in seqs_info.items():\n T = seq_info[\"T\"]\n if T in grouped_seqs:\n grouped_seqs[T].append(seq_id)\n else:\n grouped_seqs[T] = [seq_id]\n # loop to regroup single sequences\n singelton = [T for T, seqs_id in grouped_seqs.items() if len(seqs_id) == 1]\n singelton_seqs = []\n for T in singelton:\n singelton_seqs += grouped_seqs[T]\n del grouped_seqs[T]\n\n grouped_seqs[\"singleton\"] = singelton_seqs\n return grouped_seqs\n\n\ndef weighted_sample(grouped_seqs, trainset_size):\n \"\"\"get a random split of the grouped sequences\n \n Args:\n grouped_seqs: dictionary of the grouped sequences based on their length\n it is obtained using :func:`group_seqs_by_length` function\n trainset_size: integer representing the size of the training set in percentage\n \n \"\"\"\n options = {\"method\": \"random\", \"num_splits\": 1, \"trainset_size\": trainset_size}\n wsample = {}\n for group_var, seqs_id in grouped_seqs.items():\n # quota = trainset_size*count_seqs[group_var]/total\n data_split = split_data(seqs_id, options)\n wsample[group_var] = data_split[0]\n return wsample\n\n\ndef aggregate_weightedsample(w_sample):\n \"\"\"represent the random picked sample for training/testing\n \n Args:\n w_sample: dictionary representing a random split of the grouped sequences\n by their length. 
it is obtained using :func:`weighted_sample` function\n \"\"\"\n wdata_split = {\"train\": [], \"test\": []}\n for grouping_var in w_sample:\n for data_cat in w_sample[grouping_var]:\n wdata_split[data_cat] += w_sample[grouping_var][data_cat]\n return {0: wdata_split}\n\n\n##################################\n\n\ndef nested_cv(seqs_id, outer_kfold, inner_kfold):\n \"\"\"generate nested cross-validation division of sequence ids\n \"\"\"\n outer_split = split_data(\n seqs_id, {\"method\": \"cross_validation\", \"k_fold\": outer_kfold}\n )\n cv_hierarchy = {}\n for outerfold, outer_datasplit in outer_split.items():\n cv_hierarchy[\"{}_{}\".format(\"outer\", outerfold)] = outer_datasplit\n curr_train_seqs = outer_datasplit[\"train\"]\n inner_split = split_data(\n curr_train_seqs, {\"method\": \"cross_validation\", \"k_fold\": inner_kfold}\n )\n for innerfold, inner_datasplit in inner_split.items():\n cv_hierarchy[\n \"{}_{}_{}_{}\".format(\"outer\", outerfold, \"inner\", innerfold)\n ] = inner_datasplit\n return cv_hierarchy\n\n\ndef get_conll00():\n current_dir = os.path.dirname(os.path.realpath(__file__))\n root_dir = os.path.dirname(current_dir)\n files_info = {\n \"train_short_main.txt\": (\"main\", True, \" \"),\n \"train_short_none.txt\": ((\"w\", \"pos\"), True, \" \"),\n \"train_short_per_sequence.txt\": (\"per_sequence\", True, \" \"),\n }\n for file_name in files_info:\n parser = DataFileParser()\n print(file_name)\n file_path = os.path.join(root_dir, \"tests\", \"dataset\", \"conll00\", file_name)\n for seq in parser.read_file(\n file_path,\n header=files_info[file_name][0],\n y_ref=files_info[file_name][1],\n column_sep=files_info[file_name][2],\n ):\n print(seq)\n\n\nif __name__ == \"__main__\":\n pass\n # get_conll00()\n" ]
[ [ "numpy.append", "numpy.ceil", "numpy.exp", "numpy.arange", "numpy.max" ] ]
r-peng/pyscf
[ "9a14f9bcc63bc75f5939cb4d00eb47861d8d8989" ]
[ "pyscf/cc/__init__.py" ]
[ "# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n'''\nCoupled Cluster\n===============\n\nSimple usage::\n\n >>> from pyscf import gto, scf, cc\n >>> mol = gto.M(atom='H 0 0 0; H 0 0 1')\n >>> mf = scf.RHF(mol).run()\n >>> cc.CCSD(mf).run()\n\n:func:`cc.CCSD` returns an instance of CCSD class. Followings are parameters\nto control CCSD calculation.\n\n verbose : int\n Print level. Default value equals to :class:`Mole.verbose`\n max_memory : float or int\n Allowed memory in MB. Default value equals to :class:`Mole.max_memory`\n conv_tol : float\n converge threshold. Default is 1e-7.\n conv_tol_normt : float\n converge threshold for norm(t1,t2). Default is 1e-5.\n max_cycle : int\n max number of iterations. Default is 50.\n diis_space : int\n DIIS space size. Default is 6.\n diis_start_cycle : int\n The step to start DIIS. Default is 0.\n direct : bool\n AO-direct CCSD. Default is False.\n async_io : bool\n Allow for asynchronous function execution. Default is True.\n incore_complete : bool\n Avoid all I/O. Default is False.\n frozen : int or list\n If integer is given, the inner-most orbitals are frozen from CC\n amplitudes. Given the orbital indices (0-based) in a list, both\n occupied and virtual orbitals can be frozen in CC calculation.\n\n\nSaved results\n\n converged : bool\n CCSD converged or not\n e_tot : float\n Total CCSD energy (HF + correlation)\n t1, t2 : \n t1[i,a], t2[i,j,a,b] (i,j in occ, a,b in virt)\n l1, l2 : \n Lambda amplitudes l1[i,a], l2[i,j,a,b] (i,j in occ, a,b in virt)\n'''\n\nfrom pyscf.cc import ccsd\nfrom pyscf.cc import ccsd_lambda\nfrom pyscf.cc import ccsd_rdm\nfrom pyscf.cc import addons\nfrom pyscf.cc import rccsd\nfrom pyscf.cc import uccsd\nfrom pyscf.cc import gccsd\nfrom pyscf.cc import eom_rccsd\nfrom pyscf.cc import eom_uccsd\nfrom pyscf.cc import eom_gccsd\nfrom pyscf import scf\n\ndef CCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):\n __doc__ = ccsd.CCSD.__doc__\n if isinstance(mf, scf.uhf.UHF):\n return UCCSD(mf, frozen, mo_coeff, mo_occ)\n elif isinstance(mf, scf.ghf.GHF):\n return GCCSD(mf, frozen, mo_coeff, mo_occ)\n else:\n return RCCSD(mf, frozen, mo_coeff, mo_occ)\n\nscf.hf.SCF.CCSD = CCSD\n\n\ndef RCCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):\n __doc__ = ccsd.CCSD.__doc__\n import numpy\n from pyscf import lib\n from pyscf.soscf import newton_ah\n from pyscf.cc import dfccsd\n\n if isinstance(mf, scf.uhf.UHF):\n raise RuntimeError('RCCSD cannot be used with UHF method.')\n elif isinstance(mf, scf.rohf.ROHF):\n lib.logger.warn(mf, 'RCCSD method does not support ROHF method. 
ROHF object '\n 'is converted to UHF object and UCCSD method is called.')\n mf = scf.addons.convert_to_uhf(mf)\n return UCCSD(mf, frozen, mo_coeff, mo_occ)\n\n if isinstance(mf, newton_ah._CIAH_SOSCF) or not isinstance(mf, scf.hf.RHF):\n mf = scf.addons.convert_to_rhf(mf)\n\n if getattr(mf, 'with_df', None):\n return dfccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)\n\n elif numpy.iscomplexobj(mo_coeff) or numpy.iscomplexobj(mf.mo_coeff):\n return rccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)\n\n else:\n return ccsd.CCSD(mf, frozen, mo_coeff, mo_occ)\n\n\ndef UCCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):\n __doc__ = uccsd.UCCSD.__doc__\n from pyscf.soscf import newton_ah\n\n if isinstance(mf, newton_ah._CIAH_SOSCF) or not isinstance(mf, scf.uhf.UHF):\n mf = scf.addons.convert_to_uhf(mf)\n\n if getattr(mf, 'with_df', None):\n raise NotImplementedError('DF-UCCSD')\n else:\n return uccsd.UCCSD(mf, frozen, mo_coeff, mo_occ)\n\n\ndef GCCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):\n __doc__ = gccsd.GCCSD.__doc__\n from pyscf.soscf import newton_ah\n\n if isinstance(mf, newton_ah._CIAH_SOSCF) or not isinstance(mf, scf.ghf.GHF):\n mf = scf.addons.convert_to_ghf(mf)\n\n if getattr(mf, 'with_df', None):\n raise NotImplementedError('DF-GCCSD')\n else:\n return gccsd.GCCSD(mf, frozen, mo_coeff, mo_occ)\n\n\ndef FNOCCSD(mf, thresh=1e-6, pct_occ=None, nvir_act=None):\n \"\"\"Frozen natural orbital CCSD\n\n Attributes:\n thresh : float\n Threshold on NO occupation numbers. Default is 1e-6.\n pct_occ : float\n Percentage of total occupation number. Default is None. If present, overrides `thresh`.\n \"\"\"\n #from pyscf import mp\n #pt = mp.MP2(mf).set(verbose=0).run()\n from pyscf.mp.mp2 import MP2\n pt = MP2(mf).set(verbose=0).run()\n frozen, no_coeff = pt.make_fno(thresh=thresh, pct_occ=pct_occ, nvir_act=nvir_act)\n #pt_no = mp.MP2(mf, frozen=frozen, mo_coeff=no_coeff).set(verbose=0).run() #avoid DF\n pt_no = MP2(mf, frozen=frozen, mo_coeff=no_coeff).set(verbose=0).run()\n mycc = ccsd.CCSD(mf, frozen=frozen, mo_coeff=no_coeff) #avoid DF\n mycc.delta_emp2 = pt.e_corr - pt_no.e_corr\n from pyscf.lib import logger\n def _finalize(self):\n '''Hook for dumping results and clearing up the object.'''\n if self.converged:\n logger.info(self, 'FNO-%s converged', self.__class__.__name__)\n else:\n logger.note(self, 'FNO-%s not converged', self.__class__.__name__)\n logger.note(self, 'E(FNO-%s) = %.16g E_corr = %.16g',\n self.__class__.__name__, self.e_tot, self.e_corr)\n logger.note(self, 'E(FNO-%s+delta-MP2) = %.16g E_corr = %.16g',\n self.__class__.__name__, self.e_tot+self.delta_emp2, \n self.e_corr+self.delta_emp2)\n return self\n mycc._finalize = _finalize.__get__(mycc, mycc.__class__)\n return mycc\n" ]
[ [ "numpy.iscomplexobj" ] ]
toobaz/statsmodels
[ "2d4aad9a14619ce0c84d4c7bca9dacd66b2be566" ]
[ "statsmodels/tsa/vector_ar/irf.py" ]
[ "\"\"\"\nImpulse reponse-related code\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\nimport numpy.linalg as la\nimport scipy.linalg as L\n\nfrom scipy import stats\n\nfrom statsmodels.tools.decorators import cache_readonly\nfrom statsmodels.tools.tools import chain_dot\n#from statsmodels.tsa.api import VAR\n\nimport statsmodels.tsa.tsatools as tsa\nimport statsmodels.tsa.vector_ar.plotting as plotting\nimport statsmodels.tsa.vector_ar.util as util\n\nmat = np.array\n\nclass BaseIRAnalysis(object):\n \"\"\"\n Base class for plotting and computing IRF-related statistics, want to be\n able to handle known and estimated processes\n \"\"\"\n\n def __init__(self, model, P=None, periods=10, order=None, svar=False):\n self.model = model\n self.periods = periods\n self.neqs, self.lags, self.T = model.neqs, model.k_ar, model.nobs\n\n self.order = order\n\n if P is None:\n sigma = model.sigma_u\n\n # TODO, may be difficult at the moment\n # if order is not None:\n # indexer = [model.get_eq_index(name) for name in order]\n # sigma = sigma[:, indexer][indexer, :]\n\n # if sigma.shape != model.sigma_u.shape:\n # raise ValueError('variable order is wrong length')\n\n P = la.cholesky(sigma)\n\n self.P = P\n\n self.svar = svar\n\n self.irfs = model.ma_rep(periods)\n if svar:\n self.svar_irfs = model.svar_ma_rep(periods, P=P)\n else:\n self.orth_irfs = model.orth_ma_rep(periods)\n\n self.cum_effects = self.irfs.cumsum(axis=0)\n if svar:\n self.svar_cum_effects = self.svar_irfs.cumsum(axis=0)\n else:\n self.orth_cum_effects = self.orth_irfs.cumsum(axis=0)\n\n self.lr_effects = model.long_run_effects()\n if svar:\n self.svar_lr_effects = np.dot(model.long_run_effects(), P)\n else:\n self.orth_lr_effects = np.dot(model.long_run_effects(), P)\n\n\n # auxiliary stuff\n self._A = util.comp_matrix(model.coefs)\n\n def cov(self, *args, **kwargs):\n raise NotImplementedError\n\n def cum_effect_cov(self, *args, **kwargs):\n raise NotImplementedError\n\n def plot(self, orth=False, impulse=None, response=None,\n signif=0.05, plot_params=None, subplot_params=None,\n plot_stderr=True, stderr_type='asym', repl=1000,\n seed=None, component=None):\n \"\"\"\n Plot impulse responses\n\n Parameters\n ----------\n orth : bool, default False\n Compute orthogonalized impulse responses\n impulse : string or int\n variable providing the impulse\n response : string or int\n variable affected by the impulse\n signif : float (0 < signif < 1)\n Significance level for error bars, defaults to 95% CI\n subplot_params : dict\n To pass to subplot plotting funcions. 
Example: if fonts are too big,\n pass {'fontsize' : 8} or some number to your taste.\n plot_params : dict\n\n plot_stderr: bool, default True\n Plot standard impulse response error bands\n stderr_type: string\n 'asym': default, computes asymptotic standard errors\n 'mc': monte carlo standard errors (use rpl)\n repl: int, default 1000\n Number of replications for Monte Carlo and Sims-Zha standard errors\n seed: int\n np.random.seed for Monte Carlo replications\n component: array or vector of principal component indices\n \"\"\"\n periods = self.periods\n model = self.model\n svar = self.svar\n\n if orth and svar:\n raise ValueError(\"For SVAR system, set orth=False\")\n\n if orth:\n title = 'Impulse responses (orthogonalized)'\n irfs = self.orth_irfs\n elif svar:\n title = 'Impulse responses (structural)'\n irfs = self.svar_irfs\n else:\n title = 'Impulse responses'\n irfs = self.irfs\n\n if plot_stderr == False:\n stderr = None\n\n elif stderr_type not in ['asym', 'mc', 'sz1', 'sz2','sz3']:\n raise ValueError(\"Error type must be either 'asym', 'mc','sz1','sz2', or 'sz3'\")\n else:\n if stderr_type == 'asym':\n stderr = self.cov(orth=orth)\n if stderr_type == 'mc':\n stderr = self.errband_mc(orth=orth, svar=svar,\n repl=repl, signif=signif,\n seed=seed)\n if stderr_type == 'sz1':\n stderr = self.err_band_sz1(orth=orth, svar=svar,\n repl=repl, signif=signif,\n seed=seed,\n component=component)\n if stderr_type == 'sz2':\n stderr = self.err_band_sz2(orth=orth, svar=svar,\n repl=repl, signif=signif,\n seed=seed,\n component=component)\n if stderr_type == 'sz3':\n stderr = self.err_band_sz3(orth=orth, svar=svar,\n repl=repl, signif=signif,\n seed=seed,\n component=component)\n\n plotting.irf_grid_plot(irfs, stderr, impulse, response,\n self.model.names, title, signif=signif,\n subplot_params=subplot_params,\n plot_params=plot_params, stderr_type=stderr_type)\n\n def plot_cum_effects(self, orth=False, impulse=None, response=None,\n signif=0.05, plot_params=None,\n subplot_params=None, plot_stderr=True,\n stderr_type='asym', repl=1000, seed=None):\n \"\"\"\n Plot cumulative impulse response functions\n\n Parameters\n ----------\n orth : bool, default False\n Compute orthogonalized impulse responses\n impulse : string or int\n variable providing the impulse\n response : string or int\n variable affected by the impulse\n signif : float (0 < signif < 1)\n Significance level for error bars, defaults to 95% CI\n subplot_params : dict\n To pass to subplot plotting funcions. 
Example: if fonts are too big,\n pass {'fontsize' : 8} or some number to your taste.\n plot_params : dict\n\n plot_stderr: bool, default True\n Plot standard impulse response error bands\n stderr_type: string\n 'asym': default, computes asymptotic standard errors\n 'mc': monte carlo standard errors (use rpl)\n repl: int, default 1000\n Number of replications for monte carlo standard errors\n seed: int\n np.random.seed for Monte Carlo replications\n\n \"\"\"\n\n if orth:\n title = 'Cumulative responses responses (orthogonalized)'\n cum_effects = self.orth_cum_effects\n lr_effects = self.orth_lr_effects\n else:\n title = 'Cumulative responses'\n cum_effects = self.cum_effects\n lr_effects = self.lr_effects\n\n if stderr_type not in ['asym', 'mc']:\n raise TypeError\n else:\n if stderr_type == 'asym':\n stderr = self.cum_effect_cov(orth=orth)\n if stderr_type == 'mc':\n stderr = self.cum_errband_mc(orth=orth, repl=repl,\n signif=signif, seed=seed)\n if not plot_stderr:\n stderr = None\n\n plotting.irf_grid_plot(cum_effects, stderr, impulse, response,\n self.model.names, title, signif=signif,\n hlines=lr_effects, subplot_params=subplot_params,\n plot_params=plot_params, stderr_type=stderr_type)\n\nclass IRAnalysis(BaseIRAnalysis):\n \"\"\"\n Impulse response analysis class. Computes impulse responses, asymptotic\n standard errors, and produces relevant plots\n\n Parameters\n ----------\n model : VAR instance\n\n Notes\n -----\n Using Lutkepohl (2005) notation\n \"\"\"\n def __init__(self, model, P=None, periods=10, order=None, svar=False):\n BaseIRAnalysis.__init__(self, model, P=P, periods=periods,\n order=order, svar=svar)\n\n self.cov_a = model._cov_alpha\n self.cov_sig = model._cov_sigma\n\n # memoize dict for G matrix function\n self._g_memo = {}\n\n def cov(self, orth=False):\n \"\"\"\n Compute asymptotic standard errors for impulse response coefficients\n\n Notes\n -----\n Lutkepohl eq 3.7.5\n\n Returns\n -------\n \"\"\"\n if orth:\n return self._orth_cov()\n\n covs = self._empty_covm(self.periods + 1)\n covs[0] = np.zeros((self.neqs ** 2, self.neqs ** 2))\n for i in range(1, self.periods + 1):\n Gi = self.G[i - 1]\n covs[i] = chain_dot(Gi, self.cov_a, Gi.T)\n\n return covs\n\n def errband_mc(self, orth=False, svar=False, repl=1000,\n signif=0.05, seed=None, burn=100):\n \"\"\"\n IRF Monte Carlo integrated error bands\n \"\"\"\n model = self.model\n periods = self.periods\n if svar == True:\n return model.sirf_errband_mc(orth=orth, repl=repl, T=periods,\n signif=signif, seed=seed,\n burn=burn, cum=False)\n else:\n return model.irf_errband_mc(orth=orth, repl=repl, T=periods,\n signif=signif, seed=seed,\n burn=burn, cum=False)\n def err_band_sz1(self, orth=False, svar=False, repl=1000,\n signif=0.05, seed=None, burn=100, component=None):\n \"\"\"\n IRF Sims-Zha error band method 1. Assumes symmetric error bands around\n mean.\n\n Parameters\n ----------\n orth : bool, default False\n Compute orthogonalized impulse responses\n repl : int, default 1000\n Number of MC replications\n signif : float (0 < signif < 1)\n Significance level for error bars, defaults to 95% CI\n seed : int, default None\n np.random seed\n burn : int, default 100\n Number of initial simulated obs to discard\n component : neqs x neqs array, default to largest for each\n Index of column of eigenvector/value to use for each error band\n Note: period of impulse (t=0) is not included when computing\n principle component\n\n References\n ----------\n Sims, Christopher A., and Tao Zha. 1999. 
\"Error Bands for Impulse\n Response\". Econometrica 67: 1113-1155.\n \"\"\"\n\n model = self.model\n periods = self.periods\n if orth:\n irfs = self.orth_irfs\n elif svar:\n irfs = self.svar_irfs\n else:\n irfs = self.irfs\n neqs = self.neqs\n irf_resim = model.irf_resim(orth=orth, repl=repl, T=periods, seed=seed,\n burn=100)\n q = util.norm_signif_level(signif)\n\n W, eigva, k =self._eigval_decomp_SZ(irf_resim)\n\n if component != None:\n if np.shape(component) != (neqs,neqs):\n raise ValueError(\"Component array must be \" + str(neqs) + \" x \" + str(neqs))\n if np.argmax(component) >= neqs*periods:\n raise ValueError(\"Atleast one of the components does not exist\")\n else:\n k = component\n\n # here take the kth column of W, which we determine by finding the largest eigenvalue of the covaraince matrix\n lower = np.copy(irfs)\n upper = np.copy(irfs)\n for i in xrange(neqs):\n for j in xrange(neqs):\n lower[1:,i,j] = irfs[1:,i,j] + W[i,j,:,k[i,j]]*q*np.sqrt(eigva[i,j,k[i,j]])\n upper[1:,i,j] = irfs[1:,i,j] - W[i,j,:,k[i,j]]*q*np.sqrt(eigva[i,j,k[i,j]])\n\n\n return lower, upper\n\n def err_band_sz2(self, orth=False, repl=1000, signif=0.05,\n seed=None, burn=100, component=None):\n \"\"\"\n IRF Sims-Zha error band method 2.\n\n This method Does not assume symmetric error bands around mean.\n\n Parameters\n ----------\n orth : bool, default False\n Compute orthogonalized impulse responses\n repl : int, default 1000\n Number of MC replications\n signif : float (0 < signif < 1)\n Significance level for error bars, defaults to 95% CI\n seed : int, default None\n np.random seed\n burn : int, default 100\n Number of initial simulated obs to discard\n component : neqs x neqs array, default to largest for each\n Index of column of eigenvector/value to use for each error band\n Note: period of impulse (t=0) is not included when computing\n principle component\n\n References\n ----------\n Sims, Christopher A., and Tao Zha. 1999. \"Error Bands for Impulse\n Response\". Econometrica 67: 1113-1155.\n \"\"\"\n model = self.model\n periods = self.periods\n if orth:\n irfs = self.orth_irfs\n elif svar:\n irfs = self.svar_irfs\n else:\n irfs = self.irfs\n neqs = self.neqs\n irf_resim = model.irf_resim(orth=orth, repl=repl, T=periods, seed=seed,\n burn=100)\n\n W, eigva, k = self._eigval_decomp_SZ(irf_resim)\n\n if component != None:\n if np.shape(component) != (neqs,neqs):\n raise ValueError(\"Component array must be \" + str(neqs) + \" x \" + str(neqs))\n if np.argmax(component) >= neqs*periods:\n raise ValueError(\"Atleast one of the components does not exist\")\n else:\n k = component\n\n gamma = np.zeros((repl, periods+1, neqs, neqs))\n for p in xrange(repl):\n for i in xrange(neqs):\n for j in xrange(neqs):\n gamma[p,1:,i,j] = W[i,j,k[i,j],:] * irf_resim[p,1:,i,j]\n\n gamma_sort = np.sort(gamma, axis=0) #sort to get quantiles\n indx = round(signif/2*repl)-1,round((1-signif/2)*repl)-1\n\n lower = np.copy(irfs)\n upper = np.copy(irfs)\n for i in xrange(neqs):\n for j in xrange(neqs):\n lower[:,i,j] = irfs[:,i,j] + gamma_sort[indx[0],:,i,j]\n upper[:,i,j] = irfs[:,i,j] + gamma_sort[indx[1],:,i,j]\n\n return lower, upper\n\n def err_band_sz3(self, orth=False, repl=1000, signif=0.05,\n seed=None, burn=100, component=None):\n \"\"\"\n IRF Sims-Zha error band method 3. 
Does not assume symmetric error bands around mean.\n\n Parameters\n ----------\n orth : bool, default False\n Compute orthogonalized impulse responses\n repl : int, default 1000\n Number of MC replications\n signif : float (0 < signif < 1)\n Significance level for error bars, defaults to 95% CI\n seed : int, default None\n np.random seed\n burn : int, default 100\n Number of initial simulated obs to discard\n component : vector length neqs, default to largest for each\n Index of column of eigenvector/value to use for each error band\n Note: period of impulse (t=0) is not included when computing\n principle component\n\n References\n ----------\n Sims, Christopher A., and Tao Zha. 1999. \"Error Bands for Impulse\n Response\". Econometrica 67: 1113-1155.\n \"\"\"\n\n model = self.model\n periods = self.periods\n if orth:\n irfs = self.orth_irfs\n elif svar:\n irfs = self.svar_irfs\n else:\n irfs = self.irfs\n neqs = self.neqs\n irf_resim = model.irf_resim(orth=orth, repl=repl, T=periods, seed=seed,\n burn=100)\n stack = np.zeros((neqs, repl, periods*neqs))\n\n #stack left to right, up and down\n\n for p in xrange(repl):\n for i in xrange(neqs):\n stack[i, p,:] = np.ravel(irf_resim[p,1:,:,i].T)\n\n stack_cov=np.zeros((neqs, periods*neqs, periods*neqs))\n W = np.zeros((neqs, periods*neqs, periods*neqs))\n eigva = np.zeros((neqs, periods*neqs))\n k = np.zeros((neqs))\n\n if component != None:\n if np.size(component) != (neqs):\n raise ValueError(\"Component array must be of length \" + str(neqs))\n if np.argmax(component) >= neqs*periods:\n raise ValueError(\"Atleast one of the components does not exist\")\n else:\n k = component\n\n #compute for eigen decomp for each stack\n for i in xrange(neqs):\n stack_cov[i] = np.cov(stack[i],rowvar=0)\n W[i], eigva[i], k[i] = util.eigval_decomp(stack_cov[i])\n\n gamma = np.zeros((repl, periods+1, neqs, neqs))\n for p in xrange(repl):\n c=0\n for j in xrange(neqs):\n for i in xrange(neqs):\n gamma[p,1:,i,j] = W[j,k[j],i*periods:(i+1)*periods] * irf_resim[p,1:,i,j]\n if i == neqs-1:\n gamma[p,1:,i,j] = W[j,k[j],i*periods:] * irf_resim[p,1:,i,j]\n\n gamma_sort = np.sort(gamma, axis=0) #sort to get quantiles\n indx = round(signif/2*repl)-1,round((1-signif/2)*repl)-1\n\n lower = np.copy(irfs)\n upper = np.copy(irfs)\n for i in xrange(neqs):\n for j in xrange(neqs):\n lower[:,i,j] = irfs[:,i,j] + gamma_sort[indx[0],:,i,j]\n upper[:,i,j] = irfs[:,i,j] + gamma_sort[indx[1],:,i,j]\n\n return lower, upper\n\n def _eigval_decomp_SZ(self, irf_resim):\n \"\"\"\n Returns\n -------\n W: array of eigenvectors\n eigva: list of eigenvalues\n k: matrix indicating column # of largest eigenvalue for each c_i,j\n\n \"\"\"\n neqs = self.neqs\n periods = self.periods\n\n cov_hold = np.zeros((neqs, neqs, periods, periods))\n for i in xrange(neqs):\n for j in xrange(neqs):\n cov_hold[i,j,:,:] = np.cov(irf_resim[:,1:,i,j],rowvar=0)\n\n W = np.zeros((neqs, neqs, periods, periods))\n eigva = np.zeros((neqs, neqs, periods, 1))\n k = np.zeros((neqs, neqs))\n\n for i in xrange(neqs):\n for j in xrange(neqs):\n W[i,j,:,:], eigva[i,j,:,0], k[i,j] = util.eigval_decomp(cov_hold[i,j,:,:])\n return W, eigva, k\n\n @cache_readonly\n def G(self):\n # Gi matrices as defined on p. 111\n\n K = self.neqs\n\n # nlags = self.model.p\n # J = np.hstack((np.eye(K),) + (np.zeros((K, K)),) * (nlags - 1))\n\n def _make_g(i):\n # p. 
111 Lutkepohl\n G = 0.\n for m in range(i):\n # be a bit cute to go faster\n idx = i - 1 - m\n if idx in self._g_memo:\n apow = self._g_memo[idx]\n else:\n apow = la.matrix_power(self._A.T, idx)\n # apow = np.dot(J, apow)\n apow = apow[:K]\n self._g_memo[idx] = apow\n\n # take first K rows\n piece = np.kron(apow, self.irfs[m])\n G = G + piece\n\n return G\n\n return [_make_g(i) for i in range(1, self.periods + 1)]\n\n def _orth_cov(self):\n # Lutkepohl 3.7.8\n\n Ik = np.eye(self.neqs)\n PIk = np.kron(self.P.T, Ik)\n H = self.H\n\n covs = self._empty_covm(self.periods + 1)\n for i in range(self.periods + 1):\n if i == 0:\n apiece = 0\n else:\n Ci = np.dot(PIk, self.G[i-1])\n apiece = chain_dot(Ci, self.cov_a, Ci.T)\n\n Cibar = np.dot(np.kron(Ik, self.irfs[i]), H)\n bpiece = chain_dot(Cibar, self.cov_sig, Cibar.T) / self.T\n\n # Lutkepohl typo, cov_sig correct\n covs[i] = apiece + bpiece\n\n return covs\n\n def cum_effect_cov(self, orth=False):\n \"\"\"\n Compute asymptotic standard errors for cumulative impulse response\n coefficients\n\n Parameters\n ----------\n orth : boolean\n\n Notes\n -----\n eq. 3.7.7 (non-orth), 3.7.10 (orth)\n\n Returns\n -------\n\n \"\"\"\n Ik = np.eye(self.neqs)\n PIk = np.kron(self.P.T, Ik)\n\n F = 0.\n covs = self._empty_covm(self.periods + 1)\n for i in range(self.periods + 1):\n if i > 0:\n F = F + self.G[i - 1]\n\n if orth:\n if i == 0:\n apiece = 0\n else:\n Bn = np.dot(PIk, F)\n apiece = chain_dot(Bn, self.cov_a, Bn.T)\n\n Bnbar = np.dot(np.kron(Ik, self.cum_effects[i]), self.H)\n bpiece = chain_dot(Bnbar, self.cov_sig, Bnbar.T) / self.T\n\n covs[i] = apiece + bpiece\n else:\n if i == 0:\n covs[i] = np.zeros((self.neqs**2, self.neqs**2))\n continue\n\n covs[i] = chain_dot(F, self.cov_a, F.T)\n\n return covs\n\n def cum_errband_mc(self, orth=False, repl=1000,\n signif=0.05, seed=None, burn=100):\n \"\"\"\n IRF Monte Carlo integrated error bands of cumulative effect\n \"\"\"\n model = self.model\n periods = self.periods\n return model.irf_errband_mc(orth=orth, repl=repl,\n T=periods, signif=signif, seed=seed, burn=burn, cum=True)\n\n def lr_effect_cov(self, orth=False):\n \"\"\"\n Returns\n -------\n\n \"\"\"\n lre = self.lr_effects\n Finfty = np.kron(np.tile(lre.T, self.lags), lre)\n Ik = np.eye(self.neqs)\n\n if orth:\n Binf = np.dot(np.kron(self.P.T, np.eye(self.neqs)), Finfty)\n Binfbar = np.dot(np.kron(Ik, lre), self.H)\n\n return (chain_dot(Binf, self.cov_a, Binf.T) +\n chain_dot(Binfbar, self.cov_sig, Binfbar.T))\n else:\n return chain_dot(Finfty, self.cov_a, Finfty.T)\n\n def stderr(self, orth=False):\n return np.array([tsa.unvec(np.sqrt(np.diag(c)))\n for c in self.cov(orth=orth)])\n\n def cum_effect_stderr(self, orth=False):\n return np.array([tsa.unvec(np.sqrt(np.diag(c)))\n for c in self.cum_effect_cov(orth=orth)])\n\n def lr_effect_stderr(self, orth=False):\n cov = self.lr_effect_cov(orth=orth)\n return tsa.unvec(np.sqrt(np.diag(cov)))\n\n def _empty_covm(self, periods):\n return np.zeros((periods, self.neqs ** 2, self.neqs ** 2),\n dtype=float)\n\n @cache_readonly\n def H(self):\n k = self.neqs\n Lk = tsa.elimination_matrix(k)\n Kkk = tsa.commutation_matrix(k, k)\n Ik = np.eye(k)\n\n # B = chain_dot(Lk, np.eye(k**2) + commutation_matrix(k, k),\n # np.kron(self.P, np.eye(k)), Lk.T)\n\n # return np.dot(Lk.T, L.inv(B))\n\n B = chain_dot(Lk,\n np.dot(np.kron(Ik, self.P), Kkk) + np.kron(self.P, Ik),\n Lk.T)\n\n return np.dot(Lk.T, L.inv(B))\n\n def fevd_table(self):\n pass\n\n\n" ]
[ [ "numpy.sqrt", "numpy.eye", "numpy.tile", "numpy.linalg.matrix_power", "numpy.zeros", "numpy.dot", "numpy.diag", "numpy.copy", "scipy.linalg.inv", "numpy.linalg.cholesky", "numpy.argmax", "numpy.ravel", "numpy.size", "numpy.shape", "numpy.sort", "numpy.kron", "numpy.cov" ] ]
elischwat/hsfm-geomorph
[ "ddd7cd8a5434d04fef9cab7f16f15e7efde868c8" ]
[ "identify-imagery/nagap/identify-imagery-70s-90s.py" ]
[ "# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.5.2\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # Identify Imagery\n\n# +\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport contextily as ctx\n\nimport os\nimport cv2\nimport fiona \nimport geopandas as gpd\nimport re\nimport pandas as pd\n# enable fiona KML driver\ngpd.io.file.fiona.drvsupport.supported_drivers['KML'] = 'rw'\n# -\n\ndata_dir = '/data2/elilouis/hsfm-geomorph/data/'\n\n# ## Open up KML Files\n# Make sure to open all layers explicitly\n\n# +\nfile_paths = ['NAGAP_1970s.kml', 'NAGAP_1980s.kml', 'NAGAP_1990s.kml']\n\ndf_list = []\nfor path in file_paths:\n path = os.path.join(data_dir, path)\n for layer in fiona.listlayers(path):\n try:\n df_list.append(gpd.read_file(path, driver='KML', layer=layer))\n except ValueError:\n None\ndf = pd.concat(df_list)\n# -\n\nlen(df)\n\ndf.head()\n\n# Change CRS to Web Mercator for easy plotting\n\ndf = df.to_crs(epsg=3857)\n\n\n# ## Parse the Description data column\n\n# +\ndef parse_description(row):\n s = row.Description\n if s == '' or s is None:\n return pd.Series([None,None,None,None,None,None,None,None,None])\n else:\n lines = re.split(r'(<br>|</br>)\\s*', s)\n src = next((i for i in lines if 'src' in i), None)\n date = next((i for i in lines if 'Date' in i), None)\n location = next((i for i in lines if 'Location' in i), None)\n roll = next((i for i in lines if 'Roll' in i), None)\n frame = next((i for i in lines if 'Frame' in i), None)\n latitude = next((i for i in lines if 'Latitude' in i), None)\n longitude = next((i for i in lines if 'Longitude' in i), None)\n altitude = next((i for i in lines if 'Altitude' in i), None)\n type_ = next((i for i in lines if 'Type' in i), None)\n return pd.Series([\n None if src is None else src.split(':')[-1].replace('\"/>', \"\").replace(\"//\", \"\"),\n None if date is None else date.split(':')[-1],\n None if location is None else location.split(':')[-1],\n None if roll is None else roll.split(':')[-1],\n None if frame is None else frame.split(':')[-1],\n None if latitude is None else latitude.split(':')[-1].replace('°', ''),\n None if longitude is None else longitude.split(':')[-1].replace('°', ''),\n None if altitude is None else altitude.split(':')[-1],\n None if type_ is None else type_.split(':')[-1]\n ])\n\ndf[['src', 'date', 'location', 'roll', 'frame', \n 'latitude', 'longitude', 'altitude', 'type']] = df.apply(parse_description, axis=1)\n# -\n\nlen(df)\n\ndf.head(3)\n\ndf[df['roll'] == '74V5']\n\n# ## Read in the AOIs\n#\n# Lets change crs to web mercator right off the bat too.\n\naoi_gdf = gpd.read_file(data_dir + 'aois.geojson')\naoi_gdf = aoi_gdf.to_crs(epsg=3857)\n\nax = aoi_gdf.plot()\nctx.add_basemap(ax)\nplt.gcf().set_size_inches(8,8)\n\nbaker_polygon = aoi_gdf[aoi_gdf.name == 'Mt. Baker'].geometry.iloc[0]\nglacier_polygon = aoi_gdf[aoi_gdf.name == 'Glacier Peak'].geometry.iloc[0]\nrainier_polygon = aoi_gdf[aoi_gdf.name == 'Mt. 
Rainier'].geometry.iloc[0]\n\n# ## Look at locations of all images\n\nlen(df.date.unique())\n\nsrc = df[df.geometry.type=='Point']\nax = src.plot(markersize=0.25, facecolor='red')\nctx.add_basemap(ax)\nplt.gcf().set_size_inches(16,16)\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\n\nsns.distplot(src.geometry.to_crs('epsg:4326').x)\n\nsrc = df[df.geometry.type=='Point']\nsrc = src[src.geometry.to_crs('epsg:4326').y < 49]\nsrc = src[src.geometry.to_crs('epsg:4326').y > 45]\nsrc = src[src.geometry.to_crs('epsg:4326').x < -116]\nax = src.plot(markersize=0.25, facecolor='red')\nctx.add_basemap(ax)\nplt.gcf().set_size_inches(16,16)\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\n\ngdf.filename.str[:-4].unique()\n\n# +\nfig, axes = plt.subplots(1,2,figsize=(16,16),sharex=True, sharey=True)\n\ngdf = gpd.read_file(\"/data2/elilouis/baker/timesifted_image_footprints.geojson\").to_crs('epsg:3857')\n\n\nax = gdf.plot(ax=axes[1], alpha=0.5, edgecolor='k', )\nctx.add_basemap(axes[1])\naxes[1].get_xaxis().set_visible(False)\naxes[1].get_yaxis().set_visible(False)\n\nsrc = gdf[gdf.filename.str.contains('NAGAP_74V5')]\nsrc.plot(ax=axes[0], alpha=0.5, edgecolor='k', )\nctx.add_basemap(axes[0])\naxes[0].get_xaxis().set_visible(False)\naxes[0].get_yaxis().set_visible(False)\n\n# -\n\n# ## Look at locations of images in our AOIs\n\ndf.crs.to_epsg(), aoi_gdf.crs.to_epsg()\n\naoi_frames_and_paths = gpd.sjoin(df, aoi_gdf)\n\n# Format date column...\n\n# +\naoi_frames_and_paths['datetime'] = pd.to_datetime(aoi_frames_and_paths.date, errors='coerce')\naoi_frames_and_paths.date = aoi_frames_and_paths.datetime.dt.date\n\naoi_frames_df = aoi_frames_and_paths[\n aoi_frames_and_paths.geometry.type=='Point']\n\naoi_paths_df = aoi_frames_and_paths[\n aoi_frames_and_paths.geometry.type!='Point']\n\n# -\n\n# Fix the data for the paths\n# \n# For all the path rows, `Name` really contains the `date` and `Name` columns smushed together\n\naoi_paths_df['date'] = aoi_paths_df.Name.apply(lambda x: pd.to_datetime(x.split('-')[-1]))\naoi_paths_df['Name'] = aoi_paths_df.Name.apply(lambda x: x.split('-')[0])\n\nax = aoi_frames_df.plot(markersize=7, facecolor='red', legend=True, \n column='date', categorical=True, \n legend_kwds={'bbox_to_anchor': (1.6, 1)})\nplt.gcf().set_size_inches(8,8)\nax.set(xlim=(-1.37e7,-1.345e7), ylim=(5.87e6,6.3e6))\nctx.add_basemap(ax)\n\nax = aoi_paths_df.plot(linewidth=1, column='date', categorical=True, legend=True,\n legend_kwds={'bbox_to_anchor': (1.6, 1)})\nplt.gcf().set_size_inches(8,8)\nax.set(xlim=(-1.37e7,-1.345e7), ylim=(5.87e6,6.3e6))\nctx.add_basemap(ax)\n\n# ## Examine image dates \n\naoi_paths_df.date.unique(),aoi_frames_df.date.unique()\n\nset(aoi_paths_df.date.unique()).difference(set(aoi_frames_df.date.unique()))\n\nset(aoi_frames_df.date.unique()).difference(set(aoi_paths_df.date.unique()))\n\n# # Identify Mt Rainier Imagery\n\n# ## Look at all images on Mt. Rainier\n\nrainier_frames_gdf = aoi_frames_df[\n aoi_frames_df.geometry.within(rainier_polygon)]\n\nax = rainier_frames_gdf.plot(column='date', categorical=True, legend=True, markersize=80)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldImagery)\nplt.gcf().set_size_inches(10,10)\n\n# ## Look at all images in watershed-delineated subsections of Mt Rainier\n\n# If I want to focus on the Nisqally Glacier/River system, it looks like I should investigate imagery from all dates... 
I need a polygon for the Nisqally watershed.\n\n# ## Load Washington watershed geometries\n\n# !ls $data_dir\n\nwau_gdf = gpd.read_file(f'{data_dir}/Watershed_Administrative_Units-shp/wau.shp')\n\nwau_gdf.plot()\n\nwau_in_aois_gdf = gpd.sjoin(wau_gdf, aoi_gdf)\nrainier_waus_gdf = wau_in_aois_gdf[wau_in_aois_gdf.name == 'Mt. Rainier']\n\nax = rainier_waus_gdf.plot(column='WAU_ALIAS_',legend=True, markersize=80, legend_kwds={'bbox_to_anchor': (1.6, 1)}, alpha=0.6)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldImagery)\nplt.gcf().set_size_inches(5,5)\n\n\n# ## Look at image locations and watershed delineations\n\n# +\ndef plot_frames_and_aoi_polygon(points, aoi_polygon = None, lims = None):\n ax = points.plot(column='date', categorical=True, markersize=20, legend=True)\n if aoi_polygon is not None:\n gpd.GeoDataFrame(ax = ax, geometry = pd.Series(aoi_polygon)).plot(legend_kwds={'bbox_to_anchor': (1.6, 1)}, edgecolor='red', lw=2, facecolor=\"none\")\n ctx.add_basemap(ax, source=ctx.providers.Esri.WorldImagery)\n if lims is not None:\n ax.set(xlim=lims[0], ylim=lims[1])\n plt.gcf().set_size_inches(8,8)\n \nimport math \ndef plot_frames_and_aoi_date_separated(points, aoi_polygon = None, lims=None):\n groupby = points.groupby('date')\n fig, axes = plt.subplots(math.ceil(len(groupby.size().tolist())/4),4, figsize=(20,20), sharex=True, sharey=True)\n axes_flat = [item for sublist in axes for item in sublist]\n for key, group in groupby:\n ax = axes_flat.pop(0)\n if aoi_polygon is not None:\n gpd.GeoDataFrame(geometry = pd.Series(aoi_polygon)).plot(ax=ax, legend_kwds={'bbox_to_anchor': (1.6, 1)}, edgecolor='red', lw=2, facecolor=\"none\")\n group.plot(ax=ax, column='date', categorical=True, markersize=40, legend=True)\n ctx.add_basemap(ax, source=ctx.providers.Stamen.Terrain)\n while len(axes_flat) > 0:\n ax = axes_flat.pop(0)\n if aoi_polygon is not None:\n gpd.GeoDataFrame(geometry = pd.Series(aoi_polygon)).plot(ax=ax, legend_kwds={'bbox_to_anchor': (1.6, 1)}, edgecolor='red', lw=2, facecolor=\"none\")\n group.plot(ax=ax, column='date', categorical=True, markersize=40, legend=True)\n ctx.add_basemap(ax, source=ctx.providers.Stamen.Terrain)\n# plt.tight_layout()\n plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)\n\n\n# -\n\nax = rainier_waus_gdf.plot(legend_kwds={'bbox_to_anchor': (1.6, 1)}, edgecolor='red', lw=2, facecolor=\"none\")\nax = rainier_frames_gdf.plot(column='date', categorical=True, markersize=20, ax=ax)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldImagery)\nax.set(xlim=(-1.357e7,-1.3535e7), ylim=(5.905e6, 5.94e6))\nplt.gcf().set_size_inches(20,20)\n\n# The Kautz, Carbon, and Frying Pan watersheds look to have lots of images on different dates\n\n# ## Look at image locations in the Kautz, Carbon, and Frying Pan Watersheds\n\nwau_gdf.WAU_ALIAS_.where(\n wau_gdf.WAU_ALIAS_.str.contains('KAUTZ', na=False)\n).dropna()\n\nwau_gdf.WAU_ALIAS_.where(\n wau_gdf.WAU_ALIAS_.str.contains('CARBON', na=False)\n).dropna()\n\nwau_gdf.WAU_ALIAS_.where(\n wau_gdf.WAU_ALIAS_.str.contains('FRYING', na=False)\n).dropna()\n\nkautz_frames_df = aoi_frames_df[aoi_frames_df.geometry.within(wau_gdf.geometry.iloc[594])]\ncarbon_frames_df = aoi_frames_df[aoi_frames_df.geometry.within(wau_gdf.geometry.iloc[536])]\nfryingpan_frames_df = aoi_frames_df[aoi_frames_df.geometry.within(wau_gdf.geometry.iloc[564])]\n\nax = kautz_frames_df.plot(column='date', categorical=True, legend=True, markersize=80)\nctx.add_basemap(ax, 
source=ctx.providers.Esri.WorldTopoMap)\nplt.gcf().set_size_inches(14,14)\n\nax = carbon_frames_df.plot(column='date', categorical=True, legend=True, markersize=80)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldTopoMap)\nplt.gcf().set_size_inches(14,14)\n\nax = fryingpan_frames_df.plot(column='date', categorical=True, legend=True, markersize=80)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldTopoMap)\nplt.gcf().set_size_inches(14,14)\nx, y, arrow_length = 0.5, 0.5, 0.1\nax.annotate('N', xy=(x, y), xytext=(x, y-arrow_length),\n arrowprops=dict(facecolor='black', width=5, headwidth=15),\n ha='center', va='center', fontsize=20,\n xycoords=ax.transAxes)\n\nax = fryingpan_frames_df.plot(column='date', categorical=True, legend=True, markersize=80)\nctx.add_basemap(ax, source=ctx.providers.Esri.WorldTopoMap)\nplt.gcf().set_size_inches(14,14)\n\nplot_frames_and_aoi_polygon(fryingpan_frames_df, None)\n\nplot_frames_and_aoi_date_separated(fryingpan_frames_df, None)\n\n# ## Look at data in smaller watersheds, Nisqually and Carbon\n\nrainier_sub_aois = gpd.read_file(f\"{data_dir}/rainier_sub_aois.geojson\")\nrainier_sub_aois = rainier_sub_aois.to_crs(epsg=3857)\nnisqually_polygon = rainier_sub_aois[rainier_sub_aois.name=='nisqually'].geometry.iloc[0]\ncarbon_polygon = rainier_sub_aois[rainier_sub_aois.name=='carbon'].geometry.iloc[0]\nnisqually_frames = rainier_frames_gdf[rainier_frames_gdf.geometry.within(nisqually_polygon)]\ncarbon_frames = rainier_frames_gdf[rainier_frames_gdf.geometry.within(carbon_polygon)]\n\nlen(nisqually_frames), len(carbon_frames)\n\nplot_frames_and_aoi_polygon(nisqually_frames, nisqually_polygon)\n\nplot_frames_and_aoi_date_separated(nisqually_frames, nisqually_polygon)\n\nplot_frames_and_aoi_polygon(carbon_frames, carbon_polygon)\n\nplot_frames_and_aoi_date_separated(carbon_frames, carbon_polygon)\n\n\n# # Save Datasets to CSV for the HSFM Pipeline\n\ndef create_targets_list(kml_derived_df, output_path):\n #Open image name/UUID dataset\n pids_df = pd.read_csv(f'{data_dir}/glacier_names_pids.csv')\n filenames = kml_derived_df.apply(lambda r: ('NAGAP_' + r.roll + '_' + r.frame).replace(' ', ''), axis=1)\n pid_df = pids_df[pids_df.fileName.isin(filenames)]\n pid_df[[\n 'Year','Date','Location','Latitude','Longitude','Altitude','fileName','pid_tn','pid_jpeg','pid_tiff','_merge'\n ]].to_csv(output_path, index=None)\n return output_path\n\n\npids_df = pd.read_csv(f'{data_dir}/glacier_names_pids.csv')\n\n# ### Nisqually 1977\n\nsrc = nisqually_frames.groupby('date').get_group('1977-02-11')\nprint(len(src))\ncreate_targets_list(\n src,\n 'targets_nisqually_1977.csv'\n)\n\n# ### Nisqually 1980\n\nsrc = nisqually_frames.groupby('date').get_group('1980-9-10')\nprint(len(src))\ncreate_targets_list(\n src,\n 'targets_nisqually_1980.csv'\n)\n\n# ### Nisqually All\n\n# +\nsrc = nisqually_frames\nprint(len(src))\ncreate_targets_list(\n src,\n 'targets_nisqually_all_dates.csv'\n\n)\n# -\n\n# ### Carbon All\n\n# +\nsrc = carbon_frames\nprint(len(src))\ncreate_targets_list(\n src,\n 'targets_carbon_all_dates.csv'\n\n)\n# -\n# ### Frying Pan Watershed All\n\n# +\nsrc = fryingpan_frames_df\nprint(len(src))\ncreate_targets_list(\n src,\n 'targets_carbon_all_dates.csv'\n\n)\n# -\n\nsrc.roll = src.roll.apply(lambda x: x.strip())\n\nsrc.roll.iloc[0]\n\nsrc.Name.apply(lambda x: x[:4]) == src.roll\n\nsrc[src.Name.apply(lambda x: x[:4]) != src.roll]\n\n# ### Bandaid - Missing Lat/Long values\n#\n#\n# I later noticed missing Lat/Long values for a subset of images in 1974. 
Fix that here by getting lat/long info from the KML files.\n#\n# Note also that \"Name\" and \"roll\" columns do not agree.\n\nfixing = pd.read_csv('targets_carbon_all_dates.csv')\n\nfixing[fixing.Year == 1974]\n\ncarbon_frames['fileName'] = 'NAGAP_' + carbon_frames.roll + '_' + carbon_frames.frame\nto_merge = carbon_frames[carbon_frames.roll=='74V5'][['fileName', 'latitude', 'longitude']]\nto_merge\n\nfixing.loc[fixing['fileName'].str.startswith('NAGAP_74V5_')]\n\n# These rows are in the same order so I can go ahead and assign the lat long values from the `to_merge` dataframe.\n\nfixing.loc[fixing['fileName'].str.startswith('NAGAP_74V5_'), 'Latitude'] = to_merge['latitude'].tolist()\nfixing.loc[fixing['fileName'].str.startswith('NAGAP_74V5_'), 'Longitude'] = to_merge['longitude'].tolist()\n\nfixing.loc[fixing['fileName'].str.startswith('NAGAP_74V5_')]\n\nfixing.to_csv('targets_carbon_all_dates.csv')\n\n\n" ]
[ [ "pandas.Series", "pandas.read_csv", "matplotlib.pyplot.gcf", "matplotlib.pyplot.subplots", "pandas.to_datetime", "matplotlib.pyplot.subplots_adjust", "pandas.concat" ] ]
shreyasahasram08/growth-too-marshal
[ "f3fbf0043b50d3ffe56e6b4e06b0c1472fc10220" ]
[ "growth/too/tests/test_gcn.py" ]
[ "import datetime\nfrom unittest import mock\n\nfrom astropy import time\nfrom astropy import units as u\nimport gcn\nimport lxml.etree\nimport numpy as np\nimport pkg_resources\nimport pytest\n\nfrom .. import models\nfrom ..jinja import btoa\nfrom ..flask import app\nfrom ..gcn import handle, listen\nfrom . import mock_download_file\n\n\[email protected]_time('2017-08-17')\ndef test_freeze_time():\n \"\"\"Test that freezing time works.\"\"\"\n assert datetime.date.today() == datetime.date(2017, 8, 17)\n assert datetime.datetime.now() == datetime.datetime(2017, 8, 17)\n assert time.Time.now() == time.Time('2017-08-17')\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\ndef test_grb180116a_gnd_pos(mock_from_cone, mock_tile, mock_contour,\n celery, flask, mail):\n # Read test GCN\n payload = pkg_resources.resource_string(\n __name__, 'data/GRB180116A_Fermi_GBM_Gnd_Pos.xml')\n root = lxml.etree.fromstring(payload)\n\n # Run function under test\n handle(payload, root)\n\n # Check that we didn't write the unhelpful \"unknown\" short/long class\n dateobs = '2018-01-16T00:36:53'\n event = models.Event.query.get(dateobs)\n assert event.tags == ['Fermi', 'GRB']\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.twilio.call_everyone.run')\[email protected]('growth.too.tasks.slack.slack_everyone.run')\[email protected]('astropy.io.fits.file.download_file', mock_download_file)\[email protected]_time('2019-08-21')\ndef test_grb180116a_fin_pos(mock_call_everyone, mock_slack_everyone,\n mock_contour,\n celery, flask, mail):\n # Read test GCN\n payload = pkg_resources.resource_string(\n __name__, 'data/GRB180116A_Fermi_GBM_Fin_Pos.xml')\n root = lxml.etree.fromstring(payload)\n\n # Run function under test\n handle(payload, root)\n\n dateobs = '2018-01-16T00:36:53'\n event = models.Event.query.get(dateobs)\n assert event is not None\n *_, gcn_notice = event.gcn_notices\n assert gcn_notice.content == payload\n assert gcn_notice.notice_type == gcn.NoticeType.FERMI_GBM_FIN_POS\n assert time.Time(gcn_notice.date) == time.Time('2018-01-16T00:46:05')\n assert gcn_notice.ivorn == 'ivo://nasa.gsfc.gcn/Fermi#GBM_Fin_Pos2018-01-16T00:36:52.81_537755817_0-026' # noqa: E501\n assert gcn_notice.stream == 'Fermi'\n assert time.Time(gcn_notice.dateobs) - time.Time(dateobs) < 0.5 * u.second\n assert event.tags == ['Fermi', 'long', 'GRB']\n\n mock_call_everyone.assert_not_called()\n mock_slack_everyone.assert_not_called()\n\n localization, = event.localizations\n assert np.isclose(localization.flat_2d.sum(), 1.0)\n\n telescope = 'ZTF'\n filt = ['g', 'r', 'g']\n exposuretimes = [300.0, 300.0, 300.0]\n doReferences, doDither = True, False\n filterScheduleType = 'block'\n schedule_type = 'greedy'\n probability = 0.9\n plan_name = \"%s_%s_%s_%d_%d_%s_%d_%d\" % (localization.localization_name,\n \"\".join(filt), schedule_type,\n doDither, doReferences,\n filterScheduleType,\n exposuretimes[0],\n 100*probability)\n plan = models.Plan.query.filter_by(plan_name=plan_name,\n telescope=telescope).one()\n\n assert time.Time(plan.dateobs) - time.Time(dateobs) < 0.5 * u.second\n\n exposures = models.PlannedObservation.query.filter_by(\n dateobs=event.dateobs,\n telescope=telescope,\n plan_name=plan.plan_name).all()\n\n for exposure in exposures:\n field_id = exposure.field_id\n assert np.all(np.array(field_id) < 2000)\n assert 
np.all(np.array(exposure.exposure_time) > 0)\n assert np.all(np.array(exposure.weight) <= 1)\n\n assert np.isclose(plan.area, 651.6459456904389)\n\n # Try submitting some of the observing plans.\n flask.post(\n '/event/{}/plan'.format(dateobs),\n data={\n 'go': True,\n '{}_{}'.format(btoa(telescope), btoa(plan_name)): True\n }\n )\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\[email protected]('growth.too.tasks.skymaps.download.run')\ndef test_grb180116a_multiple_gcns(mock_download, mock_from_cone, mock_tile,\n mock_contour, celery, flask, mail):\n \"\"\"Test reading and ingesting all three GCNs. Make sure that there are\n no database conflicts.\"\"\"\n for notice_type in ['Alert', 'Flt_Pos', 'Gnd_Pos', 'Fin_Pos']:\n filename = 'data/GRB180116A_Fermi_GBM_' + notice_type + '.xml'\n payload = pkg_resources.resource_string(__name__, filename)\n root = lxml.etree.fromstring(payload)\n handle(payload, root)\n\n\[email protected](app.jinja_env.globals,\n {'now': lambda: time.Time('2018-04-22T21:55:30').datetime})\[email protected]('growth.too.tasks.twilio.text_everyone.run')\[email protected]('growth.too.tasks.twilio.call_everyone.run')\[email protected]('growth.too.tasks.slack.slack_everyone.run')\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\[email protected]('astropy.io.fits.file.download_file', mock_download_file)\ndef test_gbm_subthreshold(mock_from_cone, mock_tile, mock_contour,\n mock_call_everyone, mock_text_everyone,\n mock_slack_everyone, celery,\n flask, mail):\n \"\"\"Test reading and ingesting all three GCNs. 
Make sure that there are\n no database conflicts.\"\"\"\n filename = 'data/GRB180422.913_Subthreshold.xml'\n payload = pkg_resources.resource_string(__name__, filename)\n root = lxml.etree.fromstring(payload)\n handle(payload, root)\n\n event = models.Event.query.get('2018-04-22T21:54:11')\n assert event is not None\n gcn_notice, = event.gcn_notices\n assert gcn_notice.notice_type == gcn.NoticeType.FERMI_GBM_SUBTHRESH\n assert gcn_notice.stream == 'Fermi'\n assert event.tags == ['Fermi', 'short', 'transient']\n\n mock_text_everyone.assert_not_called()\n mock_call_everyone.assert_not_called()\n mock_slack_everyone.assert_not_called()\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\ndef test_amon_151115(mock_from_cone, mock_tile, mock_contour,\n celery, flask, mail):\n # Read test GCN\n payload = pkg_resources.resource_string(\n __name__, 'data/AMON_151115.xml')\n root = lxml.etree.fromstring(payload)\n\n # Run function under test\n handle(payload, root)\n\n dateobs = '2015-11-15T11:53:44'\n event = models.Event.query.get(dateobs)\n assert event.tags == ['AMON']\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\ndef test_amon_icecube_gold_190730(mock_from_cone, mock_tile, mock_contour,\n celery, flask, mail):\n # Read test GCN\n payload = pkg_resources.resource_string(\n __name__, 'data/AMON_ICECUBE_GOLD_190730.xml')\n root = lxml.etree.fromstring(payload)\n\n # Run function under test\n handle(payload, root)\n\n dateobs = '2019-07-30T20:50:41'\n event = models.Event.query.get(dateobs)\n assert event.tags == ['AMON']\n\n\[email protected]('growth.too.tasks.skymaps.contour.run')\[email protected]('growth.too.tasks.tiles.tile.run')\[email protected]('growth.too.tasks.skymaps.from_cone.run')\ndef test_amon_icecube_bronze_190819(mock_from_cone, mock_tile, mock_contour,\n celery, flask, mail):\n # Read test GCN\n payload = pkg_resources.resource_string(\n __name__, 'data/AMON_ICECUBE_BRONZE_190819.xml')\n root = lxml.etree.fromstring(payload)\n\n # Run function under test\n handle(payload, root)\n\n dateobs = '2019-08-19T17:34:24'\n event = models.Event.query.get(dateobs)\n assert event.tags == ['AMON']\n\n\[email protected]('gcn.listen')\ndef test_listen(mock_listen):\n # Run function under test\n listen()\n\n # Check that GCN listener was invoked\n assert mock_listen.called_once_with(handle=handle)\n" ]
[ [ "numpy.array", "numpy.isclose" ] ]
zgongaware/gonzo
[ "290eae429eb115793cdac3d0be69a064eb73f9a2" ]
[ "goza/chart.py" ]
[ "import matplotlib.pyplot as plt\n\n\nclass Chart:\n \"\"\"\n Chart class to create and format a basic pyplot figure\n \"\"\"\n def __init__(self, title=None, xlabel=None, ylabel=None, figsize=None):\n\n self.title = title if title else \"Unnamed Chart\"\n self.xlabel = xlabel if xlabel else \"X-Axis\"\n self.ylabel = ylabel if ylabel else \"Y-Axis\"\n self.figsize = figsize if figsize else (10, 8)\n\n # Create figure\n self.figure, self.ax = self.create_figure(self.figsize)\n\n # Format\n self.format_title()\n self.format_axes()\n\n def create_figure(self, figsize):\n \"\"\"\n Create plplot figure and axes objects and assign to Chart\n :param figsize:\n :return:\n \"\"\"\n self.figure, self.ax = plt.subplots(1, 1, figsize=figsize)\n\n return self.figure, self.ax\n\n def format_title(self, color=\"black\", fontsize=14):\n \"\"\"\n Format title, x label, and y label\n :return:\n \"\"\"\n self.ax.set_title(self.title, color=color, fontsize=fontsize)\n\n def format_axes(self, color=\"#605770\", fontsize=12):\n \"\"\"\n Format axes to my preference. Remove top/right spines and set colors on\n left/bottom spines, ticks, and tick labels\n :param color:\n :return:\n \"\"\"\n\n # Turn off top / right spines\n self.ax.spines[\"top\"].set_visible(False)\n self.ax.spines[\"right\"].set_visible(False)\n\n # Format left / bottom spines\n self.ax.spines[\"left\"].set_color(color)\n self.ax.spines[\"bottom\"].set_color(color)\n\n # Format ticks\n self.ax.tick_params(axis=\"x\", colors=color)\n self.ax.tick_params(axis=\"y\", colors=color)\n\n # Format labels\n self.ax.set_xlabel(self.xlabel, fontsize=fontsize)\n self.ax.set_ylabel(self.ylabel, fontsize=fontsize)\n\n @staticmethod\n def show():\n \"\"\"\n Show chart\n :return:\n \"\"\"\n plt.show()\n\n # TODO: save_figure method saving blank image.\n @staticmethod\n def save_figure(*args, **kwargs):\n \"\"\"\n Save figure to file\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n plt.savefig(*args, **kwargs)\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
luomou97/ELMoForManyLangs
[ "3e97600baa3a4dde229c1e78c513785e7d50e8e1" ]
[ "elmoformanylangs/modules/lstm.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport copy\n\n\n# \\ref page 4, layers=2, forward + backward, concat[forward_projection, backward_projection]\nclass LstmbiLm(nn.Module):\n def __init__(self, config, use_cuda=False):\n super(LstmbiLm, self).__init__()\n self.config = config\n self.use_cuda = use_cuda\n \n self.encoder = nn.LSTM(self.config['encoder']['projection_dim'],\n self.config['encoder']['dim'],\n num_layers=self.config['encoder']['n_layers'], \n bidirectional=True,\n batch_first=True, \n dropout=self.config['dropout'])\n self.projection = nn.Linear(self.config['encoder']['dim'], self.config['encoder']['projection_dim'], bias=True)\n\n def forward(self, inputs):\n forward, backward = self.encoder(inputs)[0].split(self.config['encoder']['dim'], 2) # split dim=2 in stride config['encoder']['dim'], here half\n return torch.cat([self.projection(forward), self.projection(backward)], dim=2)\n" ]
[ [ "torch.nn.LSTM", "torch.nn.Linear" ] ]
rahuln/adapter-transformers
[ "ac3284547064686d31b95e5e1b078447a2199779" ]
[ "src/transformers/adapters/model_mixin.py" ]
[ "import logging\nimport os\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom os.path import join\nfrom typing import Iterable, List, Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\n\nfrom .composition import AdapterCompositionBlock, Fuse, Stack, parse_composition\nfrom .configuration import AdapterConfig, AdapterConfigBase, AdapterFusionConfig, get_adapter_config_hash\nfrom .context import AdapterSetup, ForwardContext\nfrom .hub_mixin import PushAdapterToHubMixin\nfrom .layer import AdapterLayer, AdapterLayerBase\nfrom .loading import AdapterFusionLoader, AdapterLoader, PredictionHeadLoader, WeightsLoader\nfrom .modeling import Adapter, GLOWCouplingBlock, NICECouplingBlock\nfrom .prefix_tuning import PrefixTuningPool, PrefixTuningShim\nfrom .utils import EMBEDDING_FILE, TOKENIZER_PATH, inherit_doc\nfrom .wrappers.configuration import wrap_config\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass InvertibleAdaptersMixin:\n \"\"\"Mixin for Transformer models adding invertible adapters.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.invertible_adapters = nn.ModuleDict(dict())\n\n # Make sure config is wrapped\n self.config = wrap_config(self.config)\n\n def add_invertible_adapter(self, adapter_name: str):\n \"\"\"\n Adds an invertible adapter module for the adapter with the given name. If the given adapter does not specify an\n invertible adapter config, this method does nothing.\n\n Args:\n adapter_name (str): The name of the adapter for which to add an invertible adapter module.\n \"\"\"\n if adapter_name in self.invertible_adapters:\n raise ValueError(f\"Model already contains an adapter module for '{adapter_name}'.\")\n adapter_config = self.config.adapters.match(\n adapter_name,\n config_type=AdapterConfig,\n location_key=\"inv_adapter\",\n )\n if adapter_config and adapter_config[\"inv_adapter\"]:\n if adapter_config[\"inv_adapter\"] == \"nice\":\n inv_adap = NICECouplingBlock(\n [[self.config.hidden_size]],\n non_linearity=adapter_config[\"non_linearity\"],\n reduction_factor=adapter_config[\"inv_adapter_reduction_factor\"],\n )\n elif adapter_config[\"inv_adapter\"] == \"glow\":\n inv_adap = GLOWCouplingBlock(\n [[self.config.hidden_size]],\n non_linearity=adapter_config[\"non_linearity\"],\n reduction_factor=adapter_config[\"inv_adapter_reduction_factor\"],\n )\n else:\n raise ValueError(f\"Invalid invertible adapter type '{adapter_config['inv_adapter']}'.\")\n self.invertible_adapters[adapter_name] = inv_adap\n self.invertible_adapters[adapter_name].apply(Adapter.init_bert_weights)\n\n def delete_invertible_adapter(self, adapter_name: str):\n if adapter_name in self.invertible_adapters:\n del self.invertible_adapters[adapter_name]\n\n def get_invertible_adapter(self):\n # TODO: Currently no fusion over invertible adapters, takes only very first language adapter position\n if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:\n first_adapter = self.config.adapters.active_setup.first()\n if first_adapter in self.invertible_adapters:\n return self.invertible_adapters[first_adapter]\n return None\n\n def enable_invertible_adapters(self, adapter_names):\n for adapter_name in adapter_names:\n if adapter_name in self.invertible_adapters:\n for param in self.invertible_adapters[adapter_name].parameters():\n param.requires_grad = True\n\n def invertible_adapters_forward(self, hidden_states, rev=False):\n # TODO: Currently no fusion over 
invertible adapters, takes only very first language adapter position\n if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:\n first_adapter = self.config.adapters.active_setup.first()\n if first_adapter in self.invertible_adapters:\n hidden_states = self.invertible_adapters[first_adapter](hidden_states, rev=rev)\n\n return hidden_states\n\n\nclass ModelAdaptersMixin(PushAdapterToHubMixin, ABC):\n \"\"\"Mixin for transformer models adding support for loading/ saving adapters.\"\"\"\n\n def __init__(self, config, *args, **kwargs):\n super().__init__(config, *args, **kwargs)\n if config.name_or_path and not os.path.exists(config.name_or_path):\n self.model_name = config.name_or_path\n else:\n self.model_name = None\n self.loaded_embeddings = {}\n self.shared_parameters = nn.ModuleDict()\n self._active_embedding = \"default\"\n\n # Make sure config is wrapped\n self.config = wrap_config(self.config)\n\n def _link_prefix_to_pool(self, layer):\n if isinstance(layer, PrefixTuningShim):\n layer.set_pool(self.base_model.prefix_tuning)\n\n def _init_adapter_modules(self, add_prefix_tuning_pool=True):\n \"\"\"\n This method initializes adapter modules and fusion modules from the model config.\n \"\"\"\n # Link all prefix tunings\n if add_prefix_tuning_pool:\n self.base_model.prefix_tuning = PrefixTuningPool(self.config)\n self.apply_to_adapter_layers(lambda i, layer: self._link_prefix_to_pool(layer))\n\n # Initialize adapters from config\n for adapter_name in self.config.adapters:\n self.apply_to_adapter_layers(lambda i, layer: layer.add_adapter(adapter_name, i))\n # Initialize fusion from config\n for fusion_name in self.config.adapters.fusions:\n self.apply_to_adapter_layers(lambda i, layer: layer.add_fusion_layer(fusion_name))\n\n self.loaded_embeddings[\"default\"] = self.get_input_embeddings()\n\n # These methods have to be implemented by every deriving class:\n\n @abstractmethod\n def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:\n \"\"\"\n Iterates over all layers of the model.\n\n This abstract method has to ne implemented by every implementing model.\n \"\"\"\n pass\n\n def apply_to_adapter_layers(self, fn):\n \"\"\"\n Applies a function to all adapter layers of the model.\n \"\"\"\n for i, layer in self.iter_layers():\n for module in layer.modules():\n if isinstance(module, AdapterLayerBase):\n fn(i, module)\n\n def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):\n \"\"\"Sets the model into mode for training the given adapters.\"\"\"\n self.train()\n self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.apply_to_adapter_layers(lambda i, layer: layer.enable_adapters(adapter_setup, True, False))\n for adapter_name in adapter_setup:\n if adapter_name in self.shared_parameters:\n for param in self.shared_parameters[adapter_name].values():\n param.requires_grad = True\n\n if isinstance(self, InvertibleAdaptersMixin):\n self.enable_invertible_adapters(adapter_setup.flatten())\n # use the adapters to be trained by default in every forward pass\n self.set_active_adapters(adapter_setup)\n if train_embeddings:\n self.get_input_embeddings().train()\n\n def train_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"Sets the model into mode for training of adapter fusion determined by a list of adapter names.\"\"\"\n warnings.warn(\n \"add_fusion() has been deprecated in favor of add_adapter_fusion(). 
Please use the newer method instead.\",\n FutureWarning,\n )\n self.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n\n def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"Sets the model into mode for training of adapter fusion determined by a list of adapter names.\"\"\"\n self.train()\n self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.apply_to_adapter_layers(lambda i, layer: layer.enable_adapters(adapter_setup, unfreeze_adapters, True))\n # use the adapters to be trained by default in every forward pass\n self.set_active_adapters(adapter_setup)\n # TODO implement fusion for invertible adapters\n\n def has_adapters(self):\n if not getattr(self.config, \"is_adaptable\", None):\n return False\n return len(self.config.adapters.adapters) > 0\n\n @property\n def has_parallel_adapters(self) -> bool:\n if self.config.adapters.active_setup:\n return self.config.adapters.active_setup.parallel_channels > 1\n else:\n return False\n\n @property\n def active_adapters(self) -> AdapterCompositionBlock:\n return self.config.adapters.active_setup\n\n @active_adapters.setter\n def active_adapters(self, adapter_setup: Union[list, AdapterCompositionBlock]):\n self.set_active_adapters(adapter_setup)\n\n def set_shared_parameters(self, param):\n self.shared_parameters = param\n\n def set_active_adapters(\n self, adapter_setup: Union[list, AdapterCompositionBlock], skip_layers: Optional[List[int]] = None\n ):\n \"\"\"\n Sets the adapter modules to be used by default in every forward pass. If no adapter with the given name is\n found, no module of the respective type will be activated.\n\n Args:\n adapter_setup (list):\n The list of adapters to be activated by default. Can be a fusion or stacking configuration.\n \"\"\"\n adapter_setup = parse_composition(adapter_setup, model_type=self.config.model_type)\n if adapter_setup:\n for adapter_name in adapter_setup.flatten():\n if adapter_name not in self.config.adapters.adapters:\n raise ValueError(\n f\"No adapter with name '{adapter_name}' found. Please make sure that all specified adapters are correctly loaded.\"\n )\n\n self.config.adapters.active_setup = adapter_setup\n self.config.adapters.skip_layers = skip_layers\n\n def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):\n \"\"\"\n Adds a new adapter module of the specified type to the model.\n\n Args:\n\n adapter_name (str): The name of the adapter module to be added. config (str or dict or AdapterConfigBase,\n optional): The adapter configuration, can be either:\n\n - the string identifier of a pre-defined configuration dictionary\n - a configuration dictionary specifying the full config\n - if not given, the default configuration for this adapter type will be used\n overwrite_ok (bool, optional): Overwrite an adapter with the same name if it exists. By default (False), an\n exception is thrown. set_active (bool, optional): Set the adapter to be the active one. 
By default (False),\n the adapter is added but not activated.\n \"\"\"\n if isinstance(config, dict):\n config = AdapterConfigBase.load(config) # ensure config is ok and up-to-date\n # In case adapter already exists and we allow overwriting, explicitly delete the existing one first\n if overwrite_ok and adapter_name in self.config.adapters:\n self.delete_adapter(adapter_name)\n self.config.adapters.add(adapter_name, config=config)\n try:\n self.apply_to_adapter_layers(lambda i, layer: layer.add_adapter(adapter_name, i))\n # PHM Layer\n if self.config.adapters.match(adapter_name, AdapterConfig, location_key=\"phm_layer\"):\n self._add_shared_parameters(adapter_name, config)\n # Prefix Tuning\n for module in self.modules():\n if isinstance(module, PrefixTuningPool):\n module.confirm_prefix(adapter_name)\n if isinstance(self, InvertibleAdaptersMixin):\n self.add_invertible_adapter(adapter_name)\n except ValueError as ex:\n self.delete_adapter(adapter_name)\n raise ex\n if set_active:\n self.set_active_adapters(adapter_name)\n\n def _add_shared_parameters(self, adapter_name, adapter_config: AdapterConfig):\n self.shared_parameters[adapter_name] = (\n list(self.get_adapter(adapter_name)[0].values())[0].adapter_down[0].init_shared_parameters()\n )\n\n def add_fusion(self, adapter_names: Union[Fuse, list], adapter_fusion_config=None, override_kwargs=None):\n warnings.warn(\n \"add_fusion() has been deprecated in favor of add_adapter_fusion(). Please use the newer method instead.\",\n FutureWarning,\n )\n adapter_fusion_config = AdapterFusionConfig.from_dict(adapter_fusion_config).replace(**override_kwargs)\n self.add_adapter_fusion(adapter_names, adapter_fusion_config)\n\n def add_adapter_fusion(\n self,\n adapter_names: Union[Fuse, list, str],\n config=None,\n overwrite_ok: bool = False,\n set_active: bool = False,\n ):\n \"\"\"\n Adds AdapterFusion to the model with alll the necessary configurations and weight initializations\n\n Args:\n adapter_names (Fuse or list or str): AdapterFusion layer to add. Can be either:\n\n - a ``Fuse`` composition block\n - a list of adapter names to fuse\n - a comma-separated string of adapter names to fuse\n config (str or dict): adapter fusion configuration, can be either:\n\n - a string identifying a pre-defined adapter fusion configuration\n - a dictionary representing the adapter fusion configuration\n - the path to a file containing the adapter fusion configuration\n overwrite_ok (bool, optional):\n Overwrite an AdapterFusion layer with the same name if it exists. By default (False), an exception is\n thrown.\n set_active (bool, optional):\n Activate the added AdapterFusion. 
By default (False), the AdapterFusion is added but not activated.\n \"\"\"\n if isinstance(adapter_names, Fuse):\n adapter_names = adapter_names.children\n elif isinstance(adapter_names, str):\n adapter_names = adapter_names.split(\",\")\n\n if isinstance(config, dict):\n config = AdapterFusionConfig.from_dict(config) # ensure config is ok and up-to-date\n # In case adapter already exists and we allow overwriting, explicitly delete the existing one first\n if overwrite_ok and self.config.adapters.get_fusion(adapter_names) is not None:\n self.delete_adapter_fusion(adapter_names)\n self.config.adapters.add_fusion(adapter_names, config=config)\n self.apply_to_adapter_layers(lambda i, layer: layer.add_fusion_layer(adapter_names))\n if set_active:\n if not isinstance(adapter_names, list):\n adapter_names = adapter_names.split(\",\")\n self.set_active_adapters(Fuse(*adapter_names))\n\n def delete_adapter(self, adapter_name: str):\n \"\"\"\n Deletes the adapter with the specified name from the model.\n\n Args:\n adapter_name (str): The name of the adapter.\n \"\"\"\n if adapter_name not in self.config.adapters:\n logger.info(\"No adapter '%s' found for deletion. Skipping.\", adapter_name)\n return\n del self.config.adapters.adapters[adapter_name]\n self.apply_to_adapter_layers(lambda i, layer: layer.delete_adapter(adapter_name))\n if isinstance(self, InvertibleAdaptersMixin):\n self.delete_invertible_adapter(adapter_name)\n # Reset active adapters if this was the only active adapter\n if self.active_adapters == Stack(adapter_name):\n self.active_adapters = None\n\n def delete_adapter_fusion(self, adapter_names: Union[Fuse, list, str]):\n \"\"\"\n Deletes the AdapterFusion layer of the specified adapters.\n\n Args:\n adapter_names (Union[Fuse, list, str]): AdapterFusion layer to delete.\n \"\"\"\n if isinstance(adapter_names, Fuse):\n adapter_fusion_name = \",\".join(adapter_names.children)\n elif isinstance(adapter_names, list):\n adapter_fusion_name = \",\".join(adapter_names)\n elif isinstance(adapter_names, str):\n adapter_fusion_name = adapter_names\n else:\n raise ValueError(\"Invalid AdapterFusion definition: {}\".format(adapter_names))\n\n if adapter_fusion_name not in self.config.adapters.fusions:\n logger.info(\"No AdapterFusion '%s' found for deletion. 
Skipping.\", adapter_fusion_name)\n return\n del self.config.adapters.fusions[adapter_fusion_name]\n self.apply_to_adapter_layers(lambda i, layer: layer.delete_fusion_layer(adapter_fusion_name))\n # Reset active adapters if this was the active setup\n if self.active_adapters == adapter_names:\n self.active_adapters = None\n\n def save_adapter(\n self,\n save_directory: str,\n adapter_name: str,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves an adapter and its configuration file to a directory so that it can be shared or reloaded using\n `load_adapter()`.\n\n Args:\n save_directory (str): Path to a directory where the adapter should be saved.\n adapter_name (str): Name of the adapter to be saved.\n\n Raises:\n ValueError: If the given adapter name is invalid.\n \"\"\"\n loader = AdapterLoader(self)\n loader.save(save_directory, adapter_name, meta_dict)\n # save additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.save(save_directory, adapter_name)\n\n def save_adapter_fusion(\n self,\n save_directory: str,\n adapter_names: Union[Fuse, list, str],\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded\n using `load_adapter_fusion()`.\n\n Args:\n save_directory (str): Path to a directory where the AdapterFusion should be saved.\n adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.\n\n Raises:\n ValueError: If the given AdapterFusion name is invalid.\n \"\"\"\n if isinstance(adapter_names, Fuse):\n adapter_fusion_name = \",\".join(adapter_names.children)\n elif isinstance(adapter_names, list):\n adapter_fusion_name = \",\".join(adapter_names)\n elif isinstance(adapter_names, str):\n adapter_fusion_name = adapter_names\n else:\n raise ValueError(\"Invalid AdapterFusion definition: {}\".format(adapter_names))\n\n loader = AdapterFusionLoader(self)\n loader.save(save_directory, adapter_fusion_name, meta_dict)\n # save additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.save(save_directory, adapter_fusion_name)\n\n def load_adapter(\n self,\n adapter_name_or_path: str,\n config: Union[dict, str] = None,\n version: str = None,\n model_name: str = None,\n load_as: str = None,\n source: str = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n leave_out: Optional[List[int]] = None,\n id2label=None,\n set_active: bool = False,\n **kwargs\n ) -> str:\n \"\"\"\n Loads a pre-trained pytorch adapter module from the local file system or a remote location.\n\n Args:\n adapter_name_or_path (str): can be either:\n\n - the identifier of a pre-trained task adapter to be loaded from Adapter Hub\n - a path to a directory containing adapter weights saved using `model.saved_adapter()`\n - a URL pointing to a zip folder containing a saved adapter module\n config (dict or str, optional): The requested configuration of the adapter.\n If not specified, will be either: - the default adapter config for the requested adapter if specified -\n the global default adapter config\n version (str, optional): The version of the adapter to be loaded.\n model_name (str, optional): The string identifier of the pre-trained model.\n load_as (str, optional): Load the adapter using this name. 
By default, the name with which the adapter was\n saved will be used.\n source (str, optional): Identifier of the source(s) from where to load the adapter. Can be:\n\n - \"ah\" (default): search on AdapterHub.\n - \"hf\": search on HuggingFace model hub.\n - None: search on all sources\n leave_out: Dynamically drop adapter modules in the specified Transformer layers when loading the adapter.\n set_active (bool, optional):\n Set the loaded adapter to be the active one. By default (False), the adapter is loaded but not\n activated.\n\n Returns:\n str: The name with which the adapter was added to the model.\n \"\"\"\n loader = AdapterLoader(self)\n load_dir, load_name = loader.load(\n adapter_name_or_path,\n config,\n version,\n model_name,\n load_as,\n source=source,\n leave_out=leave_out,\n set_active=set_active,\n **kwargs,\n )\n # load additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.load(\n load_dir,\n load_as=load_as,\n loading_info=kwargs.get(\"loading_info\", None),\n main_load_name=load_name,\n id2label=id2label,\n set_active=set_active,\n )\n return load_name\n\n def load_adapter_fusion(\n self,\n adapter_fusion_name_or_path: str,\n load_as: str = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n set_active: bool = False,\n **kwargs\n ) -> str:\n \"\"\"\n Loads a pre-trained AdapterFusion layer from the local file system.\n\n Args:\n adapter_fusion_name_or_path (str):\n a path to a directory containing AdapterFusion weights saved using `model.save_adapter_fusion()`.\n load_as (str, optional): Load the AdapterFusion using this name.\n By default, the name with which the AdapterFusion layer was saved will be used.\n set_active (bool, optional):\n Activate the loaded AdapterFusion. 
By default (False), the AdapterFusion is loaded but not activated.\n\n Returns:\n str: The name with which the AdapterFusion was added to the model.\n \"\"\"\n\n loader = AdapterFusionLoader(self)\n load_dir, load_name = loader.load(adapter_fusion_name_or_path, load_as, set_active=set_active)\n # load additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.load(\n load_dir,\n load_as=load_as,\n loading_info=kwargs.get(\"loading_info\", None),\n main_load_name=load_name,\n set_active=set_active,\n )\n return load_name\n\n def save_all_adapters(\n self,\n save_directory: str,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves all adapters of this model together with their configuration to subfolders of the given location.\n\n Args:\n save_directory (str): Path to a directory where the adapters should be saved.\n \"\"\"\n for name in self.config.adapters:\n adapter_config = self.config.adapters.get(name)\n h = get_adapter_config_hash(adapter_config)\n save_path = join(save_directory, name)\n if meta_dict:\n meta_dict.update({\"config_id\": h})\n else:\n meta_dict = {\"config_id\": h}\n self.save_adapter(save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders)\n\n def save_all_adapter_fusions(\n self,\n save_directory: str,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves all AdapterFusion layers of this model together with their configuration to subfolders of the given\n location.\n\n Args:\n save_directory (str): Path to a directory where the AdapterFusion layers should be saved.\n \"\"\"\n for name in self.config.adapters.fusions:\n adapter_fusion_config = self.config.adapters.get_fusion(name)\n h = get_adapter_config_hash(adapter_fusion_config)\n save_path = join(save_directory, name)\n if meta_dict:\n meta_dict.update({\"config_id\": h})\n else:\n meta_dict = {\"config_id\": h}\n self.save_adapter_fusion(\n save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders\n )\n\n def freeze_model(self, freeze=True):\n \"\"\"Freezes all weights of the model.\"\"\"\n # first freeze/ unfreeze all model weights\n for param in self.base_model.parameters():\n param.requires_grad = not freeze\n self.model_frozen = freeze\n\n def forward_context(self, context: ForwardContext, *args, **kwargs):\n \"\"\"\n This method is called by the ``ForwardContext`` at the beginning of the forward pass.\n \"\"\"\n # some warnings if we don't use available adapters\n active_adapters = getattr(self, \"active_adapters\", None) or AdapterSetup.get_context()\n if not active_adapters:\n if self.has_adapters():\n logger.warning(\"There are adapters available but none are activated for the forward pass.\")\n return\n\n context.adapters_parallelized = False\n # Add the shared parameters for the active adapters to the context\n context.shared_parameters = {\n name: param for name, param in self.shared_parameters.items() if name in active_adapters.flatten()\n }\n\n # Prefix tuning\n input_tensor = kwargs.get(\"input_ids\", None)\n if input_tensor is None:\n input_tensor = kwargs.get(\"decoder_input_ids\", None)\n if input_tensor is None:\n input_tensor = kwargs.get(\"attention_mask\", None)\n if input_tensor is None:\n input_tensor = args[0]\n context.prefix_states = self.base_model.prefix_tuning(input_tensor.shape[0])\n\n def load_embeddings(self, path: str, name: str):\n \"\"\"\n Load a 
saved embedding from the given path. If the embedding was saved with a tokenizer it is returned\n\n Args:\n path: the path to the saved embedding\n name: the name the embedding should be loaded as\n\n Returns: a tokenizer if it ws saved with the embedding otherwise None\n\n \"\"\"\n from ..models.auto.tokenization_auto import AutoTokenizer\n\n if name in self.loaded_embeddings:\n raise ValueError(\"An embedding with the name {} already exists\".format(name))\n tokenizer = None\n tokenizer_path = os.path.join(path, TOKENIZER_PATH)\n if os.path.isdir(tokenizer_path):\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)\n\n embedding_path = os.path.join(path, EMBEDDING_FILE)\n if not os.path.isfile(embedding_path):\n raise FileNotFoundError(\"No embeddings found at {}\".format(embedding_path))\n weights = torch.load(embedding_path)\n\n self.loaded_embeddings[name] = nn.Embedding.from_pretrained(weights)\n self.set_active_embeddings(name)\n return tokenizer\n\n def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):\n \"\"\"\n Add a new embedding to the model. If a reference embedding and reference tokenizer are provided tokens in the\n present in both tokenizers are initialized to the embedding in the reference_embedding.\n\n Args:\n name: the name of the embedding\n tokenizer: the tokenizer determining the vocab of the embedding\n reference_embedding:\n the reference embedding to use for initializing the embeddings of tokens present in the newly created\n embedding\n reference_tokenizer: the tokenizer providing the vocab for the reference embedding\n embedding_dim: the dimension of the embeddings (if None the hidden_size from the config is used)\n\n \"\"\"\n if name in self.loaded_embeddings:\n raise ValueError(\"An embedding with the name {} already exists\".format(name))\n if embedding_dim is None:\n embedding_dim = self.config.hidden_size\n embedding = nn.Embedding(tokenizer.vocab_size, embedding_dim)\n embedding.requires_grad_(False)\n if (reference_embedding is not None and reference_tokenizer is None) or (\n reference_tokenizer is not None and reference_embedding is None\n ):\n raise KeyError(\n \"Reference embedding and reference tokenizer are required to use initialize embeddings from reference embedding\"\n )\n if reference_embedding is not None and reference_tokenizer is not None:\n tokens = set(tokenizer.get_vocab().keys()) & set(reference_tokenizer.get_vocab().keys())\n reference_vocab = reference_tokenizer.get_vocab()\n vocab = tokenizer.get_vocab()\n for t in tokens:\n idx_reference = reference_vocab[t]\n idx = vocab[t]\n embedding.weight[idx] = self.loaded_embeddings[reference_embedding].weight[idx_reference].clone()\n embedding.train(False)\n self.loaded_embeddings[name] = embedding\n self.set_active_embeddings(name)\n\n def delete_embeddings(self, name):\n \"\"\"\n Deletes the embedding with the given name\n\n Args:\n name: The name of the embedding that should be deleted\n\n \"\"\"\n if name not in self.loaded_embeddings:\n raise ValueError(\"No embedding with name {}\".format(name))\n if self.active_embeddings == name:\n logger.warning(\"The active embedding is deleted. Setting the default embedding as active.\")\n self.set_active_embeddings(\"default\")\n del self.loaded_embeddings[name]\n\n def save_embeddings(self, path, name, tokenizer=None):\n \"\"\"\n Saves the embedding with the given name. 
If a tokenizer is passed as well the tokenizer is saved together with\n the embedding.\n\n Args:\n path: The path where the embedding should be saved\n name: The name of the embedding that should be saved\n tokenizer: optionally a tokenizer to save with the embedding (default is None)\n\n \"\"\"\n if self.active_embeddings == name:\n self.loaded_embeddings[name] = self.get_input_embeddings()\n os.makedirs(path, exist_ok=True)\n embedding_path = os.path.join(path, EMBEDDING_FILE)\n torch.save(self.loaded_embeddings[name].weight, embedding_path)\n if tokenizer:\n tokenizer_path = os.path.join(path, TOKENIZER_PATH)\n tokenizer.save_pretrained(tokenizer_path)\n\n def set_active_embeddings(self, name):\n \"\"\"\n Sets the active embedding for the forward pass of the model\n\n Args:\n name: The name of the embedding that should be used\n\n \"\"\"\n self.loaded_embeddings[self.active_embeddings] = self.get_input_embeddings()\n self.set_input_embeddings(self.loaded_embeddings[name])\n self._active_embedding = name\n\n @property\n def active_embeddings(self):\n return self._active_embedding\n\n def get_fusion_regularization_loss(self):\n reg_loss = 0.0\n\n target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)\n for i, layer in self.iter_layers():\n for module in layer.modules():\n if isinstance(module, AdapterLayer):\n for _, layer_fusion in module.adapter_fusion_layer.items():\n if hasattr(layer_fusion, \"value\"):\n reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()\n\n return reg_loss\n\n def get_adapter(self, name) -> dict:\n \"\"\"\n Returns a dictionary with all weights of the adapter with the specified name.\n\n Args:\n name (str): The adapter name.\n\n Returns:\n dict: A nested dictionary containing the weights of the adapter. 
The dictionary is structured as follow:\n {<layer id>: {<module location>: <nn.Module>}}.\n \"\"\"\n destination = defaultdict(dict)\n\n # use a custom index to ensure numbering is from 0 to N layers\n for i, (_, layer) in enumerate(self.iter_layers()):\n for module in layer.modules():\n if isinstance(module, AdapterLayerBase):\n adapter_module = module.get_adapter(name)\n if adapter_module is not None:\n destination[i][module.location_key] = adapter_module\n\n return dict(destination)\n\n def eject_prefix_tuning(self, name: str):\n \"\"\"\n Converts the prefix tuning with the given name from the reparameterized form into the flat form.\n\n Args:\n name (str): The name of the prefix tuning.\n \"\"\"\n for module in self.modules():\n if isinstance(module, PrefixTuningPool):\n if name in module.prefix_tunings:\n module.prefix_tunings[name].eject()\n\n\n@inherit_doc\nclass ModelWithHeadsAdaptersMixin(ModelAdaptersMixin):\n \"\"\"\n Mixin adding support for loading/ saving adapters to transformer models with head(s).\n \"\"\"\n\n def __init__(self, config, *args, **kwargs):\n super().__init__(config, *args, **kwargs)\n self._convert_to_flex_head = False\n\n def set_shared_parameters(self, param):\n self.shared_parameters = param\n if self.base_model is not self:\n self.base_model.shared_parameters = self.shared_parameters\n\n def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:\n \"\"\"\n Iterates over all layers of the model.\n \"\"\"\n if self.base_model is self:\n return super().iter_layers()\n else:\n return self.base_model.iter_layers()\n\n def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):\n \"\"\"\n Adds a new adapter module of the specified type to the model.\n\n Args:\n adapter_name (str): The name of the adapter module to be added.\n config (str or dict, optional): The adapter configuration, can be either:\n\n - the string identifier of a pre-defined configuration dictionary\n - a configuration dictionary specifying the full config\n - if not given, the default configuration for this adapter type will be used\n overwrite_ok (bool, optional):\n Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.\n set_active (bool, optional):\n Set the adapter to be the active one. By default (False), the adapter is added but not activated.\n\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n super().add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)\n else:\n self.base_model.add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)\n\n def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):\n \"\"\"\n Sets the model into mode for training the given adapters. If self.base_model is self, must inherit from a class\n that implements this method, to preclude infinite recursion\n \"\"\"\n if self.base_model is self:\n super().train_adapter(adapter_setup, train_embeddings)\n else:\n self.base_model.train_adapter(adapter_setup, train_embeddings)\n\n def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"\n Sets the model into mode for training of adapter fusion determined by a list of adapter names. 
If\n self.base_model is self, must inherit from a class that implements this method, to preclude infinite recursion\n \"\"\"\n if self.base_model is self:\n super().train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n else:\n self.base_model.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n\n def save_head(self, save_directory: str, head_name: str = None):\n loader = PredictionHeadLoader(self)\n loader.save(save_directory, name=head_name)\n\n def load_head(self, save_directory, load_as=None, id2label=None, **kwargs):\n loader = PredictionHeadLoader(self, convert_to_flex_head=self._convert_to_flex_head)\n return loader.load(save_directory, load_as=load_as, id2label=id2label, **kwargs)\n\n def save_adapter(\n self,\n save_directory: str,\n adapter_name: str,\n with_head: bool = True,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().save_adapter(\n save_directory,\n adapter_name,\n meta_dict=meta_dict,\n custom_weights_loaders=custom_weights_loaders,\n )\n\n def load_adapter(\n self,\n adapter_name_or_path: str,\n config: Union[dict, str] = None,\n version: str = None,\n model_name: str = None,\n load_as: str = None,\n source: str = None,\n with_head: bool = True,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n leave_out: Optional[List[int]] = None,\n id2label=None,\n set_active: bool = False,\n **kwargs\n ) -> str:\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(\n PredictionHeadLoader(\n self,\n error_on_missing=False,\n convert_to_flex_head=self._convert_to_flex_head,\n )\n )\n # Support passing a num_labels for compatibility reasons. Convert to label map here.\n num_labels = kwargs.pop(\"num_labels\", None)\n if num_labels is not None:\n id2label = {i: \"LABEL_\" + str(i) for i in range(num_labels)}\n return super().load_adapter(\n adapter_name_or_path,\n config=config,\n version=version,\n model_name=model_name,\n load_as=load_as,\n source=source,\n custom_weights_loaders=custom_weights_loaders,\n leave_out=leave_out,\n id2label=id2label,\n set_active=set_active,\n **kwargs,\n )\n\n def save_all_adapters(\n self,\n save_directory: str,\n with_head: bool = True,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().save_all_adapters(\n save_directory,\n meta_dict=meta_dict,\n custom_weights_loaders=custom_weights_loaders,\n )\n\n def save_adapter_fusion(\n self,\n save_directory: str,\n adapter_names: Union[Fuse, list, str],\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n with_head: Union[bool, str] = False,\n ):\n \"\"\"\n Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded\n using `load_adapter_fusion()`.\n\n Args:\n save_directory (str): Path to a directory where the AdapterFusion should be saved.\n adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.\n with_head (Union[bool, str]):\n If True, will save a head with the same name as the AdapterFusionLayer. 
If a string, this will be used\n as the name of the head to be saved.\n\n Raises:\n ValueError: If the given AdapterFusion name is invalid.\n \"\"\"\n super().save_adapter_fusion(save_directory, adapter_names, meta_dict, custom_weights_loaders)\n\n if with_head:\n # Make sure to cover the different options for adapter_names\n if isinstance(with_head, str):\n head_name = with_head\n elif isinstance(adapter_names, Fuse):\n head_name = adapter_names.name\n elif isinstance(adapter_names, list):\n head_name = \",\".join(adapter_names)\n else:\n head_name = adapter_names\n if head_name not in self.heads:\n raise ValueError(\"No head with name {} found\".format(head_name))\n loader = PredictionHeadLoader(self)\n loader.save(save_directory, head_name)\n\n def load_adapter_fusion(\n self,\n adapter_fusion_name_or_path: str,\n load_as: str = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n set_active: bool = False,\n with_head: bool = True,\n **kwargs\n ) -> str:\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().load_adapter_fusion(adapter_fusion_name_or_path, load_as, custom_weights_loaders, set_active)\n\n def save_all_heads(self, save_directory):\n for head_name in self.heads:\n save_path = join(save_directory, head_name)\n self.save_head(save_path, head_name)\n\n def get_labels(self):\n return list(self.config.id2label.values())\n\n def get_labels_dict(self):\n return self.config.id2label\n\n def get_adapter(self, name):\n \"\"\"\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n return super().get_adapter(name)\n else:\n return self.base_model.get_adapter(name)\n\n def load_embeddings(self, path: str, name: str):\n if self.base_model is self:\n return super().load_embeddings(path, name)\n else:\n return self.base_model.load_embeddings(path, name)\n\n def save_embeddings(self, path, name, tokenizer=None):\n if self.base_model is self:\n return super().save_embeddings(path, name, tokenizer)\n else:\n return self.base_model.save_embeddings(path, name, tokenizer)\n\n def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):\n if self.base_model is None:\n return super().add_embeddings(name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim)\n else:\n return self.base_model.add_embeddings(\n name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim\n )\n\n def set_active_embeddings(self, name):\n if self.base_model is None:\n return super().set_active_embeddings(name)\n else:\n return self.base_model.set_active_embeddings(name)\n\n def delete_embeddings(self, name):\n if self.base_model is None:\n return super().delete_embeddings(name)\n else:\n return self.base_model.delete_embeddings(name)\n" ]
[ [ "torch.load", "torch.save", "torch.nn.Embedding.from_pretrained", "torch.nn.Embedding", "torch.zeros", "torch.nn.ModuleDict" ] ]
aaron8tang/qtrader
[ "e5c1e175e19b20381f9140fb76c30ad5cb81f01c", "e5c1e175e19b20381f9140fb76c30ad5cb81f01c" ]
[ "qtrader/simulation/aaft.py", "qtrader/agents/pretrainer/objectives.py" ]
[ "import numpy as np\nimport pandas as pd\n\n\ndef AAFT(df, random=np.random.uniform, random_state=None):\n \"\"\"Amplitude Adjusted Fourier Transform Baseline Generator.\"\"\"\n # set random seed\n np.random.seed(random_state)\n # Operate on numpy.ndarray\n ts = df.values\n # 2d time-series format\n _ts = ts.reshape(len(ts), -1)\n # Odd number of samples\n if len(_ts) % 2 != 0:\n _ts = _ts[1:, :]\n # Generated time-series\n ts_gen = np.empty_like(_ts)\n for i, tsi in enumerate(_ts.T):\n # Fourier Transaformation (real-valued signal)\n F_tsi = np.fft.rfft(tsi)\n # Randomization of Phase\n rv_phase = np.exp(random(0, np.pi, len(F_tsi)) * 1.0j)\n # Generation of new time-series\n F_tsi_new = F_tsi * rv_phase\n # Inverse Fourier Transformation\n ts_gen[:, i] = np.fft.irfft(F_tsi_new)\n # Create pandas DataFrame\n df_gen = pd.DataFrame(ts_gen, columns=df.columns,\n index=df.index[-len(ts_gen):])\n return df_gen\n", "import numpy as np\n\nfrom qtrader.utils.numpy import eps\n\n\ndef _mu_p(w: np.ndarray, r: np.ndarray) -> float:\n \"\"\"Portfolio Returns.\"\"\"\n return np.dot(w.T, r)\n\n\ndef _sigma_p(w: np.ndarray, Sigma: np.ndarray) -> float:\n \"\"\"Portoflio Variance\"\"\"\n return np.dot(np.dot(w.T, Sigma), w)\n\n\ndef _trans_costs(w: np.ndarray, w0: np.ndarray, coef: float) -> float:\n \"\"\"Transaction Costs.\"\"\"\n return np.sum(np.abs(w0 - w)) * coef\n\n\ndef risk_aversion(w: np.ndarray, mu: np.ndarray,\n Sigma: np.ndarray, w0: np.ndarray,\n alpha: float, beta: float) -> float:\n \"\"\"Risk Aversion with Transaction Costs.\"\"\"\n assert Sigma.shape[0] == Sigma.shape[1]\n assert mu.shape[0] == Sigma.shape[0]\n assert w.shape == w0.shape\n # mean - alpha * variance - transaction_costs\n return - (_mu_p(w, mu) - alpha * _sigma_p(w, Sigma) - _trans_costs(w, w0, beta))\n\n\ndef sharpe_ratio(w: np.ndarray, mu: np.ndarray,\n Sigma: np.ndarray, w0: np.ndarray,\n beta: float) -> float:\n \"\"\"Sharpe Ratio with Transaction Costs.\"\"\"\n assert Sigma.shape[0] == Sigma.shape[1]\n assert mu.shape[0] == Sigma.shape[0]\n assert w.shape == w0.shape\n # mean - alpha * variance - transaction_costs\n return - ((_mu_p(w, mu) - _trans_costs(w, w0, beta)) / (_sigma_p(w, Sigma) + eps))\n" ]
[ [ "numpy.empty_like", "numpy.fft.rfft", "numpy.random.seed", "numpy.fft.irfft" ], [ "numpy.dot", "numpy.abs" ] ]
camila-contreras/CD4ML-Scenarios
[ "806f812990c7cf33b5f78456f0065012b5b4cd35" ]
[ "cd4ml/model_tracking/validation_metrics.py" ]
[ "from sklearn import metrics\nimport numpy as np\nimport logging\nlogger = logging.getLogger(__name__)\n\n# TODO: add others\n# TODO: add ability to include generic functions\n\n\ndef r2_score(true_target, prediction):\n # R2 metric\n return metrics.r2_score(y_true=true_target, y_pred=prediction)\n\n\ndef rms_score(true_target, prediction):\n # Root mean square metric\n return np.sqrt(((prediction - true_target)**2).mean())\n\n\ndef mad_score(true_target, prediction):\n # mean absolute deviation metric\n return abs(prediction - true_target).mean()\n\n\ndef get_validation_metrics(metric_names, true_prediction_function):\n logger.info('Getting predictions')\n data = list(true_prediction_function())\n logger.info('Done with predictions')\n assert len(data) > 0\n true_target, prediction = zip(*data)\n\n true_target = np.array(true_target)\n prediction = np.array(prediction)\n\n n_validated = len(true_target)\n logger.info('n_validated: %s' % n_validated)\n\n validation_metrics = {}\n if 'r2_score' in metric_names:\n validation_metrics['r2_score'] = r2_score(true_target, prediction)\n logger.info('r2_score : {}'.format(validation_metrics['r2_score']))\n\n if 'rms_score' in metric_names:\n validation_metrics['rms_score'] = rms_score(true_target, prediction)\n logger.info('rms_scoring: {}'.format(validation_metrics['rms_score']))\n\n if 'mad_score' in metric_names:\n validation_metrics['mad_score'] = mad_score(true_target, prediction)\n logger.info('mad_scoring: {}'.format(validation_metrics['mad_score']))\n\n if 'num_validated' in metric_names:\n validation_metrics['num_validated'] = n_validated\n\n logger.info('Done validation metrics')\n\n return validation_metrics\n" ]
[ [ "numpy.array", "sklearn.metrics.r2_score" ] ]
yumorozov/scikit-learn-intelex
[ "7a39c0a0e208b49f209168b01fb50206f962175f" ]
[ "examples/daal4py/dbscan_spmd.py" ]
[ "#===============================================================================\n# Copyright 2014 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\n# daal4py DBSCAN example for distributed memory systems; SPMD mode\n# run like this:\n# mpirun -n 4 python ./dbscan_spmd.py\n\nimport daal4py as d4p\nimport numpy as np\n\n\ndef main(method='defaultDense'):\n infile = \"./data/batch/dbscan_dense.csv\"\n epsilon = 0.04\n minObservations = 45\n\n # Load the data\n data = np.loadtxt(infile, delimiter=',')\n rpp = int(data.shape[0] / d4p.num_procs())\n data = data[rpp * d4p.my_procid(): rpp * d4p.my_procid() + rpp, :]\n\n # configure dbscan main object\n algo = d4p.dbscan(minObservations=minObservations, epsilon=epsilon, distributed=True)\n # and compute\n result = algo.compute(data)\n\n return result\n\n\nif __name__ == \"__main__\":\n # Initialize SPMD mode\n d4p.daalinit()\n result = main()\n print(\"\\nResults on node with id = \", d4p.my_procid(), \" :\\n\",\n \"\\nFirst 10 cluster assignments:\\n\", result.assignments[0:10],\n \"\\nNumber of clusters:\\n\", result.nClusters)\n d4p.daalfini()\n" ]
[ [ "numpy.loadtxt" ] ]
Alex-Roudjiat/Federated-ML-AI-Federated-ML-
[ "8ccc24cf2c01b868988f5d5bd65f1666cf5526bc" ]
[ "fedml_api/model/cv/darts/utils.py" ]
[ "import os\nimport shutil\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\n\nclass AvgrageMeter(object):\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n\n\ndef accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nclass Cutout(object):\n def __init__(self, length):\n self.length = length\n\n def __call__(self, img):\n h, w = img.size(1), img.size(2)\n mask = np.ones((h, w), np.float32)\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n\n mask[y1: y2, x1: x2] = 0.\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img *= mask\n return img\n\n\ndef count_parameters_in_MB(model):\n return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if \"auxiliary\" not in name) / 1e6\n\n\ndef save_checkpoint(state, is_best, save):\n filename = os.path.join(save, 'checkpoint.pth.tar')\n torch.save(state, filename)\n if is_best:\n best_filename = os.path.join(save, 'model_best.pth.tar')\n shutil.copyfile(filename, best_filename)\n\n\ndef save(model, model_path):\n torch.save(model.state_dict(), model_path)\n\n\ndef load(model, model_path):\n model.load_state_dict(torch.load(model_path))\n\n\ndef drop_path(x, drop_prob):\n if drop_prob > 0.:\n keep_prob = 1. - drop_prob\n mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))\n x.div_(keep_prob)\n x.mul_(mask)\n return x\n\n\ndef create_exp_dir(path, scripts_to_save=None):\n if not os.path.exists(path):\n os.mkdir(path)\n print('Experiment dir : {}'.format(path))\n\n if scripts_to_save is not None:\n os.mkdir(os.path.join(path, 'scripts'))\n for script in scripts_to_save:\n dst_file = os.path.join(path, 'scripts', os.path.basename(script))\n shutil.copyfile(script, dst_file)\n" ]
[ [ "numpy.ones", "torch.load", "torch.save", "numpy.clip", "torch.from_numpy", "numpy.random.randint" ] ]
alexbarcelo/dislib
[ "989f81f235ae30b17410a8d805df258c7d931b38", "989f81f235ae30b17410a8d805df258c7d931b38" ]
[ "examples/linear_regression_plot.py", "tests/test_decision_tree.py" ]
[ "import numpy as np\nfrom pylab import scatter, plot, show\n\nimport dislib as ds\nfrom dislib.regression import LinearRegression\n\n\ndef main():\n \"\"\"\n Linear regression example with plot\n \"\"\"\n\n # Example data\n x = np.array([1000, 4000, 5000, 4500, 3000, 4000, 9000, 11000, 15000,\n 12000, 7000, 3000])\n y = np.array([9914, 40487, 54324, 50044, 34719, 42551, 94871, 118914,\n 158484, 131348, 78504, 36284])\n x_ds = ds.array(x[:, np.newaxis], (4, 1))\n y_ds = ds.array(y[:, np.newaxis], (4, 1))\n reg = LinearRegression()\n reg.fit(x_ds, y_ds)\n coef = reg.coef_.collect()\n intercept = reg.intercept_.collect()\n print(coef, intercept)\n\n # plot_result:\n scatter(x, y, marker='x')\n x_mesh = np.linspace(min(x), max(x), 1000)\n plot(x_mesh, [coef*x + intercept for x in x_mesh])\n show()\n\n\nif __name__ == \"__main__\":\n main()\n", "import unittest\n\nimport numpy as np\nfrom pycompss.api.api import compss_wait_on\n\nimport dislib as ds\nimport dislib.trees.decision_tree as dt\nfrom dislib.trees import RfClassifierDataset, transform_to_rf_dataset\n\n\nclass DecisionTreeTest(unittest.TestCase):\n def test_decision_tree(self):\n x1 = np.array(\n [\n [0.3, -0.3],\n [0.4, -0.5],\n [0.5, -0.4],\n [0.3, 0.3],\n [0.4, 0.5],\n [0.5, 0.4],\n [-0.3, -0.3],\n [-0.4, -0.5],\n [-0.5, -0.4],\n ]\n )\n x2 = np.array([[0.4, -0.3], [0.4, 0.3], [-0.4, -0.3]])\n y1 = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])\n y2 = np.array([0, 1, 2])\n\n x1_ds = ds.array(x1, (3, 2))\n x2_ds = ds.array(x2, (3, 2))\n y1_ds = ds.array(y1[:, np.newaxis], (3, 1))\n\n data1 = transform_to_rf_dataset(\n x1_ds, y1_ds, RfClassifierDataset, features_file=True\n )\n\n # Model\n try_features = 2\n max_depth = np.inf\n distr_depth = 2\n sklearn_max = 1e8\n bootstrap = True\n seed = 0\n random_state = np.random.RandomState(seed)\n n_samples, n_features = x1.shape\n n_classes = np.bincount(y1).shape[0]\n features_mmap = x1.T\n\n # Test bootstrap\n sample1, y_s1 = compss_wait_on(\n dt._sample_selection(n_samples, y1, True, seed)\n )\n sample2, y_s2 = compss_wait_on(\n dt._sample_selection(n_samples, y1, False, seed)\n )\n self.assertTrue(\n np.array_equal(sample1, np.array([0, 2, 3, 3, 3, 4, 5, 5, 7]))\n )\n self.assertTrue(\n np.array_equal(sample2, np.array([0, 1, 2, 3, 4, 5, 6, 7, 8]))\n )\n self.assertTrue(\n np.array_equal(y_s1, np.array([0, 0, 1, 1, 1, 1, 1, 1, 2]))\n )\n self.assertTrue(\n np.array_equal(y_s2, np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]))\n )\n\n # Assert split wrapper\n sample, y_s = sample2, y_s2\n with self.assertRaises(ValueError):\n dt._split_node_wrapper(\n sample,\n n_features,\n y_s,\n n_classes,\n try_features,\n random_state,\n samples_file=None,\n features_file=None,\n )\n\n split = dt._split_node_wrapper(\n sample,\n n_features,\n y_s,\n n_classes,\n try_features,\n random_state,\n samples_file=data1.samples_path,\n features_file=data1.features_path,\n )\n split = compss_wait_on(split)\n node_info, left_group, y_l, right_group, y_r = split\n self.assertTrue(node_info.index in (0, 1))\n if node_info.index == 0:\n self.assertTrue(np.array_equal(left_group, np.array([6, 7, 8])))\n self.assertTrue(np.array_equal(y_l, np.array([2, 2, 2])))\n self.assertTrue(\n np.array_equal(right_group, np.array([0, 1, 2, 3, 4, 5]))\n )\n self.assertTrue(np.array_equal(y_r, np.array([0, 0, 0, 1, 1, 1])))\n self.assertAlmostEqual(node_info.value, 0.0)\n split_l = dt._compute_split(\n left_group,\n n_features,\n y_l,\n n_classes,\n try_features,\n features_mmap,\n random_state,\n )\n node_info, left_group, y_l, right_group, y_r 
= split_l\n self.assertTrue(np.array_equal(left_group, np.array([6, 7, 8])))\n self.assertTrue(np.array_equal(y_l, np.array([2, 2, 2])))\n self.assertTrue(np.array_equal(right_group, np.array([])))\n self.assertTrue(np.array_equal(y_r, np.array([])))\n self.assertTrue(\n np.array_equal(node_info.frequencies, np.array([0, 0, 3]))\n )\n self.assertEqual(node_info.size, 3)\n self.assertEqual(node_info.target, 2)\n elif node_info.index == 1:\n self.assertTrue(\n np.array_equal(left_group, np.array([0, 1, 2, 6, 7, 8]))\n )\n self.assertTrue(np.array_equal(y_l, np.array([0, 0, 0, 2, 2, 2])))\n self.assertTrue(np.array_equal(right_group, np.array([3, 4, 5])))\n self.assertTrue(np.array_equal(y_r, np.array([1, 1, 1])))\n self.assertAlmostEqual(node_info.value, 0.0)\n split_r = dt._compute_split(\n right_group,\n n_features,\n y_r,\n n_classes,\n try_features,\n features_mmap,\n random_state,\n )\n node_info, left_group, y_l, right_group, y_r = split_r\n self.assertTrue(np.array_equal(left_group, np.array([3, 4, 5])))\n self.assertTrue(np.array_equal(y_l, np.array([1, 1, 1])))\n self.assertTrue(np.array_equal(right_group, np.array([])))\n self.assertTrue(np.array_equal(y_r, np.array([])))\n self.assertTrue(\n np.array_equal(node_info.frequencies, np.array([0, 3, 0]))\n )\n self.assertEqual(node_info.size, 3)\n self.assertEqual(node_info.target, 1)\n\n # Test tree\n tree = dt.DecisionTreeClassifier(\n try_features,\n max_depth,\n distr_depth,\n sklearn_max,\n bootstrap,\n random_state,\n )\n tree.fit(data1)\n y_pred = compss_wait_on(tree.predict(x2_ds))\n self.assertTrue(np.array_equal(y_pred, y2))\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array" ], [ "numpy.array", "numpy.random.RandomState", "numpy.bincount", "numpy.array_equal" ] ]
JeffreyLuu/apexe3-api
[ "081da51370e8b01b1e8169e4137a99636ea71866" ]
[ "examples/python/real_time_insights.py" ]
[ "'''\n/**\n * real_time_insights.py\n * \n * Streams a real-time insights for the supplied pair\n * An example of the real-time insights is available here:\n * https://app.ae3platform.com/insights\n * \n * Disclaimer:\n * APEX:E3 is a financial technology company based in the United Kingdom https://www.apexe3.com\n * \n * None of this code constitutes financial advice. APEX:E3 is not \n * liable for any loss resulting from the use of this code or the API. \n * \n * This code is governed by The MIT License (MIT)\n * \n * Copyright (c) 2020 APEX:E3 Team\n * \n **/\n'''\nimport sys\nsys.path.append('..')\nfrom apexe3.apexe3 import initialise\nfrom apexe3.apexe3 import initialise_stream\nfrom apexe3.apexe3 import initialise_insights_for_pair\n\nimport pandas as pd\n\n#Change these values to a base or quote you are interested in\nbase = 'btc'\nquote = 'usdt'\n\ndef process_spread(event):\n print('Best spreads for ' + str(base) +' '+ str(quote))\n table=pd.DataFrame(event[\"values\"])\n table.columns = ['exchange','base','quote','misc','strSpread', 'spread']\n table = table[['exchange','spread']]\n print(table)\n print('------------------------------------------')\n\ndef process_arbitrage(event):\n print('Arbitrage Opportunity For ' + str(base) +' '+ str(quote))\n table=pd.DataFrame(event[\"values\"])\n table.columns = ['exchange','base','quote','misc','strSpread', 'spread']\n table = table[['base','quote','spread']]\n print(table)\n print('------------------------------------------') \n\ndef process_whales(event):\n print('Largest whales for ' + str(base) +' '+ str(quote))\n table=pd.DataFrame(event[\"values\"])\n table.columns = ['exchange','base','quote','misc','strSize (USD)', ' size (usd)']\n table = table[['exchange','size (usd)']]\n print(table)\n print('------------------------------------------')\n\ndef process_bid_imbalances(event):\n table=pd.DataFrame(event[\"values\"])\n print('bid imbalance for ' + str(base) +' '+ str(quote))\n table.columns = ['exchange','base','quote','misc','strStrength (USD)', 'bid imbalance']\n table = table[['exchange','bid imbalance']]\n print(table)\n print('------------------------------------------')\n\ndef process_ask_imbalances(event):\n table=pd.DataFrame(event[\"values\"])\n print('ask imbalance for ' + str(base) +' '+str(quote))\n table.columns = ['exchange','base','quote','misc','strStrength (USD)', 'ask imbalance']\n table = table[['exchange','ask imbalance']]\n print(table)\n print('------------------------------------------') \n\n\ndef init():\n with open('./../secret.txt', 'r') as f:\n clientId = f.readline().strip()\n clientSecret = f.readline().strip()\n f.close()\n emitter = initialise(clientId, clientSecret)\n emitter.on('SPREAD', process_spread)\n \n #UNCOMMENT TO RECIEVE UPDATES FOR THESE ANALYTICS\n #emitter.on('WHALE', process_whales)\n #emitter.on('VOI_BID', process_bid_imbalances)\n #emitter.on('VOI_ASK', process_ask_imbalances)\n #emitter.on('ARBITRAGE', process_arbitrage)\n\n\nif __name__ == \"__main__\":\n init()\n initialise_insights_for_pair(base, quote)" ]
[ [ "pandas.DataFrame" ] ]
Ravi-0809/question-generation
[ "9065a3b47293b8a69a0548af1f6bedd4a4aa7f9c" ]
[ "src/discriminator/instance.py" ]
[ "import sys,json,time,os\nsys.path.insert(0, \"/Users/tom/Dropbox/msc-ml/project/src/\")\nsys.path.insert(0, \"/cs/student/msc/ml/2017/thosking/dev/msc-project/src/\")\n\nimport tensorflow as tf\nimport numpy as np\n\nimport discriminator.config\nfrom discriminator.model import Model\nfrom discriminator.prepro import convert_to_features, word_tokenize\nimport helpers.loader as loader\nimport flags\n\nmem_limit=1\n\n\n# This provides a somewhat normalised interface to a pre-trained QANet model - some tweaks have been made to get it to play nicely when other models are spun up\nclass DiscriminatorInstance():\n def __init__(self, trainable=False, path=None, log_slug=None, force_init=False):\n config = tf.app.flags.FLAGS\n self.run_id = str(int(time.time())) + (\"-\"+log_slug if log_slug is not None else \"\")\n self.trainable = trainable\n self.load_from_chkpt(path, force_init)\n if trainable:\n self.summary_writer = tf.summary.FileWriter(config.log_dir+'disc/'+self.run_id, self.model.graph)\n def __del__(self):\n self.sess.close()\n\n\n def load_from_chkpt(self, path=None, force_init=False):\n\n config = tf.app.flags.FLAGS\n with open(config.disc_word_emb_file, \"r\") as fh:\n word_mat = np.array(json.load(fh), dtype=np.float32)\n with open(config.disc_char_emb_file, \"r\") as fh:\n char_mat = np.array(json.load(fh), dtype=np.float32)\n # with open(config.disc_test_meta, \"r\") as fh:\n # meta = json.load(fh)\n\n with open(config.disc_word_dictionary, \"r\") as fh:\n self.word_dictionary = json.load(fh)\n with open(config.disc_char_dictionary, \"r\") as fh:\n self.char_dictionary = json.load(fh)\n\n\n self.model = Model(config, None, word_mat, char_mat, trainable=self.trainable, demo = True, opt=False)\n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_limit,allow_growth = True,visible_device_list='0')\n self.sess = tf.Session(graph=self.model.graph, config=tf.ConfigProto(gpu_options=gpu_options,allow_soft_placement=True))\n\n with self.model.graph.as_default():\n self.saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)\n if force_init and path is not None:\n chkpt_path = tf.train.latest_checkpoint(path)\n print(\"Loading discriminator from \", chkpt_path)\n\n restore_vars= [v for v in tf.trainable_variables() if v.name[:13] != 'Output_Layer/']\n self.sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(restore_vars)\n saver.restore(self.sess, chkpt_path)\n elif path is not None:\n\n\n chkpt_path = tf.train.latest_checkpoint(path)\n print(\"Loading discriminator from \", chkpt_path)\n self.saver.restore(self.sess, chkpt_path)\n if config.disc_decay < 1.0:\n self.sess.run(self.model.assign_vars)\n else:\n\n os.makedirs(config.model_dir+'disc/'+self.run_id)\n self.sess.run(tf.global_variables_initializer())\n\n\n\n def save_to_chkpt(self, path, step):\n self.saver.save(self.sess, path+'disc/'+self.run_id+'/model.checkpoint', global_step=step)\n\n def char_pos_to_word(self, text, tokens, char_pos):\n ix=0\n for t,token in enumerate(tokens):\n # print(token, t, ix, char_pos)\n for char in token:\n ix = text.find(char, ix)\n # ix += 1\n if ix >= char_pos:\n # print(\"***\", token, char, t, ix, char_pos)\n return t\n\n def prepro(self,contexts, questions, ans_text, ans_pos):\n config = tf.app.flags.FLAGS\n\n\n # query = zip(contexts, questions)\n toks = [word_tokenize(ctxt.replace(\"''\", '\" ').replace(\"``\", '\" ').lower()) for ctxt in contexts]\n ans_tok_pos = [self.char_pos_to_word(contexts[ix].lower(), toks[ix], ans_pos[ix]) for ix in 
range(len(toks))]\n ans_lens = [len(word_tokenize(ans)) for ans in ans_text]\n ans_toks = [toks[ix][ans:ans+ans_lens[ix]] for ix,ans in enumerate(ans_tok_pos)]\n\n # print(ans_pos)\n # print(ans_toks)\n # print(toks)\n # exit()\n # ans_start = [toks[i].index(ans_tok[0]) for i,ans_tok in enumerate(ans_toks)]\n # ans_end = [ans_start[i] + len(ans_toks[i])-1 for i in range(len(ans_toks))]\n ans_start = ans_pos\n ans_end = [ans+ans_lens[ix]-1 for ix,ans in enumerate(ans_pos)]\n questions = [q.replace(loader.PAD,\"\").replace(loader.EOS,\"\") for q in questions]\n query = list(zip(contexts, questions))\n\n # # the QANet code has fixed batch sizes - so pad it\n # length=config.batch_size\n # if len(query) < config.batch_size:\n # length=len(query)\n # query += [[\"blank\",\"blank\"] for i in range(config.batch_size-length)]\n # ans_start += [0 for i in range(config.batch_size-length)]\n # ans_end += [0 for i in range(config.batch_size-length)]\n\n feats=[convert_to_features(config, q, self.word_dictionary, self.char_dictionary)+(ans_start[ix],ans_end[ix]) for ix,q in enumerate(query)]\n return feats\n\n def get_pred(self, contexts, questions, ans_text, ans_pos):\n length = len(contexts)\n\n feats = self.prepro(contexts,questions,ans_text,ans_pos)\n c,ch,q,qh,ans_start,ans_end = zip(*feats)\n fd = {'context:0': c,\n 'question:0': q,\n 'context_char:0': ch,\n 'question_char:0': qh,\n 'answer_index1:0': ans_start,\n 'answer_index2:0': ans_end}\n\n pred = self.sess.run(self.model.probs, feed_dict = fd)\n\n return pred[:length]\n\n def get_nll(self, contexts, questions, ans_text, ans_pos, gold_labels):\n length = len(contexts)\n\n feats = self.prepro(contexts,questions,ans_text,ans_pos)\n c,ch,q,qh,ans_start,ans_end = zip(*feats)\n fd = {'context:0': c,\n 'question:0': q,\n 'context_char:0': ch,\n 'question_char:0': qh,\n 'answer_index1:0': ans_start,\n 'answer_index2:0': ans_end,\n 'gold_class:0': gold_labels}\n\n nll = self.sess.run(self.model.nll, feed_dict = fd)\n\n return nll[:length]\n\n def train_step(self, contexts, questions, ans_text, ans_pos, gold_labels, step):\n if not self.trainable:\n exit('train_step called on non-trainable discriminator!')\n config = tf.app.flags.FLAGS\n\n length = len(contexts)\n gold_labels = gold_labels\n feats = self.prepro(contexts,questions,ans_text,ans_pos)\n c,ch,q,qh,ans_start,ans_end = zip(*feats)\n fd = {'context:0': c,\n 'question:0': q,\n 'context_char:0': ch,\n 'question_char:0': qh,\n 'answer_index1:0': ans_start,\n 'answer_index2:0': ans_end,\n 'gold_class:0': gold_labels,\n self.model.dropout: config.disc_dropout}\n\n _,summ,loss = self.sess.run([self.model.train_op, self.model.train_summary, self.model.loss], feed_dict = fd)\n\n # if step % 25 ==0:\n # print(gold_labels, questions)\n\n self.summary_writer.add_summary(summ, global_step=step)\n\n return loss\n\n\ndef main(_):\n\n from tqdm import tqdm\n import matplotlib.pyplot as plt\n import numpy as np\n from sklearn.metrics import confusion_matrix\n import itertools\n from sklearn.metrics import roc_curve, auc\n\n # squad = loader.load_squad_triples(path=\"./data/\", dev=True, v2=True, as_dict=True)\n # with open(\"./data/squad2_dev_official_output_fixed.json\") as dataset_file:\n # ans_preds = json.load(dataset_file)\n with open(\"./results/out_eval_MALUUBA-CROP-LATENT-GLOVE_test.json\") as dataset_file:\n results = json.load(dataset_file)['results']\n\n\n\n# 1535473379-MALUUBA-CROP-LATENT-GLOVE_train\n# 1535474306-MALUUBA-CROP-LATENT-GLOVE_train_QAINIT\n # disc_path = 
\"./models/saved/discriminator-trained\"\n disc_path = \"./models/saved2/1535473379-MALUUBA-CROP-LATENT-GLOVE_train\"\n # disc_path = \"./models/saved2/1535474306-MALUUBA-CROP-LATENT-GLOVE_train_QAINIT\"\n\n\n disc = DiscriminatorInstance(path=disc_path)\n # disc = DiscriminatorInstance(path=\"./models/disc/1533307366-SQUAD-QANETINIT\")\n\n # output={}\n # for id,candidates in tqdm(ans_preds.items()):\n # ctxt, q, ans_gold, ans_gold_pos, label_gold = squad[id]\n #\n # scores=[]\n # for candidate in candidates:\n # scores.append( disc.get_pred([ctxt], [q], [candidate['text']], [candidate['answer_start']]).tolist()[0] )\n # cand_ix = np.argmax(scores)\n #\n # pred_ans = candidates[cand_ix]['text']\n # pred_score = scores[cand_ix]\n # output[id] = pred_ans if pred_score > 0.5 else \"\"\n #\n # with open(\"./logs/squad2_dev_filtered.json\",\"w\") as fh:\n # json.dump(output, fh)\n\n gold_labels=[]\n pred_labels=[]\n scores=[]\n\n for res in tqdm(results[:3000]):\n # print(res['q_gold'], res['q_pred'])\n gold_score = disc.get_pred([res['c']], [res['q_gold']],[res['a_text']],[res['a_pos']])\n pred_score = disc.get_pred([res['c']], [res['q_pred']],[res['a_text']],[res['a_pos']])\n\n\n gold_labels.append(1)\n gold_labels.append(0)\n pred_labels.append(1.0 * (gold_score[0] > 0.5))\n pred_labels.append(1.0 * (pred_score[0] > 0.5))\n scores.append(gold_score[0])\n scores.append(pred_score[0])\n\n\n print(disc_path)\n print(\"Acc: \", np.mean(np.equal(gold_labels, pred_labels)))\n\n\n\n # oh_labels =np.eye(2)[gold_labels]\n ### disc conf mat\n # gold_labels =['Generated' if l==0 else 'Ground truth' for l in gold_labels]\n # pred_labels =['Generated' if l==0 else 'Ground truth' for l in pred_labels]\n plt.figure(figsize=(4.5,3.5))\n cm = confusion_matrix(gold_labels, pred_labels)\n mat = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(mat)\n plt.imshow(mat, cmap=plt.cm.Blues)\n plt.colorbar()\n tick_marks = np.arange(2)\n plt.xticks(tick_marks,['Generated','Ground truth'], rotation=0)\n plt.yticks(tick_marks, ['Generated','Ground truth'], rotation=90)\n fmt = '.2f'\n thresh = mat.max() / 2.\n for i, j in itertools.product(range(mat.shape[0]), range(mat.shape[1])):\n plt.text(j, i, format(mat[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if mat[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('Actual Source', fontsize=14)\n plt.xlabel('Predicted source', fontsize=14)\n plt.savefig(\"/users/Tom/Dropbox/Apps/Overleaf/Question Generation/figures/disc_cm_latent.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n # plt.show()\n # exit()\n\n\n\n ### disc Roc curves\n fpr, tpr, _ = roc_curve(gold_labels, scores)\n roc_auc = auc(fpr, tpr)\n plt.figure(figsize=(4.5,3.5))\n lw = 2\n plt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Discriminator RoC curve')\n plt.legend(loc=\"lower right\")\n plt.savefig(\"/users/Tom/Dropbox/Apps/Overleaf/Question Generation/figures/disc_roc_latent.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n plt.show()\n exit()\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.ylabel", "tensorflow.GPUOptions", "matplotlib.pyplot.plot", "tensorflow.summary.FileWriter", "matplotlib.pyplot.xticks", "sklearn.metrics.roc_curve", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "sklearn.metrics.auc", "tensorflow.global_variables_initializer", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "tensorflow.app.run", "numpy.equal", "sklearn.metrics.confusion_matrix", "numpy.arange", "matplotlib.pyplot.ylim", "tensorflow.train.Saver", "matplotlib.pyplot.colorbar", "tensorflow.ConfigProto", "matplotlib.pyplot.legend", "tensorflow.trainable_variables", "tensorflow.train.latest_checkpoint", "matplotlib.pyplot.show", "matplotlib.pyplot.yticks", "matplotlib.pyplot.xlabel" ] ]
ProhardONE/python_primer
[ "211e37c1f2fd169269fc4f3c08e8b7e5225f2ad0" ]
[ "ch_5/plot_w.py" ]
[ "# Exercise 5.35\n# Author: Noah Waterfield Price\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef f(x):\n r = np.zeros(len(x))\n r[x < 0] = -x[x < 0] - 5\n r[x >= 0] = x[x >= 0] - 5\n return abs(r)\n\nx = np.linspace(-10, 10, 101)\nplt.plot(x, f(x))\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show", "numpy.linspace" ] ]
KevinMMendez/cimcb_lite
[ "1e6cf7137cd04d6be4ad1ba6fd317077ace08ee8" ]
[ "cimcb_lite/model/BaseModel.py" ]
[ "from abc import ABC, abstractmethod, abstractproperty\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom bokeh.layouts import widgetbox, gridplot, column, row, layout\nfrom bokeh.models import HoverTool, Band\nfrom bokeh.models.widgets import DataTable, Div, TableColumn\nfrom bokeh.models.annotations import Title\nfrom bokeh.plotting import ColumnDataSource, figure, output_notebook, show\nfrom scipy import interp\nfrom sklearn import metrics\nfrom sklearn.utils import resample\nfrom ..bootstrap import Perc, BC, BCA\nfrom ..plot import scatter, scatterCI, boxplot, distribution, permutation_test, roc_calculate, roc_plot\nfrom ..utils import binary_metrics\n\n\nclass BaseModel(ABC):\n \"\"\"Base class for models: PLS_SIMPLS.\"\"\"\n\n @abstractmethod\n def __init__(self):\n pass\n\n @abstractmethod\n def train(self):\n \"\"\"Trains the model.\"\"\"\n pass\n\n @abstractmethod\n def test(self):\n \"\"\"Tests the model.\"\"\"\n pass\n\n @abstractproperty\n def bootlist(self):\n \"\"\"A list of attributes for bootstrap resampling.\"\"\"\n pass\n\n def evaluate(self, testset=None, specificity=False, cutoffscore=False, bootnum=1000):\n \"\"\"Plots a figure containing a Violin plot, Distribution plot, ROC plot and Binary Metrics statistics.\n\n Parameters\n ----------\n testset : array-like, shape = [n_samples, 2] or None, (default None)\n If testset is None, use train Y and train Y predicted for evaluate. Alternatively, testset is used to evaluate model in the format [Ytest, Ypred].\n\n specificity : number or False, (default False)\n Use the specificity to draw error bar. When False, use the cutoff score of 0.5.\n\n cutoffscore : number or False, (default False)\n Use the cutoff score to draw error bar. When False, use the specificity selected.\n\n bootnum : a positive integer, (default 1000)\n The number of bootstrap samples used in the computation.\n \"\"\"\n Ytrue_train = self.Y\n Yscore_train = self.Y_pred.flatten()\n\n # Get Ytrue_test, Yscore_test from testset\n if testset is not None:\n Ytrue_test = np.array(testset[0])\n Yscore_test = np.array(testset[1])\n\n # Error checking\n if len(Ytrue_test) != len(Yscore_test):\n raise ValueError(\"evaluate can't be used as length of Ytrue does not match length of Yscore in test set.\")\n if len(np.unique(Ytrue_test)) != 2:\n raise ValueError(\"Ytrue_test needs to have 2 groups. 
There is {}\".format(len(np.unique(Y))))\n if np.sort(np.unique(Ytrue_test))[0] != 0:\n raise ValueError(\"Ytrue_test should only contain 0s and 1s.\")\n if np.sort(np.unique(Ytrue_test))[1] != 1:\n raise ValueError(\"Ytrue_test should only contain 0s and 1s.\")\n\n # Get Yscore_combined and Ytrue_combined_name (Labeled Ytrue)\n Yscore_combined = np.concatenate([Yscore_train, Yscore_test])\n Ytrue_combined = np.concatenate([Ytrue_train, Ytrue_test + 2]) # Each Ytrue per group is unique\n Ytrue_combined_name = Ytrue_combined.astype(np.str)\n Ytrue_combined_name[Ytrue_combined == 0] = \"Train (0)\"\n Ytrue_combined_name[Ytrue_combined == 1] = \"Train (1)\"\n Ytrue_combined_name[Ytrue_combined == 2] = \"Test (0)\"\n Ytrue_combined_name[Ytrue_combined == 3] = \"Test (1)\"\n\n # Expliclity states which metric and value is used for the error_bar\n if specificity is not False:\n metric = \"specificity\"\n val = specificity\n elif cutoffscore is not False:\n metric = \"cutoffscore\"\n val = cutoffscore\n else:\n metric = \"specificity\"\n val = 0.8\n\n # ROC plot\n tpr, fpr, tpr_ci, stats, stats_bootci = roc_calculate(Ytrue_train, Yscore_train, bootnum=100, metric=metric, val=val)\n roc_title = \"Specificity: {}\".format(np.round(stats[\"val_specificity\"], 2))\n roc_bokeh = roc_plot(tpr, fpr, tpr_ci, width=320, height=315, title=roc_title, errorbar=stats[\"val_specificity\"])\n if testset is not None:\n fpr_test, tpr_test, threshold_test = metrics.roc_curve(Ytrue_test, Yscore_test, pos_label=1, drop_intermediate=False)\n fpr_test = np.insert(fpr_test, 0, 0)\n tpr_test = np.insert(tpr_test, 0, 0)\n roc_bokeh.line(fpr_test, tpr_test, color=\"red\", line_width=3.5, alpha=0.6, legend=\"ROC Curve (Test)\") # Add ROC Curve(Test) to roc_bokeh\n\n # Violin plot\n violin_title = \"Cut-off: {}\".format(np.round(stats[\"val_cutoffscore\"], 2))\n if testset is None:\n violin_bokeh = boxplot(Yscore_train, Ytrue_train, xlabel=\"Class\", ylabel=\"Predicted Score\", violin=True, color=[\"#FFCCCC\", \"#CCE5FF\"], width=320, height=315, title=violin_title, font_size=\"11pt\")\n else:\n violin_bokeh = boxplot(Yscore_combined, Ytrue_combined_name, xlabel=\"Class\", ylabel=\"Predicted Score\", violin=True, color=[\"#fcaeae\", \"#aed3f9\", \"#FFCCCC\", \"#CCE5FF\"], width=320, height=315, group_name=[\"Train (0)\", \"Test (0)\", \"Train (1)\", \"Test (1)\"], group_name_sort=[\"Test (0)\", \"Test (1)\", \"Train (0)\", \"Train (1)\"], title=violin_title, font_size=\"11pt\")\n violin_bokeh.multi_line([[-100, 100]], [[stats[\"val_cutoffscore\"], stats[\"val_cutoffscore\"]]], line_color=\"black\", line_width=2, line_alpha=1.0, line_dash=\"dashed\")\n\n # Distribution plot\n if testset is None:\n dist_bokeh = distribution(Yscore_train, group=Ytrue_train, kde=True, title=\"\", xlabel=\"Predicted Score\", ylabel=\"p.d.f.\", width=320, height=315)\n else:\n dist_bokeh = distribution(Yscore_combined, group=Ytrue_combined_name, kde=True, title=\"\", xlabel=\"Predicted Score\", ylabel=\"p.d.f.\", width=320, height=315)\n dist_bokeh.multi_line([[stats[\"val_cutoffscore\"], stats[\"val_cutoffscore\"]]], [[-100, 100]], line_color=\"black\", line_width=2, line_alpha=1.0, line_dash=\"dashed\")\n\n # Man-Whitney U for Table (round and use scienitic notation if p-value > 0.001)\n manw_pval = scipy.stats.mannwhitneyu(Yscore_train[Ytrue_train == 0], Yscore_train[Ytrue_train == 1], alternative=\"two-sided\")[1]\n if manw_pval > 0.001:\n manw_pval_round = \"%0.2f\" % manw_pval\n else:\n manw_pval_round = \"%0.2e\" % manw_pval\n if testset 
is not None:\n testmanw_pval = scipy.stats.mannwhitneyu(Yscore_test[Ytrue_test == 0], Yscore_test[Ytrue_test == 1], alternative=\"two-sided\")[1]\n if testmanw_pval > 0.001:\n testmanw_pval_round = \"%0.2f\" % testmanw_pval\n else:\n testmanw_pval_round = \"%0.2e\" % testmanw_pval\n\n # Create a stats table for test\n if testset is not None:\n teststats = binary_metrics(Ytrue_test, Yscore_test, cut_off=stats[\"val_cutoffscore\"])\n teststats_round = {}\n for i in teststats.keys():\n teststats_round[i] = np.round(teststats[i], 2)\n\n # Round stats, and stats_bootci for Table\n stats_round = {}\n for i in stats.keys():\n stats_round[i] = np.round(stats[i], 2)\n bootci_round = {}\n for i in stats_bootci.keys():\n bootci_round[i] = np.round(stats_bootci[i], 2)\n\n # Create table\n tabledata = dict(\n evaluate=[[\"Train\"]],\n manw_pval=[[\"{}\".format(manw_pval_round)]],\n auc=[[\"{} ({}, {})\".format(stats_round[\"AUC\"], bootci_round[\"AUC\"][0], bootci_round[\"AUC\"][1])]],\n accuracy=[[\"{} ({}, {})\".format(stats_round[\"ACCURACY\"], bootci_round[\"ACCURACY\"][0], bootci_round[\"ACCURACY\"][1])]],\n precision=[[\"{} ({}, {})\".format(stats_round[\"PRECISION\"], bootci_round[\"PRECISION\"][0], bootci_round[\"PRECISION\"][1])]],\n sensitivity=[[\"{} ({}, {})\".format(stats_round[\"SENSITIVITY\"], bootci_round[\"SENSITIVITY\"][0], bootci_round[\"SENSITIVITY\"][1])]],\n specificity=[[\"{} ({}, {})\".format(stats_round[\"SPECIFICITY\"], bootci_round[\"SPECIFICITY\"][0], bootci_round[\"SPECIFICITY\"][1])]],\n F1score=[[\"{} ({}, {})\".format(stats_round[\"F1-SCORE\"], bootci_round[\"F1-SCORE\"][0], bootci_round[\"F1-SCORE\"][1])]],\n R2=[[\"{} ({}, {})\".format(stats_round[\"R²\"], bootci_round[\"R²\"][0], bootci_round[\"R²\"][1])]],\n )\n\n # Append test data\n if testset is not None:\n tabledata[\"evaluate\"].append([\"Test\"])\n tabledata[\"manw_pval\"].append([testmanw_pval_round])\n tabledata[\"auc\"].append([teststats_round[\"AUC\"]])\n tabledata[\"accuracy\"].append([teststats_round[\"ACCURACY\"]])\n tabledata[\"precision\"].append([teststats_round[\"PRECISION\"]])\n tabledata[\"sensitivity\"].append([teststats_round[\"SENSITIVITY\"]])\n tabledata[\"specificity\"].append([teststats_round[\"SPECIFICITY\"]])\n tabledata[\"F1score\"].append([teststats_round[\"F1-SCORE\"]])\n tabledata[\"R2\"].append([teststats_round[\"R²\"]])\n\n # Plot table\n source = ColumnDataSource(data=tabledata)\n columns = [TableColumn(field=\"evaluate\", title=\"Evaluate\"), TableColumn(field=\"manw_pval\", title=\"MW-U Pvalue\"), TableColumn(field=\"R2\", title=\"R2\"), TableColumn(field=\"auc\", title=\"AUC\"), TableColumn(field=\"accuracy\", title=\"Accuracy\"), TableColumn(field=\"precision\", title=\"Precision\"), TableColumn(field=\"sensitivity\", title=\"Sensitivity\"), TableColumn(field=\"F1score\", title=\"F1score\")]\n table_bokeh = widgetbox(DataTable(source=source, columns=columns, width=950, height=90), width=950, height=80)\n\n # Title\n if specificity is not False:\n title = \"Specificity fixed to: {}\".format(np.round(val, 2))\n elif cutoffscore is not False:\n title = \"Score cut-off fixed to: {}\".format(np.round(val, 2))\n else:\n title = \"Specificity fixed to: {}\".format(np.round(val, 2))\n title_bokeh = \"<h3>{}</h3>\".format(title)\n\n # Combine table, violin plot and roc plot into one figure\n fig = layout([[violin_bokeh, dist_bokeh, roc_bokeh], [table_bokeh]], toolbar_location=\"right\")\n output_notebook()\n show(column(Div(text=title_bokeh, width=900, height=50), fig))\n\n def 
calc_bootci(self, bootnum=100, type=\"bca\"):\n \"\"\"Calculates bootstrap confidence intervals based on bootlist.\n\n Parameters\n ----------\n bootnum : a positive integer, (default 100)\n The number of bootstrap samples used in the computation.\n\n type : 'bc', 'bca', 'perc', (default 'bca')\n Methods for bootstrap confidence intervals. 'bc' is bias-corrected bootstrap confidence intervals. 'bca' is bias-corrected and accelerated bootstrap confidence intervals. 'perc' is percentile confidence intervals.\n \"\"\"\n bootlist = self.bootlist\n if type is \"bca\":\n boot = BCA(self, self.X, self.Y, self.bootlist, bootnum=bootnum)\n if type is \"bc\":\n boot = BC(self, self.X, self.Y, self.bootlist, bootnum=bootnum)\n if type is \"perc\":\n boot = Perc(self, self.X, self.Y, self.bootlist, bootnum=bootnum)\n self.bootci = boot.run()\n\n def plot_featureimportance(self, PeakTable, peaklist=None, ylabel=\"Label\", sort=True):\n \"\"\"Plots feature importance metrics.\n\n Parameters\n ----------\n PeakTable : DataFrame\n Peak sheet with the required columns.\n\n peaklist : list or None, (default None)\n Peaks to include in plot (the default is to include all samples).\n\n ylabel : string, (default \"Label\")\n Name of column in PeakTable to use as the ylabel.\n\n sort : boolean, (default True)\n Whether to sort plots in absolute descending order.\n\n Returns\n -------\n Peaksheet : DataFrame\n New PeakTable with added \"Coef\" and \"VIP\" columns (+ \"Coef-95CI\" and \"VIP-95CI\" if calc_bootci is used prior to plot_featureimportance).\n \"\"\"\n if not hasattr(self, \"bootci\"):\n print(\"Use method calc_bootci prior to plot_featureimportance to add 95% confidence intervals to plots.\")\n ci_coef = None\n ci_vip = None\n else:\n ci_coef = self.bootci[\"model.coef_\"]\n ci_vip = self.bootci[\"model.vip_\"]\n\n # Remove rows from PeakTable if not in peaklist\n if peaklist is not None:\n PeakTable = PeakTable[PeakTable[\"Name\"].isin(peaklist)]\n peaklabel = PeakTable[ylabel]\n peaklabel = peaklabel.apply(str)\n\n # Plot\n fig_1 = scatterCI(self.model.coef_, ci=ci_coef, label=peaklabel, hoverlabel=PeakTable[[\"Idx\", \"Name\", \"Label\"]], hline=0, col_hline=True, title=\"Coefficient Plot\", sort_abs=sort)\n fig_2 = scatterCI(self.model.vip_, ci=ci_vip, label=peaklabel, hoverlabel=PeakTable[[\"Idx\", \"Name\", \"Label\"]], hline=1, col_hline=False, title=\"Variable Importance in Projection (VIP)\", sort_abs=sort)\n fig = layout([[fig_1], [fig_2]])\n output_notebook()\n show(fig)\n\n # Return table with: Idx, Name, Label, Coefficient, 95CI, VIP, 95CI\n if not hasattr(self, \"bootci\"):\n coef = pd.DataFrame([self.model.coef_]).T\n coef.rename(columns={0: \"Coef\"}, inplace=True)\n vip = pd.DataFrame([self.model.vip_]).T\n vip.rename(columns={0: \"VIP\"}, inplace=True)\n else:\n coef = pd.DataFrame([self.model.coef_, self.bootci[\"model.coef_\"]]).T\n coef.rename(columns={0: \"Coef\", 1: \"Coef-95CI\"}, inplace=True)\n vip = pd.DataFrame([self.model.vip_, self.bootci[\"model.vip_\"]]).T\n vip.rename(columns={0: \"VIP\", 1: \"VIP-95CI\"}, inplace=True)\n\n Peaksheet = PeakTable.copy()\n Peaksheet[\"Coef\"] = coef[\"Coef\"].values\n Peaksheet[\"VIP\"] = vip[\"VIP\"].values\n if hasattr(self, \"bootci\"):\n Peaksheet[\"Coef-95CI\"] = coef[\"Coef-95CI\"].values\n Peaksheet[\"VIP-95CI\"] = vip[\"VIP-95CI\"].values\n return Peaksheet\n\n def permutation_test(self, nperm=100):\n \"\"\"Plots permutation test figures.\n\n Parameters\n ----------\n nperm : positive integer, (default 100)\n Number of 
permutations.\n \"\"\"\n fig = permutation_test(self, self.X, self.Y, nperm=nperm)\n output_notebook()\n show(fig)\n" ]
[ [ "sklearn.metrics.roc_curve", "pandas.DataFrame", "numpy.insert", "scipy.stats.mannwhitneyu", "numpy.array", "numpy.concatenate", "numpy.round", "numpy.unique" ] ]
qzchenwl/tensorboard
[ "e59ca8d45746f459d797f4e69377eda4433e1624" ]
[ "tensorboard/util/test_util.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TensorBoard testing helper routine module.\n\nThis module is basically a dumpster for really generic succinct helper\nroutines that exist solely for test code.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\nimport unittest\n\nimport tensorflow as tf\n\n# See discussion on issue #1996 for private module import justification.\nfrom tensorflow.python import tf2 as tensorflow_python_tf2\n\nfrom tensorboard.compat.proto import event_pb2\nfrom tensorboard.compat.proto import graph_pb2\nfrom tensorboard.compat.proto import meta_graph_pb2\nfrom tensorboard.compat.proto import summary_pb2\nfrom tensorboard.util import tb_logging\n\nlogger = tb_logging.get_logger()\n\n\nclass FileWriter(tf.compat.v1.summary.FileWriter):\n \"\"\"FileWriter for test.\n\n TensorFlow FileWriter uses TensorFlow's Protobuf Python binding\n which is largely discouraged in TensorBoard. We do not want a\n TB.Writer but require one for testing in integrational style\n (writing out event files and use the real event readers).\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # Briefly enter graph mode context so this testing FileWriter can be\n # created from an eager mode context without triggering a usage error.\n with tf.compat.v1.Graph().as_default():\n super(FileWriter, self).__init__(*args, **kwargs)\n\n def add_test_summary(self, tag, simple_value=1.0, step=None):\n \"\"\"Convenience for writing a simple summary for a given tag.\"\"\"\n value = summary_pb2.Summary.Value(tag=tag, simple_value=simple_value)\n summary = summary_pb2.Summary(value=[value])\n self.add_summary(summary, global_step=step)\n\n def add_event(self, event):\n if isinstance(event, event_pb2.Event):\n tf_event = tf.compat.v1.Event.FromString(event.SerializeToString())\n else:\n logger.warn(\n \"Added TensorFlow event proto. \"\n \"Please prefer TensorBoard copy of the proto\"\n )\n tf_event = event\n super(FileWriter, self).add_event(tf_event)\n\n def add_summary(self, summary, global_step=None):\n if isinstance(summary, summary_pb2.Summary):\n tf_summary = tf.compat.v1.Summary.FromString(\n summary.SerializeToString()\n )\n else:\n logger.warn(\n \"Added TensorFlow summary proto. \"\n \"Please prefer TensorBoard copy of the proto\"\n )\n tf_summary = summary\n super(FileWriter, self).add_summary(tf_summary, global_step)\n\n def add_session_log(self, session_log, global_step=None):\n if isinstance(session_log, event_pb2.SessionLog):\n tf_session_log = tf.compat.v1.SessionLog.FromString(\n session_log.SerializeToString()\n )\n else:\n logger.warn(\n \"Added TensorFlow session_log proto. 
\"\n \"Please prefer TensorBoard copy of the proto\"\n )\n tf_session_log = session_log\n super(FileWriter, self).add_session_log(tf_session_log, global_step)\n\n def add_graph(self, graph, global_step=None, graph_def=None):\n if isinstance(graph_def, graph_pb2.GraphDef):\n tf_graph_def = tf.compat.v1.GraphDef.FromString(\n graph_def.SerializeToString()\n )\n else:\n tf_graph_def = graph_def\n\n super(FileWriter, self).add_graph(\n graph, global_step=global_step, graph_def=tf_graph_def\n )\n\n def add_meta_graph(self, meta_graph_def, global_step=None):\n if isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):\n tf_meta_graph_def = tf.compat.v1.MetaGraphDef.FromString(\n meta_graph_def.SerializeToString()\n )\n else:\n tf_meta_graph_def = meta_graph_def\n\n super(FileWriter, self).add_meta_graph(\n meta_graph_def=tf_meta_graph_def, global_step=global_step\n )\n\n\nclass FileWriterCache(object):\n \"\"\"Cache for TensorBoard test file writers.\"\"\"\n\n # Cache, keyed by directory.\n _cache = {}\n\n # Lock protecting _FILE_WRITERS.\n _lock = threading.RLock()\n\n @staticmethod\n def get(logdir):\n \"\"\"Returns the FileWriter for the specified directory.\n\n Args:\n logdir: str, name of the directory.\n\n Returns:\n A `FileWriter`.\n \"\"\"\n with FileWriterCache._lock:\n if logdir not in FileWriterCache._cache:\n FileWriterCache._cache[logdir] = FileWriter(\n logdir, graph=tf.compat.v1.get_default_graph()\n )\n return FileWriterCache._cache[logdir]\n\n\nclass FakeTime(object):\n \"\"\"Thread-safe fake replacement for the `time` module.\"\"\"\n\n def __init__(self, current=0.0):\n self._time = float(current)\n self._lock = threading.Lock()\n\n def time(self):\n with self._lock:\n return self._time\n\n def sleep(self, secs):\n with self._lock:\n self._time += secs\n\n\ndef ensure_tb_summary_proto(summary):\n \"\"\"Ensures summary is TensorBoard Summary proto.\n\n TB v1 summary API returns TF Summary proto. To make test for v1 and\n v2 API congruent, one can use this API to convert result of v1 API\n to TB Summary proto.\n \"\"\"\n if isinstance(summary, summary_pb2.Summary):\n return summary\n\n return summary_pb2.Summary.FromString(summary.SerializeToString())\n\n\ndef _run_conditionally(guard, name, default_reason=None):\n \"\"\"Create a decorator factory that skips a test when guard returns False.\n\n The factory raises ValueError when default_reason is None and reason is not\n passed to the factory.\n\n Args:\n guard: A lambda that returns True if a test should be executed.\n name: A human readable name for the decorator for an error message.\n default_reason: A string describing why a test should be skipped. If it\n is None, the decorator will make sure the reason is supplied by the\n consumer of the decorator. Default is None.\n\n Raises:\n ValueError when both reason and default_reason are None.\n\n Returns:\n A function that returns a decorator.\n \"\"\"\n\n def _impl(reason=None):\n if reason is None:\n if default_reason is None:\n raise ValueError(\"%s requires a reason for skipping.\" % name)\n reason = default_reason\n return unittest.skipUnless(guard(), reason)\n\n return _impl\n\n\nrun_v1_only = _run_conditionally(\n lambda: not tensorflow_python_tf2.enabled(), name=\"run_v1_only\"\n)\nrun_v2_only = _run_conditionally(\n lambda: tensorflow_python_tf2.enabled(),\n name=\"run_v2_only\",\n default_reason=\"Test only appropriate for TensorFlow v2\",\n)\n" ]
[ [ "tensorflow.compat.v1.get_default_graph", "tensorflow.compat.v1.Graph", "tensorflow.python.tf2.enabled" ] ]
DragonMyth/MyDartEnv
[ "5a5c40d0104e22e0493f823c41734776fb2e6790" ]
[ "gym/envs/dart/flatworm_swim_straight_reduced.py" ]
[ "import numpy as np\nfrom gym import utils\nfrom gym.envs.dart import dart_env\nfrom .simple_water_world import BaseFluidSimulator\n\n\nclass DartFlatwormSwimStraightReducedEnv(dart_env.DartEnv, utils.EzPickle):\n def __init__(self):\n control_bounds = np.array([[1.0] * 12, [-1.0] * 12])\n self.action_scale = np.array([5*np.pi,5*np.pi,3*np.pi,3*np.pi,np.pi,np.pi]*2)\n self.frame_skip = 5\n dart_env.DartEnv.__init__(self, 'flatworm_reduced.skel', self.frame_skip, 53, control_bounds, dt=0.002,\n disableViewer=not True,\n custom_world=BaseFluidSimulator)\n utils.EzPickle.__init__(self)\n\n self.bodynodes_dict = self.construct_skel_dict()\n\n self.init_state = self._get_obs()\n self.original_com = self.robot_skeleton.C\n self.original_q = self.robot_skeleton.q\n\n num_of_dofs = len(self.robot_skeleton.dofs) - len(self.robot_skeleton.joints[0].dofs)\n\n self.simulation_dt = self.dt * 1.0 / self.frame_skip\n self.Kp = np.diagflat([0.0] * len(self.robot_skeleton.joints[0].dofs) + [4000.0] * num_of_dofs)\n # self.Kd = 150 * self.simulation_dt * self.Kp\n self.Kd = self.simulation_dt * self.Kp\n\n self.invM = np.linalg.inv(self.robot_skeleton.M + self.Kd * self.simulation_dt)\n # self.symm_rate = -1 * np.array([1, 1, 0.01, 0.01])\n\n def _step(self, a):\n old_com = self.robot_skeleton.C\n old_q = self.robot_skeleton.q\n old_dq = self.robot_skeleton.dq\n\n\n\n target_pos = self.build_target_pos(a)\n ##SPD Controller\n # for i in range(self.frame_skip):\n # invM = self.invM\n # p = -self.Kp.dot(self.robot_skeleton.q + self.robot_skeleton.dq * self.simulation_dt - target_pos)\n # d = -self.Kd.dot(self.robot_skeleton.dq)\n # qddot = invM.dot(-self.robot_skeleton.c + p + d + self.robot_skeleton.constraint_forces())\n # tau = p + d - self.Kd.dot(qddot) * self.simulation_dt\n # # tau *= 0.0005\n # tau[0:len(self.robot_skeleton.joints[0].dofs)] = 0\n # self.do_simulation(tau, 1)\n\n invM = self.invM\n p = -self.Kp.dot(self.robot_skeleton.q + self.robot_skeleton.dq * self.simulation_dt - target_pos)\n d = -self.Kd.dot(self.robot_skeleton.dq)\n qddot = invM.dot(-self.robot_skeleton.c + p + d + self.robot_skeleton.constraint_forces())\n tau = p + d - self.Kd.dot(qddot) * self.simulation_dt\n tau *= 0.001\n tau[0:len(self.robot_skeleton.joints[0].dofs)] = 0\n self.do_simulation(tau, self.frame_skip)\n cur_com = self.robot_skeleton.C\n cur_q = self.robot_skeleton.q\n cur_dq = self.robot_skeleton.dq\n ob = self._get_obs()\n\n angs = np.abs(self.robot_skeleton.q[6::])\n\n horizontal_pos_rwd = (cur_com[0] - old_com[0]) * 500\n horizontal_vel_rwd = 0 # 3*cur_dq[3]\n orth_pen = 0.5 * (np.abs(cur_com[1] - self.original_com[1]) + np.abs(cur_com[2] - self.original_com[2]))\n rotate_pen = np.sum(np.abs(cur_q[:3] - self.original_q[:3]))\n\n energy_consumed_pen = 0.1 * np.sum(tau[6::] * old_dq[6::] * self.frame_skip)\n # mirror_enforce\n reward = 1 + horizontal_pos_rwd + horizontal_vel_rwd - rotate_pen - orth_pen - energy_consumed_pen\n\n notdone = np.isfinite(ob[5::]).all() and (np.abs(angs) < np.pi / 2.0).all()\n done = not notdone\n\n return ob, reward, done, {'rwd': reward, 'horizontal_pos_rwd': horizontal_pos_rwd,\n 'horizontal_vel_rwd': horizontal_vel_rwd,\n 'rotate_pen': -rotate_pen, 'orth_pen': -orth_pen, 'energy_consumed_pen':energy_consumed_pen,'tau':tau[6::]}\n\n def _get_obs(self):\n\n return np.concatenate([self.robot_skeleton.q[4:6], self.robot_skeleton.dq[3:6], self.robot_skeleton.q[6::],\n self.robot_skeleton.dq[6::]]).ravel()\n\n def reset_model(self):\n self.dart_world.reset()\n qpos = 
self.robot_skeleton.q + self.np_random.uniform(low=-.01, high=.01, size=self.robot_skeleton.ndofs)\n qvel = self.robot_skeleton.dq + self.np_random.uniform(low=-.01, high=.01, size=self.robot_skeleton.ndofs)\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def viewer_setup(self):\n self._get_viewer().scene.tb.trans[2] = -3.5\n self._get_viewer().scene.tb._set_theta(-60)\n self.track_skeleton_id = 0\n\n def do_simulation(self, tau, n_frames):\n for _ in range(n_frames):\n comb = []\n import itertools\n for i in itertools.product(['l', 'r'], [1, 2, 3]):\n comb.append(i)\n for segIdx in range(1):\n for side, idx in comb:\n offset1_dir = np.array([-1, 0, 0])\n offset2_dir = np.array([1, 0, 0])\n curr_key = 'wing_' + str(side) + '_' + str(segIdx) + str(idx)\n next_key = 'wing_' + str(side) + '_' + str(segIdx + 1) + str(idx)\n curr_body = self.bodynodes_dict[curr_key]\n next_body = self.bodynodes_dict[next_key]\n\n constraint_force, offset1, offset2 = self.calc_constraint_force(curr_body, offset1_dir, next_body,\n offset2_dir, strength=6)\n\n curr_body.add_ext_force(constraint_force, _offset=offset1)\n next_body.add_ext_force(-constraint_force, _offset=offset2)\n\n super(DartFlatwormSwimStraightReducedEnv,self).do_simulation(tau,1)\n\n\n\n\n def calc_constraint_force(self, bodynode1, offset1_dir, bodynode2, offset2_dir, strength=1.0):\n shape1 = bodynode1.shapenodes[0]\n body1_geometry = shape1.shape.size()\n shape2 = bodynode2.shapenodes[0]\n body2_geometry = shape2.shape.size()\n\n offset1 = offset1_dir * body1_geometry / 2\n offset2 = offset2_dir * body2_geometry / 2\n\n body1_link_pos_to_world = bodynode1.to_world(offset1)\n body2_link_pos_to_world = bodynode2.to_world(offset2)\n constraint_force_dir = body2_link_pos_to_world - body1_link_pos_to_world\n constraint_force = constraint_force_dir * strength\n return constraint_force, offset1, offset2\n\n\n def construct_skel_dict(self):\n node_dict = {}\n bodynodes = self.robot_skeleton.bodynodes\n for i in range(len(bodynodes)):\n node_dict[bodynodes[i].name] = bodynodes[i]\n return node_dict\n\n def build_target_pos(self,a):\n target_pos = np.zeros(24)\n a = a*self.action_scale\n target_pos[0:6] = a[0:6]\n target_pos[6:12]= a[6:12]\n\n return np.concatenate(([0.0] * 6, target_pos))\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.linalg.inv", "numpy.abs", "numpy.array", "numpy.concatenate", "numpy.isfinite" ] ]
cysmnl/geometric_cognition
[ "473c0cf585aaf49904bfb87c35ea706e12f67f8a" ]
[ "torch_geometric/read/planetoid.py" ]
[ "import sys\nimport os.path as osp\nfrom itertools import repeat\n\nimport torch\nfrom torch_sparse import coalesce\nfrom torch_geometric.data import Data\nfrom torch_geometric.read import read_txt_array\nfrom torch_geometric.utils import remove_self_loops\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\n\ndef read_planetoid_data(folder, prefix):\n \"\"\"Reads the planetoid data format.\n ind.{}.x\n \"\"\"\n names = ['x', 'tx', 'allx', 'y', 'ty', 'ally', 'graph', 'test.index']\n items = [read_file(folder, prefix, name) for name in names]\n x, tx, allx, y, ty, ally, graph, test_index = items\n train_index = torch.arange(y.size(0), dtype=torch.long)\n val_index = torch.arange(y.size(0), y.size(0) + 500, dtype=torch.long)\n sorted_test_index = test_index.sort()[0]\n\n if prefix.lower() == 'citeseer':\n # There are some isolated nodes in the Citeseer graph, resulting in\n # none consecutive test indices. We need to identify them and add them\n # as zero vectors to `tx` and `ty`.\n len_test_indices = (test_index.max() - test_index.min()).item() + 1\n\n tx_ext = torch.zeros(len_test_indices, tx.size(1))\n tx_ext[sorted_test_index - test_index.min(), :] = tx\n ty_ext = torch.zeros(len_test_indices, ty.size(1))\n ty_ext[sorted_test_index - test_index.min(), :] = ty\n\n tx, ty = tx_ext, ty_ext\n\n x = torch.cat([allx, tx], dim=0)\n y = torch.cat([ally, ty], dim=0).max(dim=1)[1]\n\n x[test_index] = x[sorted_test_index]\n y[test_index] = y[sorted_test_index]\n\n train_mask = sample_mask(train_index, num_nodes=y.size(0))\n val_mask = sample_mask(val_index, num_nodes=y.size(0))\n test_mask = sample_mask(test_index, num_nodes=y.size(0))\n\n edge_index = edge_index_from_dict(graph, num_nodes=y.size(0))\n\n data = Data(x=x, edge_index=edge_index, y=y)\n data.train_mask = train_mask\n data.val_mask = val_mask\n data.test_mask = test_mask\n\n return data\n\n\ndef read_file(folder, prefix, name):\n path = osp.join(folder, 'ind.{}.{}'.format(prefix.lower(), name))\n\n if name == 'test.index':\n return read_txt_array(path, dtype=torch.long)\n\n with open(path, 'rb') as f:\n if sys.version_info > (3, 0):\n out = pickle.load(f, encoding='latin1')\n else:\n out = pickle.load(f)\n\n if name == 'graph':\n return out\n\n out = out.todense() if hasattr(out, 'todense') else out\n out = torch.Tensor(out)\n return out\n\n\ndef edge_index_from_dict(graph_dict, num_nodes=None):\n row, col = [], []\n for key, value in graph_dict.items():\n row += repeat(key, len(value))\n col += value\n edge_index = torch.stack([torch.tensor(row), torch.tensor(col)], dim=0)\n # NOTE: There are duplicated edges and self loops in the datasets. Other\n # implementations do not remove them!\n edge_index, _ = remove_self_loops(edge_index)\n edge_index, _ = coalesce(edge_index, None, num_nodes, num_nodes)\n return edge_index\n\n\ndef sample_mask(index, num_nodes):\n mask = torch.zeros((num_nodes, ), dtype=torch.uint8)\n mask[index] = 1\n return mask\n" ]
[ [ "torch.zeros", "torch.tensor", "torch.cat", "torch.Tensor" ] ]
lichnost/latent-pose-reenactment
[ "ee2719355f1db3d0b927f9b10b0d42d1fd07d4c9" ]
[ "embedders/FSTH.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn.utils import spectral_norm\nfrom generators.common import blocks\n\nclass Wrapper:\n @staticmethod\n def get_args(parser):\n parser.add('--embed_padding', type=str, default='zero', help='zero|reflection')\n parser.add('--embed_num_blocks', type=int, default=6)\n parser.add('--average_function', type=str, default='sum', help='sum|max')\n\n @staticmethod\n def get_net(args):\n net = Embedder(\n args.embed_padding, args.in_channels, args.out_channels,\n args.num_channels, args.max_num_channels, args.embed_channels,\n args.embed_num_blocks, args.average_function)\n return net.to(args.device)\n\nclass Embedder(nn.Module):\n def __init__(self, padding, in_channels, out_channels, num_channels, max_num_channels, embed_channels,\n embed_num_blocks, average_function):\n super().__init__()\n\n def get_down_block(in_channels, out_channels, padding):\n return blocks.ResBlock(in_channels, out_channels, padding, upsample=False, downsample=True,\n norm_layer='none')\n\n if padding == 'zero':\n padding = nn.ZeroPad2d\n elif padding == 'reflection':\n padding = nn.ReflectionPad2d\n\n self.out_channels = embed_channels\n\n self.down_block = nn.Sequential(\n padding(1),\n spectral_norm(\n nn.Conv2d(in_channels + out_channels, num_channels, 3, 1, 0),\n eps=1e-4),\n nn.ReLU(),\n padding(1),\n spectral_norm(\n nn.Conv2d(num_channels, num_channels, 3, 1, 0),\n eps=1e-4),\n nn.AvgPool2d(2))\n self.skip = nn.Sequential(\n spectral_norm(\n nn.Conv2d(in_channels + out_channels, num_channels, 1),\n eps=1e-4),\n nn.AvgPool2d(2))\n\n layers = []\n in_channels = num_channels\n for i in range(1, embed_num_blocks - 1):\n out_channels = min(in_channels * 2, max_num_channels)\n layers.append(get_down_block(in_channels, out_channels, padding))\n in_channels = out_channels\n layers.append(get_down_block(out_channels, embed_channels, padding))\n self.down_blocks = nn.Sequential(*layers)\n\n self.average_function = average_function\n\n self.finetuning = False\n\n def enable_finetuning(self, data_dict=None):\n self.finetuning = True\n\n def get_identity_embedding(self, data_dict):\n enc_stickmen = data_dict['enc_stickmen']\n enc_rgbs = data_dict['enc_rgbs']\n\n inputs = torch.cat([enc_stickmen, enc_rgbs], 2)\n\n b, n, c, h, w = inputs.shape\n inputs = inputs.view(-1, c, h, w)\n out = self.down_block(inputs)\n out = out + self.skip(inputs)\n out = self.down_blocks(out)\n out = torch.relu(out)\n embeds_elemwise = out.view(b, n, self.out_channels, -1).sum(3)\n\n if self.average_function == 'sum':\n embeds = embeds_elemwise.mean(1)\n elif self.average_function == 'max':\n embeds = embeds_elemwise.max(1)[0]\n else:\n raise Exception('Incorrect `average_function` argument, expected `sum` or `max`')\n\n data_dict['embeds'] = embeds\n data_dict['embeds_elemwise'] = embeds_elemwise\n\n def get_pose_embedding(self, data_dict):\n pass\n\n def forward(self, data_dict):\n if not self.finetuning:\n self.get_identity_embedding(data_dict)\n self.get_pose_embedding(data_dict)\n" ]
[ [ "torch.relu", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.AvgPool2d", "torch.nn.ReLU", "torch.cat" ] ]
aimldl/coding
[ "70ddbfaa454ab92fd072ee8dc614ecc330b34a70" ]
[ "python/en/_matplotlib/gallery/text_labels_and_annotations/auto-wrapping_text.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\ntext_labels_and_annotations/auto-wrapping_text.py\nMatplotlib > Gallery > Text, labels and annotations> Auto-wrapping text\nhttps://matplotlib.org/3.1.1/gallery/text_labels_and_annotations/autowrap.html#sphx-glr-gallery-text-labels-and-annotations-autowrap-py\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nfig = plt.figure()\nplt.axis([0, 10, 0, 10])\nt = (\"This is a really long string that I'd rather have wrapped so that it \"\n \"doesn't go outside of the figure, but if it's long enough it will go \"\n \"off the top or bottom!\")\nplt.text(4, 1, t, ha='left', rotation=15, wrap=True)\nplt.text(6, 5, t, ha='left', rotation=15, wrap=True)\nplt.text(5, 5, t, ha='right', rotation=-15, wrap=True)\nplt.text(5, 10, t, fontsize=18, style='oblique', ha='center',\n va='top', wrap=True)\nplt.text(3, 4, t, family='serif', style='italic', ha='right', wrap=True)\nplt.text(-1, 0, t, ha='left', rotation=-15, wrap=True)\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "matplotlib.pyplot.text" ] ]
geetakumri/Moview_Review_Sentiment_Analysis
[ "41f4c17d9115633b000f52268ac768cfe013e808" ]
[ "model_build.py" ]
[ "from sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer, CountVectorizer\nfrom sklearn import svm\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import classification_report, accuracy_score, confusion_matrix, ConfusionMatrixDisplay\nfrom sklearn.linear_model import SGDClassifier, LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nimport joblib\n\n\ndef model_building(data):\n def model_prediction(tfidf,name, model):\n \n # Training the classifier with Naive Bayes\n cassifier = Pipeline([tfidf,\n (name,model),\n ])\n\n cassifier.fit(X_train, Y_train)\n test_predict = cassifier.predict(X_test)\n #print(\"test_predict\", set(test_predict))\n\n train_accuracy = round(cassifier.score(X_train, Y_train)*100)\n test_accuracy = round(accuracy_score(test_predict, Y_test)*100)\n\n print(f\" {name} Train Accuracy Score : {train_accuracy}% \")\n print(f\" {name} Test Accuracy Score : {test_accuracy}% \")\n print()\n joblib.dump(cassifier, open(name, \"wb\"))\n\n\n X_train, X_test, y_train, y_test = train_test_split(data.index.values, data.Sentiment.values, test_size=0.1, random_state=42, stratify=data.Sentiment)\n X_train, X_val, y_train, y_val = train_test_split(X_train,y_train,test_size=.15, random_state=42, stratify=y_train)\n \n data['data_type'] = ['not_set']*data.shape[0]\n data.loc[X_train, 'data_type'] = 'train'\n data.loc[X_val, 'data_type'] = 'val'\n data.loc[X_test,'data_type'] = 'test'\n\n data = data.dropna()\n train_set = data[data['data_type'] == 'train'].drop_duplicates(ignore_index=True)\n val_set = data[data['data_type'] == 'val'].drop_duplicates(ignore_index=True)\n test_set = data[data['data_type'] == 'test'].drop_duplicates(ignore_index=True)\n\n data = pd.concat([train_set, val_set, test_set], ignore_index=True)\n data = data.sample(frac=1, random_state=1).reset_index(drop=True)\n\n X_train = train_set.Phrase.values\n Y_train = train_set.Sentiment.values\n X_test = test_set.Phrase.values\n Y_test = test_set.Sentiment.values\n\n #vect = CountVectorizer(stop_words='english', ngram_range=(1,1), )\n\n models = []\n models.append(('nb_clf', MultinomialNB()))\n models.append(('rf_clf', DecisionTreeClassifier()))\n models.append(('sgd_clf', SGDClassifier()))\n\n for name, model in models:\n model_prediction(('tfidf', TfidfVectorizer()),name, model)\n \n\n\n\n\n \n\n\n" ]
[ [ "sklearn.naive_bayes.MultinomialNB", "sklearn.tree.DecisionTreeClassifier", "sklearn.linear_model.SGDClassifier", "sklearn.feature_extraction.text.TfidfVectorizer", "sklearn.metrics.accuracy_score", "pandas.concat", "sklearn.pipeline.Pipeline", "sklearn.model_selection.train_test_split" ] ]
wfondrie/diadem
[ "cf42449ccd305b7fd040f9b03129256f60f13949" ]
[ "diadem/align.py" ]
[ "\"\"\"\nThis module contains the my implementation of the FastDTW algorithm.\n\nThe algorithm is described in http://cs.fit.edu/~pkc/papers/tdm04.pdf.\nThis implementation is losely based on the python package from this\nGitHub repository: https://github.com/slaypni/fastdtw.\n\nMy code deviates from this repository is a few ways to make it more\nuser friendly and amenable to aligning mass spectrometry runs:\n 1. Cython is not needed for good speed, because of numba.\n 2. The input numpy arrays (x and y) can be of any dimensionality, so\n long as the distance function can handle it.\n\nWritten by William E Fondrie, 2019\n\"\"\"\nfrom typing import Tuple, Callable\n\nimport numpy as np\nimport numba as nb\n\n# Distance Functions ----------------------------------------------------------\[email protected]\ndef cosine_distance(x, y, tiny=np.finfo(float).tiny):\n \"\"\"Compute 1 minus the cosine similarity between x and y\"\"\"\n denom = (np.linalg.norm(x) * np.linalg.norm(y) + tiny)\n return 1 - np.dot(x, y) / denom\n\n\n# DTW Functions ---------------------------------------------------------------\ndef fastdtw(x: np.ndarray, y: np.ndarray, radius: int = 1,\n dist: Callable[[np.ndarray, np.ndarray], float]\n = cosine_distance) -> Tuple[float, Tuple[Tuple[int, int]]]:\n \"\"\"\n Find the approximate minimum warping path between x and y.\n\n Parameters\n ----------\n x, y : numpy.ndarray\n Numpy arrays of the series to align. The first dimension is\n always assumed to be the time domain. For example, if aligning\n two mass spectrometry runs by their precursor mass spectra,\n x and y would be of shape [retention time, m/z] where m/z is\n each spectrum vectorized along the m/z axis.\n\n radius : int\n The radius to use for the FastDTW neighborhood.\n\n dist: Callable\n A distance function (not in the strict sense of the word), which\n accepts single time slices of x and y as input and returns their\n distance as a float.\n\n Returns\n -------\n Tuple[float, Tuple[Tuple[int, int]]]\n A tuple containing two elements. The first is the estimated DTW\n distance between x and y. The second is a Tuple of Tuples\n indicating the minimal warping path between x and y. The\n innermost tuple contains the mapping of (x, y) pairs in the\n path.\n \"\"\"\n min_time_size = radius + 2\n\n # The base case\n if x.shape[0] < min_time_size or y.shape[0] < min_time_size:\n return dtw(x, y, dist)\n\n # Recursive state\n shrunk_x = _reduce_by_half(x)\n shrunk_y = _reduce_by_half(y)\n _, path = fastdtw(shrunk_x, shrunk_y, radius=radius)\n window = _expand_window(path, x.shape[0], y.shape[0], radius=radius)\n\n return dtw(x, y, dist, window)\n\n\ndef dtw(x: np.ndarray, y: np.ndarray,\n dist: Callable[[np.ndarray, np.ndarray], float] = cosine_distance,\n _window = None) -> Tuple[float, Tuple[Tuple[int, int]]]:\n \"\"\"\n Find the minimum warping path between x and y.\n\n Parameters\n ----------\n x, y : numpy.ndarray\n Numpy arrays of the series to align. The first dimension is\n always assumed to be the time domain. For example, if aligning\n two mass spectrometry runs by their precursor mass spectra,\n x and y would be of shape [retention time, m/z] where m/z is\n each spectrum vectorized along the m/z axis.\n\n dist: Callable\n A distance function (not in the strict sense of the word), which\n accepts single time slices of x and y as input and returns their\n distance as a float.\n\n Returns\n -------\n Tuple[float, Tuple[Tuple[int, int]]]\n A tuple containing two elements. 
The first is the estimated DTW\n distance between x and y. The second is a Tuple of Tuples\n indicating the minimal warping path between x and y. The\n innermost tuple contains the mapping of (x, y) pairs in the\n path.\n \"\"\"\n if _window is None:\n _window = [(i, j) for i in range(x.shape[0]) for j in range(y.shape[0])]\n\n _window = list(_window)\n return _dtw_main(x, y, dist, _window)\n\n# Utility functions -----------------------------------------------------------\n# This is the implementation of the Dynamic Time Warping algorithm.\n# For some reason the jitted version is wayyyyy slower :(\ndef _dtw_main(x, y, dist, window):\n \"\"\"The DTW algorithm\"\"\"\n res = {}\n res[0, 0] = (float(0), 0, 0)\n\n for i, j in window:\n dt = dist(x[i, ...], y[j, ...])\n moves = ((i, j+1), (i+1, j), (i, j))\n\n val = np.Inf\n for move in moves:\n if move in res:\n if res[move][0] < val:\n val = res[move][0]\n res[i+1, j+1] = (val + dt, *move)\n\n\n path = []\n i, j = x.shape[0], y.shape[0]\n while i or j:\n path.append((i-1, j-1))\n i, j = res[i, j][1], res[i, j][2]\n\n path.reverse()\n return (res[x.shape[0], y.shape[0]][0], tuple(path))\n\n\ndef _reduce_by_half(x):\n \"\"\"Reduce x by half by taking the average.\"\"\"\n max_idx = x.shape[0] - (x.shape[0] % 2)\n return np.array([(x[i, ...] + x[i+1, ...]) / 2 for i in range(0, max_idx, 2)])\n\n\ndef _expand_window(path, len_x, len_y, radius):\n \"\"\"Expands the window around path and returns a new window\"\"\"\n path_ = set(path)\n path_range = range(-radius, radius+1)\n window = set()\n\n for i, j in path:\n for a, b in ((i+a, j+b) for a in path_range for b in path_range):\n if 0 <= a < len_x and 0 <= b < len_y:\n path_.add((a, b))\n\n for i, j in path_:\n i *= 2\n j *= 2\n for a, b in ((i, j), (i, j+1), (i+1, j), (i+1, j+1)):\n if 0 <= a < len_x and 0 <= b < len_y:\n window.add((a, b))\n\n return sorted(window)\n" ]
[ [ "numpy.finfo", "numpy.linalg.norm", "numpy.dot" ] ]
tclarkin/shread_dash
[ "a45e2f2946c74526e69c087587676aaa4cb15fba" ]
[ "plot_lib/snow_plot.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 27, 2022\n\nSHREAD Dash Snow Plot\n\nScript for running the snow plot in the dashboard (shread_dash.py)\n\n@author: buriona, tclarkin (2020-2022)\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plot_lib.utils import import_snotel,import_csas_live\n\nfrom database import snotel_sites\nfrom database import csas_gages\nfrom plot_lib.utils import screen_spatial,ba_stats_all,ba_stats_std,screen_csas,screen_snotel\nfrom plot_lib.utils import ba_min_plot, ba_max_plot, ba_mean_plot, ba_median_plot\nfrom plot_lib.utils import shade_forecast\n\ndef get_basin_stats(snodas_df,stype=\"swe\"):\n dates = snodas_df[\"Date\"].unique()\n last_date = dates.max()\n snodas_unique = snodas_df[snodas_df[\"Date\"]==last_date]\n mean_el = round(snodas_unique[\"elev_ft\"].mean(),0)\n points = len(snodas_unique)\n area = round(points * 0.386102, 0)\n\n if stype==\"swe\":\n mean_ft = snodas_unique[\"mean\"].mean()/12\n vol_af = round(mean_ft*area*640,0)\n stats = (\n f'Volume: ~{vol_af:,.0f} acre-feet | '\n f'Mean Elevation: {mean_el:,.0f} feet & Area: {area:,.0f} sq.mi. | '\n f'(approximated by {points} points)'\n )\n else:\n stats = (\n f'Mean Elevation: {mean_el:,.0f} feet & Area: {area:,.0f} sq.mi. |'\n f'(approximated by {points} points)'\n )\n\n return stats\n\ndef get_snow_plot(basin, stype, elrange, aspects, slopes, start_date,\n end_date, dtype,snotel_sel,csas_sel,forecast_sel,plot_albedo,\n offline=True):\n \"\"\"\n :description: this function updates the snowplot\n :param basin: the selected basins (checklist)\n :param stype: the snow type (swe/snowdepth)\n :param elrange: the range of elevations ([min,max])\n :param aspects: the range of aspects ([min,max])\n :param slopes: the range of slopes ([min,max])\n :param start_date: start date (from date selector)\n :param end_date: end date (from date selector)\n :param snotel_sel: list of selected snotel sites ([])\n :param albedo: boolean\n :return: update figure\n \"\"\"\n # Set dtype:\n dtype = \"dv\"\n\n # Create date axis\n dates = pd.date_range(start_date, end_date, freq=\"D\", tz='UTC')\n\n # Set snow type based on user selection\n if stype == \"swe\":\n ylabel = \"Mean SWE (in)\"\n dlabel = \"SWE\"\n slabel = \"WTEQ\"\n if stype == \"sd\":\n ylabel = \"Mean Snow Depth (in)\"\n dlabel = \"snow depth\"\n slabel = \"SNWD\"\n\n ## Process SHREAD data\n # Filter data\n if basin == None:\n snodas_plot = False\n snodas_max = np.nan\n basin_stats_str = ''\n else:\n snodas_plot = True\n snodas_df = screen_spatial(\n stype, start_date, end_date, basin, aspects, elrange, slopes\n )\n if snodas_df.empty:\n snodas_plot = False\n snodas_max = np.nan\n basin_stats_str = 'No valid SHREAD data for given parameters'\n else:\n # Calculate basin average values\n ba_snodas = ba_stats_all(snodas_df)\n snodas_max = ba_snodas['95%'].max()\n basin_stats_str = get_basin_stats(snodas_df,stype)\n \n ## Process SNOTEL data (if selected)\n\n # Add data for selected SNOTEL sites\n snotel_s_df = pd.DataFrame(index=dates)\n name_df = pd.DataFrame(index=snotel_sel)\n for s in snotel_sel:\n name_df.loc[s, \"name\"] = str(snotel_sites.loc[s, \"site_no\"]) + \" \" + snotel_sites.loc[s, \"name\"] + \" (\" + str(\n round(snotel_sites.loc[s, \"elev_ft\"], 0)) + \" ft)\"\n if offline:\n snotel_in = screen_snotel(f\"snotel_{s}\", start_date, end_date)\n else:\n snotel_in = import_snotel(s, start_date, end_date, vars=[slabel])\n snotel_in = snotel_s_df.merge(snotel_in[slabel], left_index=True, 
right_index=True, how=\"left\")\n snotel_s_df.loc[:, s] = snotel_in[slabel]\n\n if len(snotel_sel) == 0:\n snotel_max = np.nan\n else:\n snotel_max = snotel_s_df.max().max()\n\n ## Process CSAS data (if selected)\n csas_a_df = pd.DataFrame()\n for site in csas_sel:\n if offline:\n csas_df = screen_csas(site, start_date, end_date,dtype)\n else:\n csas_df = import_csas_live(site,start_date,end_date,dtype)\n\n if (plot_albedo) and (site != \"SBSG\") and (site != \"PTSP\"):\n csas_a_df[site] = csas_df[\"albedo\"]\n\n # Process NDFD, if selected\n\n # Filter data\n rhm = sky = snow = False\n\n if (basin != None) or (len(forecast_sel)>0):\n\n # remove rfc\n if \"flow\" in forecast_sel:\n forecast_sel.remove(\"flow\")\n\n # check if there are still items\n if len(forecast_sel) > 0:\n\n if dtype==\"iv\":\n step=\"D\"\n elif dtype==\"dv\":\n step=\"D\"\n\n ndfd_max = 0\n rhm = sky = snow = False\n for sensor in forecast_sel:\n\n if sensor in [\"qpf\",\"maxt\",\"mint\",\"pop12\"]:\n continue\n\n df = screen_spatial(sensor,start_date,end_date,basin,aspects,elrange,slopes,\"Date\")\n if df.empty:\n continue\n else:\n # Calculate basin average values\n ba_ndfd = ba_stats_std(df, \"Date\")\n ba_ndfd = ba_ndfd.tz_localize(tz=\"utc\")\n\n if sensor!=\"qpf\":\n ba_ndfd = ba_ndfd['mean'].resample(step).mean()\n else:\n ba_ndfd = ba_ndfd['mean'].resample(step).sum()\n\n ndfd = pd.DataFrame(index=dates)\n\n if sensor == \"sky\":\n sky = ndfd.merge(ba_ndfd,left_index=True,right_index=True,how=\"left\")\n\n if sensor == \"snow\":\n snow = ndfd.merge(ba_ndfd-1,left_index=True,right_index=True,how=\"left\")\n\n if sensor == \"rhm\":\n rhm = ndfd.merge(ba_ndfd, left_index=True, right_index=True, how=\"left\")\n\n ### Plot the data\n ymax = np.nanmax([snodas_max,snotel_max,20]) * 1.25\n\n print(\"Updating snow plot...\")\n fig = go.Figure()\n\n if snodas_plot==True:\n fig.add_trace(ba_max_plot(ba_snodas, dlabel))\n fig.add_trace(ba_min_plot(ba_snodas, dlabel))\n fig.add_trace(ba_mean_plot(ba_snodas, dlabel))\n fig.add_trace(ba_median_plot(ba_snodas, dlabel))\n\n for s in snotel_sel:\n fig.add_trace(go.Scatter(\n x=snotel_s_df.index,\n y=snotel_s_df[s],\n text=ylabel,\n mode='lines',\n line=dict(color=snotel_sites.loc[s, \"color\"]),\n name=name_df.loc[s, \"name\"]))\n\n if (plot_albedo) and (offline):\n for c in csas_a_df.columns:\n fig.add_trace(go.Scatter(\n x=csas_a_df.index,\n y=(1-csas_a_df[c])*100,\n text=\"100% - Albedo\",\n mode='lines',\n line=dict(color=csas_gages.loc[c, \"color\"], dash=\"dash\"),\n name=c + \" 100% - Albedo\",\n yaxis=\"y2\"))\n\n if snow is not False:\n fig.add_trace(go.Scatter(\n x=snow.index,\n y=[ymax - 2] * len(snow),\n mode=\"text\",\n textfont=dict(\n color=\"black\"\n ),\n marker=dict(color=\"black\"),\n text=snow.round(2),\n name=\"Snow (in, SWE)\",\n showlegend=False,\n yaxis=\"y1\"\n ))\n\n if sky is not False:\n fig.add_trace(go.Scatter(\n x=sky.index,\n y=[ymax-4]*len(sky),\n mode=\"text\",\n textfont=dict(\n color=\"green\"\n ),\n marker=dict(color=\"green\"),\n text=sky.round(0),\n name=\"Sky Coverage (%)\",\n showlegend=False,\n yaxis=\"y1\"\n ))\n\n if rhm is not False:\n fig.add_trace(go.Scatter(\n x=rhm.index,\n y=[ymax - 6] * len(rhm),\n mode=\"text\",\n textfont=dict(\n color=\"brown\"\n ),\n marker=dict(color=\"brown\"),\n text=rhm.round(0),\n name=\"Relative Humidity\",\n showlegend=False,\n yaxis=\"y1\"\n ))\n\n fig.add_trace(shade_forecast(ymax))\n fig.update_layout(\n xaxis=dict(\n range=[start_date, end_date],\n showline=True,\n linecolor=\"black\",\n 
mirror=True\n ),\n yaxis=dict(\n title = ylabel,\n type = 'linear',\n range = [0, ymax],\n showline = True,\n linecolor = \"black\",\n mirror = True\n ),\n margin={'l': 40, 'b': 40, 't': 10, 'r': 45},\n height=400,\n legend={'x': 0, 'y': 1, 'bgcolor': 'rgba(255,255,255,0.8)'},\n hovermode='closest',\n plot_bgcolor='white',\n )\n if (plot_albedo) and (offline):\n fig.update_layout(\n yaxis2=dict(\n title=\"100% - Albedo\",\n side=\"right\",\n overlaying='y',\n range=[0, 100]),\n margin={'l': 40, 'b': 40, 't': 0, 'r': 40},\n )\n\n if snodas_plot:\n return fig, basin_stats_str\n \n return fig, basin_stats_str\n" ]
[ [ "numpy.nanmax", "pandas.DataFrame", "pandas.date_range" ] ]
tblut/NNFS
[ "75320c546043bc74f368a7a6edcd8bb70aa90dc4" ]
[ "nnfs/model.py" ]
[ "import numpy as np\nfrom nnfs.layers import Linear\nfrom nnfs.optimizers import SGD\n\n\nclass Model:\n def __init__(self, layers, loss, optimizer=SGD(lr=0.01)):\n self.layers = layers\n self.loss = loss\n self.optimizer = optimizer\n\n def save_weights(self, filename):\n weights = []\n for layer in self.layers:\n for param in layer.get_parameters():\n weights.append(param.value)\n np.savez(filename, *weights)\n\n def load_weights(self, filename):\n weights = np.load(filename)\n param_index = 0\n for layer in self.layers:\n for param in layer.get_parameters():\n param.value = weights[f'arr_{param_index}']\n param_index += 1\n\n def predict(self, inputs):\n outputs = inputs\n for layer in self.layers:\n outputs = layer.forward(outputs)\n return outputs\n\n def train(self, X, y, epochs=20, batch_size=32, validation_data=None, metrics=None, verbose=1):\n history = {'train_loss': [0.0] * epochs}\n if validation_data:\n history['valid_loss'] = [0.0] * epochs\n if metrics:\n for name, _ in metrics.items():\n history[f'train_{name}'] = [0.0] * epochs\n if validation_data:\n history[f'valid_{name}'] = [0.0] * epochs\n\n n_batches = (len(X) + batch_size - 1) // batch_size\n for epoch in range(epochs):\n train_loss = 0.0\n for batch_index in range(n_batches):\n batch_start = batch_index * batch_size\n batch_end = min((batch_index + 1) * batch_size, X.shape[0])\n X_batch = X[batch_start:batch_end, ...]\n y_batch = y[batch_start:batch_end, ...]\n\n y_pred = self.predict(X_batch)\n batch_loss = self.loss(y_pred, y_batch)\n batch_loss += np.sum([layer.get_loss() for layer in self.layers])\n train_loss += batch_loss / n_batches\n\n parameters = []\n grad_in = self.loss.get_grad_in(y_pred, y_batch)\n for layer in reversed(self.layers):\n grad_in = layer.backward(grad_in)\n for param in layer.get_parameters():\n parameters.append(param)\n\n self.optimizer.apply_gradients(parameters)\n\n if metrics:\n for name, metric in metrics.items():\n history[f'train_{name}'][epoch] += metric(y_pred, y_batch) / n_batches\n\n history['train_loss'][epoch] = train_loss\n\n if validation_data:\n valid_loss = 0.0\n n_valid_batches = (len(validation_data[0]) + batch_size - 1) // batch_size\n for batch_index in range(n_valid_batches):\n batch_start = batch_index * batch_size\n batch_end = min((batch_index + 1) * batch_size, validation_data[0].shape[0])\n X_batch = validation_data[0][batch_start:batch_end, ...]\n y_batch = validation_data[1][batch_start:batch_end, ...]\n y_pred = self.predict(X_batch)\n batch_loss = self.loss(y_pred, y_batch)\n batch_loss += np.sum([layer.get_loss() for layer in self.layers])\n valid_loss += batch_loss / n_valid_batches\n if metrics:\n for name, metric in metrics.items():\n history[f'valid_{name}'][epoch] += metric(y_pred, y_batch) / n_valid_batches\n history['valid_loss'][epoch] = valid_loss\n\n if not verbose:\n continue\n log_str = f\"epoch: {epoch+1}/{epochs} - train_loss: {train_loss:.8f}\"\n if metrics:\n for name, metric in metrics.items():\n value = history[f'train_{name}'][epoch]\n log_str += f\" - train_{name}: {value:.8f}\"\n if validation_data:\n log_str += f\" - valid_loss: {valid_loss:.8f}\"\n if metrics:\n for name, metric in metrics.items():\n value = history[f'valid_{name}'][epoch]\n log_str += f\" - valid_{name}: {value:.8f}\"\n print(log_str)\n return history\n" ]
[ [ "numpy.load", "numpy.savez" ] ]
mozhumz/machine_learning_py
[ "880f6778ac16b0a16a80b31972a35304caa91dc1" ]
[ "demoDay25_CNNAndWord2Vec/boston_multi.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n#加载数据集\nboston_housing = tf.keras.datasets.boston_housing\n(train_x,train_y),(test_x,test_y) = boston_housing.load_data()\n\nnum_train=len(train_x) #训练集和测试机中样本的数量\nnum_test=len(test_x)\n\n#对训练样本和测试样本进行标准化(归一化),这里有用到张量的广播运算机制\nx_train=(train_x-train_x.min(axis=0))/(train_x.max(axis=0)-train_x.min(axis=0))\ny_train = train_y\n\nx_test=(test_x-test_x.min(axis=0))/(test_x.max(axis=0)-test_x.min(axis=0))\ny_test = test_y\n\n#生成多元回归需要的二维形式\nx0_train = np.ones(num_train).reshape(-1,1)\nx0_test = np.ones(num_test).reshape(-1,1)\n\n#对张量数据类型转换和进行堆叠\nX_train = tf.cast(tf.concat([x0_train,x_train],axis=1), tf.float32)\nX_test = tf.cast(tf.concat([x0_test, x_test], axis=1), tf.float32)\n\n#将房价转换为列向量\nY_train = tf.constant(y_train.reshape(-1,1), tf.float32)\nY_test = tf.constant(y_test.reshape(-1,1), tf.float32)\n\n#设置超参数\nlearn_rate = 0.01\niter = 2000\ndisplay_step=200\n\n#设置模型变量初始值\nnp.random.seed(612)\nW = tf.Variable(np.random.randn(14,1), dtype = tf.float32)\n\n#训练模型\nmse_train=[]\nmse_test=[]\n\nfor i in range(iter+1):\n with tf.GradientTape() as tape:\n PRED_train = tf.matmul(X_train,W)\n Loss_train = 0.5*tf.reduce_mean(tf.square(Y_train-PRED_train))\n\n PRED_test = tf.matmul(X_test,W)\n Loss_test = 0.5*tf.reduce_mean(tf.square(Y_test-PRED_test))\n\n mse_train.append(Loss_train)\n mse_test.append(Loss_test)\n\n dL_dW = tape.gradient(Loss_train, W)\n W.assign_sub(learn_rate*dL_dW)\n\n if i % display_step == 0:\n print('i: %i, Train_loss:%f, Test_loss: %f' % (i,Loss_train,Loss_test))\n\n\n#可视化输出\nplt.figure(figsize=(20,10))\n\nplt.subplot(221)\nplt.ylabel('MSE')\nplt.plot(mse_train,color = 'blue',linewidth=3)\nplt.plot(mse_test,color = 'red',linewidth=3)\nplt.title('训练误差和测试误差',fontsize = 20)\n\nplt.subplot(222)\nplt.ylabel('Price')\nplt.plot(y_train,color='blue', marker='o', label='true_price')\nplt.plot(PRED_train, color ='red', marker='.', label='predict')\nplt.legend()\nplt.title('训练数据集房价和训练数据集预测房价',fontsize = 20)\n\nplt.subplot(223)\nplt.ylabel('Price')\nplt.plot(y_test, color='blue', marker='o', label='true_price')\nplt.plot(PRED_test, color='red', marker='.', label='predict')\nplt.legend()\nplt.title('测试数据集房价和测试数据集预测房价',fontsize = 20)\n\nplt.show()" ]
[ [ "numpy.ones", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.random.seed", "numpy.random.randn", "tensorflow.matmul", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "tensorflow.GradientTape", "tensorflow.concat", "tensorflow.square", "matplotlib.pyplot.plot" ] ]
tak-sakumoto/formatomato
[ "4713338135b2ac3960cc2f9a6f017199853cdc52" ]
[ "for_imagenet/make_df_imagenet.py" ]
[ "import pandas as pd\nfrom PIL import Image\nfrom pathlib import Path\n\ndef make_df_imagenet(dataset):\n \"\"\"\n Making Pandas Dataframes of the extracted data\n \"\"\"\n # Making lists of class columns\n classes = list(Path(dataset).iterdir())\n classes = [p.stem for p in classes if p.is_dir()]\n class_ids = [i for i in range(len(classes))]\n\n class_df_dict = {\n 'CLASS_ID': class_ids,\n 'CLASS': classes\n }\n\n # Making a Pandas Dataframe\n class_df = pd.DataFrame(class_df_dict)\n\n # Set IMAGE_ID as index\n class_df = class_df.set_index('CLASS_ID')\n\n image_ids = []\n image_names = []\n widths = []\n heights = []\n img_classes = []\n \n # Making lists of image information columns\n for _class in classes:\n img_path_list = list((Path(dataset) / _class).glob('*.JPEG'))\n\n for img_path in img_path_list:\n img = Image.open(img_path)\n image_names.append(img_path.name)\n widths.append(img.width)\n heights.append(img.height)\n img_classes.append(_class)\n \n image_ids = [i for i in range(len(image_names))]\n\n image_df_dict = {\n 'IMAGE_ID': image_ids,\n 'IMAGE_NAME': image_names,\n 'WIDTH': widths,\n 'HEIGHT': heights\n }\n \n # Making a Pandas Dataframe\n image_df = pd.DataFrame(image_df_dict)\n # Set IMAGE_ID as index\n image_df = image_df.set_index('IMAGE_ID')\n\n df_dict = {\n 'IMAGE_ID': image_ids,\n 'IMAGE_NAME': image_names,\n 'CLASS': img_classes\n }\n\n # Making a Pandas Dataframe\n df = pd.DataFrame(df_dict)\n\n # Set IMAGE_ID as index\n df = df.set_index('IMAGE_ID')\n\n return df, image_df, class_df \n" ]
[ [ "pandas.DataFrame" ] ]
cyber-meow/Robotic_state_repr_learning
[ "d74fe372bea0b1cf42107450a8c3344a99279e91" ]
[ "utility.py" ]
[ "\n\"\"\"\nUtility functions\n\"\"\"\n\nimport numpy as np\n\n\ndef set_all_args(obj, argdict):\n for k in argdict.keys():\n if hasattr(obj, k):\n setattr(obj, k, argdict[k])\n else:\n print(\"Warning: parameter name {} not found!\".format(k))\n\ndef div0(a,b):\n with np.errstate(divide='ignore', invalid='ignore'):\n c = np.true_divide(a, b)\n c = np.nan_to_num(c)\n return c\n\n" ]
[ [ "numpy.nan_to_num", "numpy.true_divide", "numpy.errstate" ] ]
yashbonde/GAN-textures
[ "7e9bfa61c474f17812bad2430e63a2383ac85067" ]
[ "mgan.py" ]
[ "\nimport os\nimport time\nimport random\nimport argparse\nimport numpy as np\nfrom tqdm import trange\nfrom types import SimpleNamespace\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import save_image\nfrom torch.utils import tensorboard as tb\n\nfrom maze import Maze\n\nos.makedirs(\"images\", exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n_epochs\", type=int, default=200, help=\"number of epochs of training\")\nparser.add_argument(\"--batch_size\", type=int, default=256, help=\"size of the batches\")\nparser.add_argument(\"--lr\", type=float, default=0.0002, help=\"adam: learning rate\")\nparser.add_argument(\"--b1\", type=float, default=0.5, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\nparser.add_argument(\"--latent_dim\", type=int, default=128, help=\"dimensionality of the latent space\")\nparser.add_argument(\"--size\", type=int, default=32, help=\"size of each image dimension\")\nparser.add_argument(\"--channels\", type=int, default=1, help=\"number of image channels\")\nparser.add_argument(\"--sample_interval\", type=int, default=50, help=\"interval between image sampling\")\nparser.add_argument(\"--model_folder\", type = str, default = \"models\", help = \"folder to save model to\")\nparser.add_argument(\"--save_every\", type = int, default = 50, help = \"interval to save the models\")\nparser.add_argument(\"--seed\", type = int, default = 4, help = \"seed value\")\nopt = parser.parse_args()\nopt = SimpleNamespace(**vars(opt), img_size = opt.size + int(opt.size % 2 == 0))\n\ncuda = True if torch.cuda.is_available() else False\nos.makedirs(opt.model_folder, exist_ok=True)\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\nset_seed(opt.seed)\n\nclass Mazze():\n def __init__(self, w, h):\n self.width = int(w/2)\n self.height = int(h/2)\n\n def __len__(self):\n # just any random number\n return 10000\n\n def __getitem__(self, *args, **kwargs):\n m = Maze().generate(width=self.width, height=self.height)\n m = m._to_str_matrix(_np = True)\n m = torch.from_numpy(m)\n return m\n\n def __iter__(self):\n m = Maze().generate(width=self.width, height=self.height)\n m = m._to_str_matrix(_np = True)\n m = torch.from_numpy(m)\n yield m\n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n\n self.init_size = opt.img_size // 4\n self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))\n\n self.conv_blocks = nn.Sequential(\n nn.BatchNorm2d(128),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 128, 3, stride=1, padding=1),\n nn.BatchNorm2d(128, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 64, 3, stride=1, padding=1),\n nn.BatchNorm2d(64, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(64, opt.channels, 3, stride=1, 
padding=1),\n nn.Tanh(),\n )\n\n def forward(self, z, _pad = False):\n out = self.l1(z)\n out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n img = self.conv_blocks(out)\n if _pad:\n new_size = img.size(2) + 1\n img_padded = torch.ones((img.size(0), 1, new_size, new_size))\n img_padded[:, :, 1:, 1:] = img\n return img_padded\n else:\n return img\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.model = nn.Sequential(\n *discriminator_block(opt.channels, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = opt.img_size // 2 ** 4\n self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())\n\n def forward(self, img):\n out = self.model(img)\n out = out.view(out.shape[0], -1)\n validity = self.adv_layer(out)\n return validity\n\n\n# Loss function\nadversarial_loss = torch.nn.BCELoss()\n\n# Initialize generator and discriminator\ngenerator = Generator()\ndiscriminator = Discriminator()\n\nif cuda:\n generator.cuda()\n discriminator.cuda()\n adversarial_loss.cuda()\n\n# Initialize weights\ngenerator.apply(weights_init_normal)\ndiscriminator.apply(weights_init_normal)\n\n# print(f\"Generator: {generator}\")\n# print(f\"Discriminator: {discriminator}\")\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n# z = Variable(Tensor(np.random.normal(0, 1, (opt.batch_size, opt.latent_dim))))\n# print(f\"Latent: {z.size()}\")\n# gen_imgs = generator(z)\n# print(f\"gen_imgs: {gen_imgs.size()}\")\n# gen_dis = discriminator(gen_imgs.detach())\n# print(gen_dis.size())\n\n# Configure data loader\nm = Mazze(opt.size, opt.size)\n# dataloader = DataLoader(m, batch_size = opt.batch_size)\n# for i,m in enumerate(dataloader):\n# if i: break\n# print(m.shape)\n\n# Optimizers\noptimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\n# ----------\n# Training\n# ----------\n\nwith tb.SummaryWriter(log_dir = opt.model_folder, flush_secs = 20) as sw:\n try:\n global_step = 0\n for epoch in range(opt.n_epochs):\n size = (len(m) // opt.batch_size) + int(len(m) % opt.batch_size != 1)\n pbar = trange(size)\n dataloader = DataLoader(m, batch_size = opt.batch_size)\n for i, imgs in zip(pbar, dataloader):\n b, x, y = imgs.shape\n imgs = imgs.view(b, 1, x, y)\n\n # Adversarial ground truths\n valid = Variable(Tensor(imgs.shape[0], 1).fill_(1.0), requires_grad=False)\n fake = Variable(Tensor(imgs.shape[0], 1).fill_(0.0), requires_grad=False)\n\n # Configure input\n real_imgs = Variable(imgs.type(Tensor))[:,:,1:,1:]\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise as generator input\n z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))\n\n # Generate a batch of images\n gen_imgs = generator(z)\n\n # Loss measures generator's ability to fool the discriminator\n g_loss = adversarial_loss(discriminator(gen_imgs), valid)\n\n g_loss.backward()\n optimizer_G.step()\n\n # 
---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # run the discriminator on real and generated values\n gen_dis = discriminator(gen_imgs.detach())\n real_dis = discriminator(real_imgs)\n\n # Measure discriminator's ability to classify real from generated samples\n real_loss = adversarial_loss(real_dis, valid)\n fake_loss = adversarial_loss(gen_dis, fake)\n d_loss = (real_loss + fake_loss) / 2\n\n d_loss.backward()\n optimizer_D.step()\n\n pbar.set_description(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\"\n % (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())\n )\n\n batches_done = epoch * len(dataloader) + i\n if batches_done % opt.sample_interval == 0:\n print(f\"saving images at: images/{batches_done}.png\")\n images_to_save = gen_imgs.data[:25]\n images_to_save[images_to_save >= 0.5] = 1\n images_to_save[images_to_save < 0.5] = 0\n save_image(images_to_save, f\"images/{batches_done}.png\", nrow=5, normalize=True)\n\n if batches_done % opt.save_every == 0:\n print(f\"Saving model in folder: {opt.model_folder}\")\n torch.save(generator.state_dict(), f\"{opt.model_folder}/generator.pt\")\n torch.save(discriminator.state_dict(), f\"{opt.model_folder}/discriminator.pt\")\n\n sw.add_scalar(\"Dis-Loss/Real\", real_loss.item(), global_step = global_step, walltime = time.time())\n sw.add_scalar(\"Dis-Loss/Fake\", fake_loss.item(), global_step = global_step, walltime = time.time())\n sw.add_scalar(\"Dis-Loss/Total\", d_loss.item(), global_step = global_step, walltime = time.time())\n sw.add_scalar(\"Gen-Loss/Loss\", g_loss.item(), global_step = global_step, walltime = time.time())\n\n gen_img_sharpened = gen_imgs[0].clone()\n gen_img_sharpened[gen_img_sharpened >= 0.5] = 1\n gen_img_sharpened[gen_img_sharpened < 0.5] = 0\n sw.add_image(\"Generated\", gen_img_sharpened, global_step = global_step, walltime = time.time())\n sw.add_image(\"Real\", real_imgs[0], global_step = global_step, walltime = time.time())\n\n global_step += 1\n except KeyboardInterrupt:\n pass\n\nprint(f\"Saving model in folder: {opt.model_folder}\")\ntorch.save(generator.state_dict(), f\"{opt.model_folder}/generator.pt\")\ntorch.save(discriminator.state_dict(), f\"{opt.model_folder}/discriminator.pt\")\n" ]
[ [ "torch.utils.data.DataLoader", "torch.cuda.manual_seed_all", "numpy.random.seed", "torch.nn.Upsample", "torch.cuda.is_available", "torch.nn.Conv2d", "torch.utils.tensorboard.SummaryWriter", "torch.nn.Sigmoid", "torch.nn.BatchNorm2d", "torch.nn.init.normal_", "torch.from_numpy", "torch.manual_seed", "torch.nn.Linear", "torch.nn.Dropout2d", "torch.nn.init.constant_", "torch.nn.Tanh", "numpy.random.normal", "torch.nn.BCELoss", "torch.nn.LeakyReLU" ] ]
Gerzer/coremltools
[ "47e2010a68668bd1960dca040f5f87c0e66a0cbd" ]
[ "examples/neural_network_inference/tensorflow_converter/Tensorflow_1/linear_mnist_train.py" ]
[ "from __future__ import print_function\nimport os\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data # Import MINST data\n\ndef linear_model(x):\n # x is the image input\n # mnist data image of shape 28*28=784\n\n # Set model weights\n W = tf.Variable(tf.zeros([784, 10]))\n b = tf.Variable(tf.zeros([10]))\n\n # Construct model\n pred = tf.nn.softmax(tf.matmul(x, W) + b)\n\n # Return the last op\n return pred\n\n\ndef train():\n mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n # instantiate the model in the default graph\n x = tf.placeholder(tf.float32, [None, 784])\n\n print('image_input: ', x) \n #print 'image_input: ', x\n pred = linear_model(x)\n #print 'pred output:', pred\n\n print('pred output:', pred)\n\n # Add training components to it\n # 0-9 digits recognition => 10 classes\n y = tf.placeholder(tf.float32, [None, 10])\n\n # Define training hyper-parameters\n learning_rate = 0.01\n training_epochs = 25\n batch_size = 100\n display_step = 1\n\n # Define Cross Entropy loss\n cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))\n # Use Gradient Descent\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n # Initialize the variables (i.e. assign their default value)\n init = tf.global_variables_initializer()\n\n # Use a saver to save checkpoints\n saver = tf.train.Saver()\n # Training starts here\n with tf.Session() as sess:\n sess.run(init)\n # Training cycle\n for epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = int(mnist.train.num_examples/batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n # Fit training using batch data\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,\n y: batch_ys})\n # Compute average loss\n avg_cost += c / total_batch\n # Display logs per epoch step\n if (epoch+1) % display_step == 0:\n print((\"Epoch: {:04d} , cost= {:.9f}\").format(epoch+1,avg_cost))\n #print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost)\n print('Training Done. Now save the checkpoint...')\n #print 'Training Done. Now save the checkpoint...'\n save_dir = './checkpoints'\n save_path = os.path.join(save_dir, 'model.ckpt')\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n save_path = saver.save(sess, save_path)\n tf.train.write_graph(sess.graph, './', 'model.pbtxt')\n\n\nif __name__ == '__main__':\n\n # Read the data\n train()\n\n" ]
[ [ "tensorflow.placeholder", "tensorflow.zeros", "tensorflow.global_variables_initializer", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.matmul", "tensorflow.train.GradientDescentOptimizer", "tensorflow.train.Saver", "tensorflow.Session", "tensorflow.log", "tensorflow.train.write_graph" ] ]
mlopstemplates/Ignitedemo
[ "9a8329d8aaa4c82b0f322b6e677df5b1769050ea" ]
[ "code/train/train.py" ]
[ "import os\nimport argparse\nimport itertools\nimport numpy as np\nimport joblib\nimport matplotlib.pyplot as plt\n\nfrom sklearn import datasets\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score\nfrom sklearn.model_selection import train_test_split\n\nfrom azureml.core import Dataset, Run\nrun = Run.get_context()\n\n\ndef log_confusion_matrix_image(cm, labels, normalize=False, log_name='confusion_matrix', title='Confusion matrix', cmap=plt.cm.Blues):\n '''\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n '''\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(labels))\n plt.xticks(tick_marks, labels, rotation=45)\n plt.yticks(tick_marks, labels)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment=\"center\", color='white' if cm[i, j] > thresh else 'black')\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n run.log_image(log_name, plot=plt)\n plt.savefig(os.path.join('outputs', '{0}.png'.format(log_name)))\n\n\ndef log_confusion_matrix(cm, labels):\n # log confusion matrix as object\n cm_json = {\n 'schema_type': 'confusion_matrix',\n 'schema_version': 'v1',\n 'data': {\n 'class_labels': labels,\n 'matrix': cm.tolist()\n }\n }\n run.log_confusion_matrix('confusion_matrix', cm_json)\n\n # log confusion matrix as image\n log_confusion_matrix_image(cm, labels, normalize=False, log_name='confusion_matrix_unnormalized', title='Confusion matrix')\n\n # log normalized confusion matrix as image\n log_confusion_matrix_image(cm, labels, normalize=True, log_name='confusion_matrix_normalized', title='Normalized confusion matrix')\n\n\ndef main(args):\n # create the outputs folder\n os.makedirs('outputs', exist_ok=True)\n\n # Log arguments\n run.log('Kernel type', np.str(args.kernel))\n run.log('Penalty', np.float(args.penalty))\n\n # Load iris dataset\n X, y = datasets.load_iris(return_X_y=True)\n\n # dividing X,y into train and test data\n x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=223)\n data = {'train': {'X': x_train, 'y': y_train},\n 'test': {'X': x_test, 'y': y_test}}\n\n # train a SVM classifier\n svm_model = SVC(kernel=args.kernel, C=args.penalty, gamma='scale').fit(data['train']['X'], data['train']['y'])\n svm_predictions = svm_model.predict(data['test']['X'])\n\n # accuracy for X_test\n accuracy = svm_model.score(data['test']['X'], data['test']['y'])\n print('Accuracy of SVM classifier on test set: {:.2f}'.format(accuracy))\n run.log('Accuracy', np.float(accuracy))\n\n # precision for X_test\n precision = precision_score(svm_predictions, data[\"test\"][\"y\"], average='weighted')\n print('Precision of SVM classifier on test set: {:.2f}'.format(precision))\n run.log('precision', precision)\n\n # recall for X_test\n recall = recall_score(svm_predictions, data[\"test\"][\"y\"], average='weighted')\n print('Recall of SVM classifier on test set: {:.2f}'.format(recall))\n run.log('recall', recall)\n\n # f1-score for X_test\n f1 = f1_score(svm_predictions, data[\"test\"][\"y\"], 
average='weighted')\n print('F1-Score of SVM classifier on test set: {:.2f}'.format(f1))\n run.log('f1-score', f1)\n\n # create a confusion matrix\n labels = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']\n labels_numbers = [0, 1, 2]\n cm = confusion_matrix(y_test, svm_predictions, labels_numbers)\n log_confusion_matrix(cm, labels)\n\n # files saved in the \"outputs\" folder are automatically uploaded into run history\n model_file_name = \"model.pkl\"\n joblib.dump(svm_model, os.path.join('outputs', model_file_name))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--kernel', type=str, default='rbf', help='Kernel type to be used in the algorithm')\n parser.add_argument('--penalty', type=float, default=1.0, help='Penalty parameter of the error term')\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args=args)\n" ]
[ [ "sklearn.svm.SVC", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "sklearn.metrics.f1_score", "matplotlib.pyplot.imshow", "sklearn.metrics.precision_score", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split", "sklearn.metrics.recall_score", "sklearn.metrics.confusion_matrix", "numpy.str", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.yticks", "matplotlib.pyplot.xlabel", "numpy.float", "sklearn.datasets.load_iris" ] ]
ktian08/6784-drugs
[ "7c3ae9f65ce60b031008b0026bb9b954575315fa" ]
[ "datasetIO.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nAndrew D. Rouillard\nComputational Biologist\nTarget Sciences\nGSK\[email protected]\n\"\"\"\n\nimport os\nimport gzip\nimport pickle\nimport numpy as np\nimport dataclasses as dc\n\ndef load_datasetinfo(datasetspath):\n dataset_info = []\n with open(datasetspath, mode='rt', encoding=\"utf-8\", errors=\"surrogateescape\") as fr:\n fields = [x.strip() for x in fr.readline().split('\\t')]\n for line in fr:\n entries = [x.strip() for x in line.split('\\t')]\n dataset_info.append({field:entry for field,entry in zip(fields,entries)})\n return dataset_info\n\ndef save_datasetinfo(datasetspath, dataset_infos):\n fields = sorted(dataset_infos[0].keys())\n with open(datasetspath, mode='wt', encoding='utf-8', errors='surrogateescape') as fw:\n fw.write('\\t'.join(fields) + '\\n')\n for dataset_info in dataset_infos:\n entries = [dataset_info[field] for field in fields]\n fw.write('\\t'.join([entry if type(entry)==str else '{0:1.6g}'.format(entry) for entry in entries]) + '\\n')\n\ndef append_datasetinfo(datasetspath, dataset_info):\n fields = sorted(dataset_info.keys())\n entries = [dataset_info[field] for field in fields]\n if not os.path.exists(datasetspath):\n with open(datasetspath, mode='wt', encoding='utf-8', errors='surrogateescape') as fw:\n fw.write('\\t'.join(fields) + '\\n')\n with open(datasetspath, mode='at', encoding='utf-8', errors='surrogateescape') as fw:\n fw.write('\\t'.join([entry if type(entry)==str else '{0:1.6g}'.format(entry) for entry in entries]) + '\\n')\n\ndef load_examples(examplespath):\n examples = set()\n with open(examplespath, mode='rt', encoding='utf-8', errors='surrogateescape') as fr:\n fr.readline()\n for line in fr:\n examples.add(line.split('\\t', maxsplit=1)[0].strip())\n return examples\n\ndef load_clusterassignments(clusterassignmentspath):\n if '.pickle' in clusterassignmentspath:\n with open(clusterassignmentspath, 'rb') as fr:\n return pickle.load(fr)\n else:\n item_cluster = {}\n with open(clusterassignmentspath, mode='rt', encoding='utf-8', errors='surrogateescape') as fr:\n fr.readline()\n for line in fr:\n item, cluster = [x.strip() for x in line.split('\\t')]\n item_cluster[item] = int(cluster)\n return item_cluster\n\ndef save_clusterassignments(clusterassignmentspath, item_cluster, itemname):\n if '.pickle' in clusterassignmentspath:\n with open(clusterassignmentspath, 'wb') as fw:\n pickle.dump(item_cluster, fw)\n else:\n with open(clusterassignmentspath, mode='wt', encoding='utf-8', errors='surrogateescape') as fw:\n fw.write('\\t'.join([itemname, 'cluster']) + '\\n')\n for item, cluster in item_cluster.items():\n fw.write('\\t'.join([item, str(cluster)]) + '\\n')\n\ndef load_datamatrix(datasetpath, delimiter='\\t', dtype='float64', getmetadata=True, getmatrix=True):\n if '.pickle' in datasetpath:\n with open(datasetpath, 'rb') as fr:\n return pickle.load(fr)\n else:\n if '.gz' in datasetpath:\n openfunc = gzip.open\n else:\n openfunc = open\n with openfunc(datasetpath, mode='rt', encoding=\"utf-8\", errors=\"surrogateescape\") as fr:\n rowmeta = {}\n columnmeta = {}\n rowlabels = []\n entries = [x.strip() for x in fr.readline().split(delimiter)]\n skipcolumns = sum([entry=='#' for entry in entries]) + 1\n columnname = entries[skipcolumns-1]\n columnlabels = np.array(entries[skipcolumns:], dtype='object')\n firstentry = entries[0]\n skiprows = 1\n if getmetadata:\n while firstentry == '#':\n entries = [x.strip() for x in fr.readline().split(delimiter)]\n columnmetaname = entries[skipcolumns-1].split('/')[-1]\n 
if columnmetaname.lower() != 'na':\n columnmeta[columnmetaname] = np.array(entries[skipcolumns:], dtype='object')\n firstentry = entries[0]\n skiprows += 1\n rowname = firstentry\n rowmetanames = entries[1:skipcolumns]\n if len(rowmetanames) > 0:\n rowmetanames[-1] = rowmetanames[-1].split('/')[0]\n rowmetaname_idx = {}\n for i, rowmetaname in enumerate(rowmetanames):\n if rowmetaname.lower() != 'na':\n rowmeta[rowmetaname] = []\n rowmetaname_idx[rowmetaname] = i\n for line in fr:\n entries = [x.strip() for x in line.split(delimiter, maxsplit=skipcolumns)[:skipcolumns]]\n rowlabels.append(entries.pop(0))\n for rowmetaname, idx in rowmetaname_idx.items():\n rowmeta[rowmetaname].append(entries[idx])\n rowlabels = np.array(rowlabels, dtype='object')\n for rowmetaname, rowmetavalues in rowmeta.items():\n rowmeta[rowmetaname] = np.array(rowmetavalues, dtype='object')\n else:\n while firstentry == '#':\n entries = [x.strip() for x in fr.readline().split(delimiter)]\n firstentry = entries[0]\n skiprows += 1\n rowname = firstentry\n for line in fr:\n rowlabels.append(line.split(delimiter, maxsplit=1)[0].strip())\n rowlabels = np.array(rowlabels, dtype='object')\n if getmatrix:\n matrix = np.loadtxt(datasetpath, dtype=dtype, delimiter=delimiter, skiprows=skiprows,\n usecols=range(skipcolumns,len(columnlabels)+skipcolumns), ndmin=2)\n else:\n matrix = np.zeros((0,0), dtype=dtype)\n matrixname = rowname + '_' + columnname + '_associations_from_' + datasetpath\n return dc.datamatrix(rowname, rowlabels, columnname, columnlabels, matrixname, matrix, rowmeta, columnmeta)\n\ndef save_datamatrix(datasetpath, dm):\n if '.pickle' in datasetpath:\n with open(datasetpath, 'wb') as fw:\n pickle.dump(dm, fw)\n else:\n if '.gz' in datasetpath:\n openfunc = gzip.open\n else:\n openfunc = open\n np.savetxt(datasetpath.replace('.txt', '.temp.txt'), dm.matrix, fmt='%1.6g', delimiter='\\t', newline='\\n')\n with openfunc(datasetpath, mode='wt', encoding=\"utf-8\", errors=\"surrogateescape\") as fw, openfunc(datasetpath.replace('.txt', '.temp.txt'), 'rt') as fr:\n rowmeta_names_and_dtypes = [(k,v.dtype) for k,v in dm.rowmeta.items()]\n spacers = ['#' for x in range(len(rowmeta_names_and_dtypes)+1)]\n fw.write('\\t'.join(spacers + [dm.columnname] + dm.columnlabels.tolist()) + '\\n')\n for columnmetaname, columnmetadata in dm.columnmeta.items():\n if columnmetadata.dtype == 'object':\n fw.write('\\t'.join(spacers + [columnmetaname] + columnmetadata.tolist()) + '\\n')\n else:\n fw.write('\\t'.join(spacers + [columnmetaname] + ['{0:1.6g}'.format(x) for x in columnmetadata]) + '\\n')\n fw.write('\\t'.join([dm.rowname] + [k for k,t in rowmeta_names_and_dtypes] + ['na/na'] + ['na' for i in range(dm.shape[1])]) + '\\n')\n for i, line in enumerate(fr):\n rowmetadata = [dm.rowmeta[k][i] if t=='object' else '{0:1.6g}'.format(dm.rowmeta[k][i]) for k,t in rowmeta_names_and_dtypes]\n fw.write('\\t'.join([dm.rowlabels[i]] + rowmetadata + ['na']) + '\\t' + line)\n os.remove(datasetpath.replace('.txt', '.temp.txt'))\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
0xLiso/DeepLearningFromScratch
[ "997e94953b9e5e1ffd8c38af9277e7925e0b4ea7" ]
[ "solutions/python/Lesson02-03/Operation.py" ]
[ "import numpy as np\n\nfrom Tensor import Tensor\n\n\nclass Operation:\n\tresult = None\n\tdef forward(self):\n\t\traise NotImplementedError\n\tdef backward(self, gradOutput: Tensor):\n\t\traise NotImplementedError\n\n\nclass Negative(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\t\n\n\tdef forward(self):\n\t\tself.result = -self.A \n\n\nclass Add(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\n\tdef forward(self):\n\t\tself.result = self.A + self.B\n\n\nclass Substract(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\n\tdef forward(self):\n\t\tself.result = self.A - self.B\n\nclass Divide(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\tdef forward(self):\n\t\tself.result = self.A/self.B\n\n\nclass Multiply(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\tdef forward(self):\n\t\tself.result = self.A * self.B\n\n\nclass Sum(Operation):\n\tdef __init__(self, A: Tensor,axis:int = -1):\n\t\tself.A = A\n\t\tself.axis = axis\n\n\tdef forward(self):\n\t\tself.result = np.sum(self.A,self.axis)\n\nclass MatMul(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\n\tdef forward(self):\n\t\tself.result = np.matmul( self.A , self.B )\n\n\n\nclass MatMulNaive(Operation):\n\tdef __init__(self, A: Tensor,B:Tensor):\n\t\tself.A = A\n\t\tself.B = B\n\n\tdef forward(self):\n\t\tcshape=(self.A.shape[0],self.B.shape[1])\n\t\tC=Tensor([x for x in range(np.prod(cshape))]).reshape(cshape)\n\t\tfor i in range(0, self.A.shape[0]):\n\t\t\tfor j in range(0, self.B.shape[1]):\n\t\t\t\tC[i,j]=0\n\t\t\t\tfor k in range(0,self.A.shape[1]):\n\t\t\t\t\tC[i,j]+=self.A[i,k]*self.B[k,j]\n\t\tself.result = C\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.sum", "numpy.matmul", "numpy.prod" ] ]
OliverSchmitz/lue
[ "da097e8c1de30724bfe7667cc04344b6535b40cd" ]
[ "source/data_model/python/test/lue_test/test_case.py" ]
[ "import os\nimport shlex\nimport subprocess\nimport unittest\nimport numpy\nimport lue\nimport lue_test\n\n\nclass TestCase(unittest.TestCase):\n\n @classmethod\n def dataset_name(self,\n module_name,\n filename):\n return \"{}.lue\".format(\n os.path.join(os.path.dirname(module_name), filename))\n\n\n def assertArraysEqual(self,\n lhs,\n rhs):\n self.assertEqual(lhs.dtype, rhs.dtype)\n try:\n numpy.testing.assert_equal(lhs, rhs)\n except AssertionError as exception:\n self.fail(str(exception))\n\n\n @classmethod\n def add_method(cls,\n method):\n \"\"\"\n Binds the `method` passed in to the class.\n This is a convenience function to use when adding test methods to\n test cases programmatically at runtime.\n \"\"\"\n setattr(cls, method.__name__, method)\n\n\n @classmethod\n def create_dataset(cls,\n name):\n \"\"\"\n Create dataset, removing an existing dataset first\n \"\"\"\n lue_test.remove_file_if_existant(name)\n\n return lue.create_dataset(name)\n\n\n @classmethod\n def relative_pathname(cls,\n directory_pathname,\n filename):\n \"\"\"\n Return a relative pathname to *filename*, given that the test module\n is located in *directory_pathname*.\n \"\"\"\n return os.path.join(\n lue_test.relative_pathname(__file__, directory_pathname),\n filename)\n\n\n def assertDatasetIsValid(self,\n dataset):\n \"\"\"\n Validate *dataset*\n \"\"\"\n\n if isinstance(dataset, str):\n self.assertTrue(os.path.exists(dataset_pathname))\n dataset = lue.open_dataset(dataset_pathname)\n\n try:\n lue.assert_is_valid(dataset, fail_on_warning=True)\n except RuntimeError as exception:\n self.fail(\"dataset {} is not valid\\n{}\".format(\n dataset.pathname, exception))\n" ]
[ [ "numpy.testing.assert_equal" ] ]
xrael/orbit-predictor
[ "9ff616122be0b33e43144bd32a055e1f676801dd" ]
[ "tests/test_numerical_predictor.py" ]
[ "import datetime as dt\nfrom unittest import TestCase\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_almost_equal\nimport pytest\n\nfrom orbit_predictor.locations import ARG\nfrom orbit_predictor.predictors.numerical import (\n J2Predictor, InvalidOrbitError, R_E_KM, is_sun_synchronous\n)\n\n\nclass J2PredictorTests(TestCase):\n def setUp(self):\n # Converted to classical orbital elements\n sma = 6780\n ecc = 0.001\n inc = 28.5\n raan = 67.0\n argp = 355.0\n ta = 250.0\n\n self.epoch = dt.datetime(2000, 1, 1, 12, 0)\n\n self.predictor = J2Predictor(sma, ecc, inc, raan, argp, ta, self.epoch)\n\n def test_propagate_eci(self):\n # Data from GMAT\n expected_position = np.array([2085.9287615146, -6009.5713894563, -2357.3802307070])\n expected_velocity = np.array([6.4787522759177, 3.2366136616580, -2.5063420188165])\n\n when_utc = self.epoch + dt.timedelta(hours=3)\n\n position_eci, velocity_eci = self.predictor.propagate_eci(when_utc)\n\n assert_allclose(position_eci, expected_position, rtol=1e-2)\n assert_allclose(velocity_eci, expected_velocity, rtol=1e-2)\n\n def test_get_next_pass(self):\n pass_ = self.predictor.get_next_pass(ARG)\n\n assert pass_.sate_id == \"<custom>\"\n\n\nclass SunSynchronousTests(TestCase):\n def test_invalid_parameters_raises_error(self):\n self.assertRaises(\n InvalidOrbitError, J2Predictor.sun_synchronous, alt_km=400, inc_deg=90)\n self.assertRaises(\n InvalidOrbitError, J2Predictor.sun_synchronous, alt_km=10000, ecc=0)\n\n def test_sun_sync_from_altitude_and_eccentricity(self):\n # Vallado 3rd edition, example 11-2\n expected_inc = 98.6\n\n pred = J2Predictor.sun_synchronous(alt_km=800, ecc=0)\n self.assertAlmostEqual(pred.get_position().osculating_elements[2], expected_inc, places=2)\n\n def test_sun_sync_from_altitude_and_inclination(self):\n # Hardcoded from our implementation\n expected_ecc = 0.14546153131334466\n\n pred = J2Predictor.sun_synchronous(alt_km=475, inc_deg=97)\n self.assertAlmostEqual(pred.get_position().osculating_elements[1], expected_ecc, places=14)\n\n def test_sun_sync_from_eccentricity_and_inclination(self):\n # Vallado 3rd edition, example 11-2\n expected_sma = 7346.846\n\n pred = J2Predictor.sun_synchronous(ecc=0.2, inc_deg=98.6)\n self.assertAlmostEqual(pred.get_position().osculating_elements[0], expected_sma, places=1)\n\n def test_sun_sync_delta_true_anomaly_has_expected_anomaly_and_epoch(self):\n date = dt.datetime.today().date()\n ltan_h = 12\n expected_ref_epoch = dt.datetime(date.year, date.month, date.day, 12)\n\n for expected_ta_deg in [-30, 0, 30]:\n pred = J2Predictor.sun_synchronous(\n alt_km=800, ecc=0, date=date, ltan_h=ltan_h, ta_deg=expected_ta_deg\n )\n\n ta_deg = pred.get_position(expected_ref_epoch).osculating_elements[5]\n self.assertAlmostEqual(ta_deg, expected_ta_deg % 360, places=12)\n\n def test_sun_sync_delta_true_anomaly_non_circular(self):\n date = dt.datetime.today().date()\n ltan_h = 12\n expected_ref_epoch = dt.datetime(date.year, date.month, date.day, 12)\n\n for expected_ta_deg in [-30, 30]:\n pred = J2Predictor.sun_synchronous(\n alt_km=475, ecc=0.1455, date=date, ltan_h=ltan_h, ta_deg=expected_ta_deg\n )\n\n ta_deg = pred.get_position(expected_ref_epoch).osculating_elements[5]\n self.assertAlmostEqual(ta_deg, expected_ta_deg % 360, places=12)\n\n\n# Test data from Wertz et al. 
\"Space Mission Engineering: The New SMAD\" (2011), table 9-13\[email protected](\"orbits,days,inc_deg,expected_h\", [\n (14, 1, 28, 817.14),\n (43, 3, 28, 701.34),\n (29, 2, 28, 645.06),\n (59, 4, 28, 562.55),\n (74, 5, 28, 546.31),\n (15, 1, 28, 482.25),\n])\ndef test_repeated_groundtrack_sma(orbits, days, inc_deg, expected_h):\n pred = J2Predictor.repeating_ground_track(orbits=orbits, days=days, ecc=0.0, inc_deg=inc_deg)\n\n assert_almost_equal(pred.get_position().osculating_elements[0] - R_E_KM, expected_h, decimal=0)\n\n\ndef test_is_sun_sync_returns_false_for_non_sun_sync_orbit():\n pred1 = J2Predictor(7000, 0, 0, 0, 0, 0, dt.datetime.now())\n\n assert not is_sun_synchronous(pred1)\n\n\ndef test_is_sun_sync_detects_almost_sun_sync_orbit():\n pred2 = J2Predictor(R_E_KM + 460, 0.001, 97.4, 0, 0, 0, dt.datetime.now())\n\n assert not is_sun_synchronous(pred2)\n assert is_sun_synchronous(pred2, rtol=1e-1)\n\n\ndef test_is_sun_sync_returns_true_for_sun_sync_orbit():\n pred1 = J2Predictor.sun_synchronous(alt_km=500, ecc=0)\n pred2 = J2Predictor.sun_synchronous(alt_km=500, inc_deg=97)\n pred3 = J2Predictor.sun_synchronous(ecc=0, inc_deg=97)\n\n assert is_sun_synchronous(pred1)\n assert is_sun_synchronous(pred2)\n assert is_sun_synchronous(pred3)\n" ]
[ [ "numpy.array", "numpy.testing.assert_allclose" ] ]
PingjunChen/ThyroidRule
[ "1213cf0783c84da5917ca903c156e5e4280402f5" ]
[ "utils/wsi_util.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport os, sys, pdb\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.utils.data as data\nfrom torchvision import datasets, transforms\n\nimport numpy as np\nimport cv2, copy, time\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage import binary_fill_holes, binary_closing, binary_dilation\nfrom skimage import transform, morphology, filters\nfrom skimage.morphology import remove_small_objects\n\nimport loader\n\n\ndef refine_prediction(pred, thresh, min_size):\n binary = pred > thresh # Threshold\n binary = binary_dilation(binary, structure=np.ones((5,5))) # dilation to connect\n binary = binary_fill_holes(binary) # Fill holes\n # Remove outliers\n mask = remove_small_objects(binary, min_size=min_size, connectivity=8)\n\n return mask\n\n\ndef pred_patches(cls_model, patches, args):\n preds = []\n\n start_time = time.time()\n slide_dset = loader.PatchDataset(patches)\n dset_loader = data.DataLoader(slide_dset, batch_size=args.batch_size, shuffle=False, num_workers=4)\n with torch.no_grad():\n for ind, inputs in enumerate(dset_loader):\n inputs = inputs.type(torch.FloatTensor)\n inputs = Variable(inputs.cuda())\n outputs = cls_model(inputs)\n _, batch_preds = outputs.max(1)\n preds.extend(batch_preds.cpu().tolist())\n\n elapsed_time = time.time() - start_time\n print(\"{} seconds for {} patches.\".format(elapsed_time, patches.shape[0]))\n \n return preds\n\n\ndef slide_pred(cls_model, split_arr, patches, wsi_dim, args):\n # Save prediction results\n RAW_SIZE = 299\n SIZE1, SIZE2, SIZE4 = int(RAW_SIZE/4), int(RAW_SIZE/2), RAW_SIZE\n class_num = 3\n result_map = np.zeros((wsi_dim[0], wsi_dim[1], class_num), dtype=np.uint8)\n\n # Prediction\n if patches.shape[0] > 0: # exist\n preds = pred_patches(cls_model, patches, args)\n for coor, pred in zip(split_arr, preds):\n result_map[coor[0]+SIZE1:coor[0]+SIZE1+SIZE2, coor[1]+SIZE1:coor[1]+SIZE1+SIZE2, pred] = 255\n\n # Resize results\n args.img_cnt_ratio = 2**(args.cnt_level - args.img_level)\n s_height, s_width = wsi_dim[0] / args.img_cnt_ratio, wsi_dim[1] / args.img_cnt_ratio\n result_img = transform.resize(result_map, (s_height, s_width))\n\n MINIMUM_REGION_SIZE = (np.floor(SIZE2 / args.img_cnt_ratio))**2\n # refine unsure\n unsure_min_size = MINIMUM_REGION_SIZE * args.unsure_grid_num\n result_img[:,:,1] = refine_prediction(result_img[:,:,1], thresh=args.unsure_prob, min_size=unsure_min_size)\n unsure_img = (result_img[:,:,1] * 255).astype(np.uint8)\n _, unsure_cnts, _ = cv2.findContours(unsure_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n max_unsure = 0\n if len(unsure_cnts) != 0:\n max_unsure_cnt = max(unsure_cnts, key = cv2.contourArea)\n max_unsure = cv2.contourArea(max_unsure_cnt)\n unsure_num_grid = int(max_unsure / MINIMUM_REGION_SIZE)\n # refine malignant\n yes_min_size = MINIMUM_REGION_SIZE * args.malignant_num_min\n result_img[:,:,2] = refine_prediction(result_img[:,:,2], thresh=args.malignant_prob, min_size=yes_min_size)\n yes_img = (result_img[:,:,2] * 255).astype(np.uint8)\n _, yes_cnts, _ = cv2.findContours(yes_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n max_yes = 0\n if len(yes_cnts) != 0:\n max_yes_cnt = max(yes_cnts, key = cv2.contourArea)\n max_yes = cv2.contourArea(max_yes_cnt)\n yes_num_grid = int(max_yes / MINIMUM_REGION_SIZE)\n\n # Rule-based diagnosis\n diag_flag = thyroid_diagnosis_rule(unsure_num_grid, yes_num_grid, args)\n return result_img, diag_flag\n\n\ndef thyroid_diagnosis_rule(unsure_num, yes_num, args):\n diag_flag = \"Benign\"\n 
# if there are unsure regions, take it unsure\n if unsure_num != 0:\n diag_flag = \"Unsure\"\n else:\n # if malignant regions large than 16, take it as malignant\n if yes_num >= args.malignant_num_max:\n diag_flag = \"Malignant\"\n # if malignant regions num between 2-16, take is as Unsure\n elif yes_num >= args.malignant_num_min and yes_num < args.malignant_num_max:\n diag_flag = \"Unsure\"\n else:\n diag_flag = \"Benign\"\n return diag_flag\n\n\n\ndef pred_feas(cls_model, patches, args):\n probs, logits, vecs = [], [], []\n\n def fea_hook(module, input, output):\n t_fea2048 = input[0].cpu().tolist()\n cur_vecs = copy.deepcopy(t_fea2048)\n t_logit3 = output.cpu().tolist()\n cur_logits = copy.deepcopy(t_logit3)\n t_fea3 = F.softmax(output, dim=-1)\n cur_fea3 = t_fea3.cpu().tolist()\n cur_probs = copy.deepcopy(cur_fea3)\n\n vecs.extend(cur_vecs)\n logits.extend(cur_logits)\n probs.extend(cur_probs)\n\n cls_model.fc.register_forward_hook(fea_hook)\n slide_dset = loader.PatchDataset(patches)\n dset_loader = data.DataLoader(slide_dset, batch_size=args.batch_size, shuffle=False, num_workers=4)\n with torch.no_grad():\n for ind, inputs in enumerate(dset_loader):\n inputs = inputs.type(torch.FloatTensor)\n inputs = Variable(inputs.cuda())\n outputs = cls_model(inputs)\n\n return probs, logits, vecs\n\n\n\ndef sort_by_prob(BBoxes, ClsProbs, ClsLogits, FeaVecs):\n fea_dict = {}\n norm_prob_list = [ele[0] for ele in ClsProbs]\n sorting_indx = np.argsort(norm_prob_list)\n fea_dict[\"bbox\"] = [BBoxes[ind] for ind in sorting_indx]\n fea_dict[\"prob\"] = [ClsProbs[ind] for ind in sorting_indx]\n fea_dict[\"logit\"] = [ClsLogits[ind] for ind in sorting_indx]\n fea_dict[\"feaVec\"] = [FeaVecs[ind] for ind in sorting_indx]\n\n return fea_dict\n\n\ndef gen_slide_feas(cls_model, split_arr, patches, wsi_dim, args):\n RAW_SIZE = 299\n SIZE1, SIZE2, SIZE4 = int(RAW_SIZE/4), int(RAW_SIZE/2), RAW_SIZE\n class_num = 3\n\n FeasList = []\n BBoxes, ClsProbs, ClsLogits, FeaVecs = [], [], [], []\n # Prediction\n if patches.shape[0] > 0: # exist\n ClsProbs, ClsLogits, FeaVecs = pred_feas(cls_model, patches, args)\n for coor in split_arr:\n cur_x, cur_y = coor[1]+SIZE1, coor[0]+SIZE1\n cur_bbox = [cur_x, cur_y, SIZE2, SIZE2]\n BBoxes.append(cur_bbox)\n\n fea_dict = sort_by_prob(BBoxes, ClsProbs, ClsLogits, FeaVecs)\n return fea_dict\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.ones", "numpy.zeros", "scipy.ndimage.binary_fill_holes", "torch.nn.functional.softmax", "torch.no_grad", "numpy.argsort", "numpy.floor" ] ]
shawnkx/NAT-with-Local-AT
[ "16b29e068ad568e3a020f1309e140aa0dbc38479" ]
[ "Mask-Predict/fairseq/data/language_pair_context_mask.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport numpy as np\nimport torch\nimport random\n\nfrom fairseq import utils\n\nfrom . import data_utils, FairseqDataset\n\n\ndef collate(\n samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False,\n input_feeding=True,\n):\n if len(samples) == 0:\n return {}\n\n def merge(key, is_list=False):\n if is_list:\n res = []\n for i in range(len(samples[0][key])):\n res.append(data_utils.collate_tokens(\n [s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False,\n ))\n return res\n else:\n return data_utils.collate_tokens(\n [s[key] for s in samples], pad_idx, eos_idx, left_pad=False,\n )\n\n is_target_list = isinstance(samples[0]['dec_target'], list)\n return {\n 'id': torch.LongTensor([s['id'] for s in samples]),\n 'ntokens': sum(s['ntokens'] for s in samples),\n 'net_input': {\n 'src_tokens': merge('enc_source'),\n 'src_lengths': torch.LongTensor([\n s['enc_source'].numel() for s in samples\n ]),\n 'prev_output_tokens': merge('dec_source')\n },\n 'target': merge('dec_target', is_target_list),\n 'nsentences': samples[0]['enc_source'].size(0),\n }\n\n \"\"\"id = torch.LongTensor([s['id'] for s in samples])\n src_tokens = merge('source', left_pad=left_pad_source)\n # sort by descending source length\n src_lengths = torch.LongTensor([s['source'].numel() for s in samples])\n src_lengths, sort_order = src_lengths.sort(descending=True)\n id = id.index_select(0, sort_order)\n src_tokens = src_tokens.index_select(0, sort_order)\n prev_output_tokens = None\n target = None\n if samples[0].get('target', None) is not None:\n target = merge('target', left_pad=left_pad_target)\n target = target.index_select(0, sort_order)\n ntokens = sum(len(s['target']) for s in samples)\n if input_feeding:\n # we create a shifted version of targets for feeding the\n # previous output token(s) into the next decoder step\n prev_output_tokens = merge(\n 'target',\n left_pad=left_pad_target,\n move_eos_to_beginning=True,\n )\n prev_output_tokens = prev_output_tokens.index_select(0, sort_order)\n else:\n ntokens = sum(len(s['source']) for s in samples)\n \n batch = {\n 'id': id,\n 'ntokens': ntokens,\n 'net_input': {\n 'src_tokens': src_tokens,\n 'src_lengths': src_lengths,\n },\n 'target': target,\n 'nsentences': samples[0]['source'].size(0),\n }\n if prev_output_tokens is not None:\n batch['net_input']['prev_output_tokens'] = prev_output_tokens\n return batch\"\"\"\n\n\nclass LanguagePairContextMask(FairseqDataset):\n \"\"\"\n A pair of torch.utils.data.Datasets.\n Args:\n src (torch.utils.data.Dataset): source dataset to wrap\n src_sizes (List[int]): source sentence lengths\n src_dict (~fairseq.data.Dictionary): source vocabulary\n tgt (torch.utils.data.Dataset, optional): target dataset to wrap\n tgt_sizes (List[int], optional): target sentence lengths\n tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary\n left_pad_source (bool, optional): pad source tensors on the left side.\n Default: ``True``\n left_pad_target (bool, optional): pad target tensors on the left side.\n Default: ``False``\n max_source_positions (int, optional): max number of tokens in the source\n sentence. Default: ``1024``\n max_target_positions (int, optional): max number of tokens in the target\n sentence. 
Default: ``1024``\n shuffle (bool, optional): shuffle dataset elements before batching.\n Default: ``True``\n input_feeding (bool, optional): create a shifted version of the targets\n to be passed into the model for input feeding/teacher forcing.\n Default: ``True``\n \"\"\"\n\n def __init__(\n self, src, src_sizes, src_dict,\n tgt=None, tgt_sizes=None, tgt_dict=None,\n left_pad_source=True, left_pad_target=False,\n max_source_positions=2048, max_target_positions=2048,\n shuffle=True, input_feeding=True,\n dynamic_length=False,\n mask_range=False,\n train=True,\n seed=None,\n len_context=None\n ):\n if tgt_dict is not None:\n assert src_dict.pad() == tgt_dict.pad()\n assert src_dict.eos() == tgt_dict.eos()\n assert src_dict.unk() == tgt_dict.unk()\n self.src = src\n self.tgt = tgt\n self.src_sizes = np.array(src_sizes)\n self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n self.left_pad_source = left_pad_source\n self.left_pad_target = left_pad_target\n self.max_source_positions = max_source_positions\n self.max_target_positions = max_target_positions\n self.shuffle = shuffle\n self.input_feeding = input_feeding\n self.dynamic_length = dynamic_length\n self.mask_range = mask_range\n self.train = train\n self.seed = seed\n self.random = np.random.RandomState(seed)\n self.seed = seed\n self.len_context = len_context\n\n def __getitem__(self, index):\n enc_source, dec_source, dec_target, ntokens = self._make_source_target(self.src[index], self.tgt[index])\n return {'id': index, 'enc_source': enc_source, 'dec_source': dec_source, 'dec_target': dec_target, 'ntokens': ntokens}\n\n def __len__(self):\n return len(self.src)\n\n def _make_source_target(self, source, target):\n if self.dynamic_length:\n max_len = 3 * len(source) // 2 + 1\n target = target.new((target.tolist() + ([self.tgt_dict.eos()] * (max_len - len(target))))[:max_len])\n \n min_num_masks = 1\n \n enc_source = source\n target = target.new([self.tgt_dict.bos()] + target.tolist())\n dec_source = target.new(target.tolist())\n dec_target_cp = target.new(target.tolist())\n dec_target = target.new([self.tgt_dict.pad()] * len(dec_source))\n \n if self.train:\n if min_num_masks < len(dec_source):\n sample_size = self.random.randint(min_num_masks, len(dec_source))\n else:\n sample_size = len(dec_source)\n\n if self.mask_range:\n start = self.random.randint(len(dec_source) - sample_size + 1)\n ind = list(range(start, start + sample_size))\n else:\n ind = self.random.choice(len(dec_source) , size=sample_size, replace=False)\n \n dec_source[ind] = self.tgt_dict.mask()\n dec_target[ind] = dec_target_cp[ind]\n \n left_context = dec_target.new([self.tgt_dict.bos()] * self.len_context)\n right_context = dec_target.new([self.tgt_dict.eos()] * (self.len_context))\n len_ori_target = dec_target.size(0)\n # print (\"original tokens\", self.tgt_dict.string(dec_target, remove_eos=False))\n dec_target = torch.cat((left_context, dec_target, right_context))\n context_dec_target = torch.cat([dec_target[i:i + len_ori_target] for i in range(self.len_context * 2 + 1)], dim=0)\n context_dec_target = context_dec_target.view(self.len_context * 2 + 1, -1).transpose(0, 1).contiguous().view(-1)\n context_ind = ind * (self.len_context * 2 + 1) + 1\n context_ind = np.concatenate((context_ind, context_ind - 2, context_ind + 2))\n context_ind = context_ind[context_ind >= 0]\n context_ind = context_ind[context_ind < len(context_dec_target)]\n pad_ctx_dec_target = 
context_dec_target.new([self.tgt_dict.pad()] * len(context_dec_target))\n pad_ctx_dec_target[context_ind] = context_dec_target[context_ind]\n context_dec_target = pad_ctx_dec_target\n else:\n dec_target = dec_target_cp\n dec_source[:] = self.tgt_dict.mask()\n left_context = dec_target.new([self.tgt_dict.bos()] * self.len_context)\n right_context = dec_target.new([self.tgt_dict.eos()] * (self.len_context))\n len_ori_target = dec_target.size(0)\n # print (\"original tokens\", self.tgt_dict.string(dec_target, remove_eos=False))\n dec_target = torch.cat((left_context, dec_target, right_context))\n context_dec_target = torch.cat([dec_target[i:i + len_ori_target] for i in range(self.len_context * 2 + 1)], dim=0)\n context_dec_target = context_dec_target.view(self.len_context * 2 + 1, -1).transpose(0, 1).contiguous().view(-1)\n ntokens = dec_target.ne(self.tgt_dict.pad()).sum(-1).item()\n \n\n # print (\"masked tokens\", self.tgt_dict.string(dec_source), len(dec_source))\n # # print (\"original tokens\", self.tgt_dict.string(dec_target, remove_eos=False), len(dec_target))\n # print(\"context dec target tokens\", self.tgt_dict.string(context_dec_target, remove_eos=False))\n # # print (\"source tokens\", self.src_dict.string(enc_source))\n # exit()\n return enc_source, dec_source, context_dec_target, ntokens\n\n def collater(self, samples):\n \"\"\"Merge a list of samples to form a mini-batch.\n Args:\n samples (List[dict]): samples to collate\n Returns:\n dict: a mini-batch with the following keys:\n - `id` (LongTensor): example IDs in the original input order\n - `ntokens` (int): total number of tokens in the batch\n - `net_input` (dict): the input to the Model, containing keys:\n - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in\n the source sentence of shape `(bsz, src_len)`. Padding will\n appear on the left if *left_pad_source* is ``True``.\n - `src_lengths` (LongTensor): 1D Tensor of the unpadded\n lengths of each source sentence of shape `(bsz)`\n - `prev_output_tokens` (LongTensor): a padded 2D Tensor of\n tokens in the target sentence, shifted right by one position\n for input feeding/teacher forcing, of shape `(bsz,\n tgt_len)`. This key will not be present if *input_feeding*\n is ``False``. Padding will appear on the left if\n *left_pad_target* is ``True``.\n - `target` (LongTensor): a padded 2D Tensor of tokens in the\n target sentence of shape `(bsz, tgt_len)`. Padding will appear\n on the left if *left_pad_target* is ``True``.\n \"\"\"\n return collate(\n samples, pad_idx=self.src_dict.pad(), eos_idx=self.src_dict.eos(),\n left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,\n input_feeding=self.input_feeding,\n )\n\n def get_dummy_batch(self, num_tokens, max_positions, src_len=128, tgt_len=128):\n \"\"\"Return a dummy batch with a given number of tokens.\"\"\"\n src_len, tgt_len = utils.resolve_max_positions(\n (src_len, tgt_len),\n max_positions,\n (self.max_source_positions, self.max_target_positions),\n )\n bsz = num_tokens // max(src_len, tgt_len)\n\n enc_source, dec_source, dec_target, ntokens = self._make_source_target(self.src_dict.dummy_sentence(src_len), self.tgt_dict.dummy_sentence(tgt_len))\n\n return self.collater([\n {\n 'id': i,\n 'enc_source': enc_source,\n 'dec_source': dec_source,\n 'dec_target': dec_target,\n 'ntokens': ntokens,\n }\n for i in range(bsz)\n ])\n\n def num_tokens(self, index):\n \"\"\"Return the number of tokens in a sample. 
This value is used to\n enforce ``--max-tokens`` during batching.\"\"\"\n return max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)\n\n def size(self, index):\n \"\"\"Return an example's size as a float or tuple. This value is used when\n filtering a dataset with ``--max-positions``.\"\"\"\n return (self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)\n\n def ordered_indices(self):\n \"\"\"Return an ordered list of indices. Batches will be constructed based\n on this order.\"\"\"\n if self.shuffle and self.train and self.seed is None:\n return np.random.permutation(len(self))\n \n indices = np.arange(len(self))\n if self.tgt_sizes is not None:\n indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]\n return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]\n\n def prefetch(self, indices):\n self.src.prefetch(indices)\n self.tgt.prefetch(indices)\n\n @property\n def supports_prefetch(self):\n return (\n hasattr(self.src, 'supports_prefetch')\n and self.src.supports_prefetch\n and hasattr(self.tgt, 'supports_prefetch')\n and self.tgt.supports_prefetch\n )\n" ]
[ [ "numpy.argsort", "numpy.random.RandomState", "numpy.array", "numpy.concatenate", "torch.LongTensor", "torch.cat" ] ]
sooftware/Fairseq-Listen-Attend-Spell
[ "9c66b3e7afef8bdcd24c6e71efffc45b8db6ae04" ]
[ "fairseq_las/data/data_utils.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\n\n\ndef calc_mean_invstddev(feature):\n if len(feature.size()) != 2:\n raise ValueError(\"We expect the input feature to be 2-D tensor\")\n mean = feature.mean(0)\n var = feature.var(0)\n # avoid division by ~zero\n eps = 1e-8\n if (var < eps).any():\n return mean, 1.0 / (torch.sqrt(var) + eps)\n return mean, 1.0 / torch.sqrt(var)\n\n\ndef apply_mv_norm(features):\n # If there is less than 2 spectrograms, the variance cannot be computed (is NaN)\n # and normalization is not possible, so return the item as it is\n if features.size(0) < 2:\n return features\n mean, invstddev = calc_mean_invstddev(features)\n res = (features - mean) * invstddev\n return res\n\n\ndef lengths_to_encoder_padding_mask(lengths, batch_first=False):\n \"\"\"\n convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor\n\n Args:\n lengths: a (B, )-shaped tensor\n\n Return:\n max_length: maximum length of B sequences\n encoder_padding_mask: a (max_length, B) binary mask, where\n [t, b] = 0 for t < lengths[b] and 1 otherwise\n\n TODO:\n kernelize this function if benchmarking shows this function is slow\n \"\"\"\n max_lengths = torch.max(lengths).item()\n bsz = lengths.size(0)\n encoder_padding_mask = torch.arange(\n max_lengths\n ).to( # a (T, ) tensor with [0, ..., T-1]\n lengths.device\n ).view( # move to the right device\n 1, max_lengths\n ).expand( # reshape to (1, T)-shaped tensor\n bsz, -1\n ) >= lengths.view( # expand to (B, T)-shaped tensor\n bsz, 1\n ).expand(\n -1, max_lengths\n )\n if not batch_first:\n return encoder_padding_mask.t(), max_lengths\n else:\n return encoder_padding_mask, max_lengths\n\n\ndef encoder_padding_mask_to_lengths(encoder_padding_mask, max_lengths, batch_size, device):\n \"\"\"\n convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor\n\n Conventionally, encoder output contains a encoder_padding_mask, which is\n a 2-D mask in a shape (T, B), whose (t, b) element indicate whether\n encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we\n need to convert this mask tensor to a 1-D tensor in shape (B, ), where\n [b] denotes the valid length of b-th sequence\n\n Args:\n encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None,\n indicating all are valid\n Return:\n seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the\n number of valid elements of b-th sequence\n\n max_lengths: maximum length of all sequence, if encoder_padding_mask is\n not None, max_lengths must equal to encoder_padding_mask.size(0)\n\n batch_size: batch size; if encoder_padding_mask is\n not None, max_lengths must equal to encoder_padding_mask.size(1)\n\n device: which device to put the result on\n \"\"\"\n if encoder_padding_mask is None:\n return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device)\n\n assert encoder_padding_mask.size(0) == max_lengths, \"max_lengths does not match\"\n assert encoder_padding_mask.size(1) == batch_size, \"batch_size does not match\"\n\n return max_lengths - torch.sum(encoder_padding_mask, dim=0)\n" ]
[ [ "torch.sum", "torch.sqrt", "torch.arange", "torch.max", "torch.Tensor" ] ]
colesbury/awkward-1.0
[ "d036ab18eb54de8a2571d9f179d315ac8ee22119", "d036ab18eb54de8a2571d9f179d315ac8ee22119" ]
[ "tests/test_0006-deep-iteration.py", "tests/test_0074-argsort-and-sort.py" ]
[ "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport sys\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_iterator():\n content = ak.layout.NumpyArray(np.array([1.1, 2.2, 3.3]))\n offsets = ak.layout.Index32(np.array([0, 2, 2, 3], \"i4\"))\n array = ak.layout.ListOffsetArray32(offsets, content)\n assert list(content) == [1.1, 2.2, 3.3]\n assert [np.asarray(x).tolist() for x in array] == [[1.1, 2.2], [], [3.3]]\n\n\ndef test_refcount():\n content = ak.layout.NumpyArray(np.array([1.1, 2.2, 3.3]))\n offsets = ak.layout.Index32(np.array([0, 2, 2, 3], \"i4\"))\n array = ak.layout.ListOffsetArray32(offsets, content)\n\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n\n iter1 = iter(content)\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n x1 = next(iter1)\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n\n iter2 = iter(array)\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n x2 = next(iter2)\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n\n del iter1\n del x1\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n\n del iter2\n del x2\n assert (sys.getrefcount(content), sys.getrefcount(array)) == (2, 2)\n", "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_bool_sort():\n array = ak.layout.NumpyArray(np.array([True, False, True, False, False]))\n assert ak.to_list(ak.sort(array, axis=0, ascending=True, stable=False)) == [\n False,\n False,\n False,\n True,\n True,\n ]\n\n\ndef test_keep_None_in_place_test():\n array = ak.Array([[3, 2, 1], [], None, [4, 5]])\n\n assert ak.to_list(ak.argsort(array, axis=1)) == [\n [2, 1, 0],\n [],\n None,\n [0, 1],\n ]\n\n assert ak.to_list(ak.sort(array, axis=1)) == [\n [1, 2, 3],\n [],\n None,\n [4, 5],\n ]\n\n assert ak.to_list(array[ak.argsort(array, axis=1)]) == ak.to_list(\n ak.sort(array, axis=1)\n )\n\n\ndef test_EmptyArray():\n array = ak.layout.EmptyArray()\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n assert str(ak.type(ak.sort(array))) == \"0 * float64\"\n assert str(ak.type(ak.argsort(array))) == \"0 * int64\"\n\n array2 = ak.Array([[], [], []])\n assert ak.to_list(ak.argsort(array2)) == [[], [], []]\n assert str(ak.type(ak.argsort(array2))) == \"3 * var * int64\"\n\n\ndef test_NumpyArray():\n array = ak.layout.NumpyArray(np.array([3.3, 2.2, 1.1, 5.5, 4.4]))\n assert ak.to_list(ak.argsort(array, axis=0, ascending=True, stable=False)) == [\n 2,\n 1,\n 0,\n 4,\n 3,\n ]\n assert ak.to_list(ak.argsort(array, axis=0, ascending=False, stable=False)) == [\n 3,\n 4,\n 0,\n 1,\n 2,\n ]\n\n assert ak.to_list(ak.sort(array, axis=0, ascending=True, stable=False)) == [\n 1.1,\n 2.2,\n 3.3,\n 4.4,\n 5.5,\n ]\n assert ak.to_list(ak.sort(array, axis=0, ascending=False, stable=False)) == [\n 5.5,\n 4.4,\n 3.3,\n 2.2,\n 1.1,\n ]\n\n array2 = ak.layout.NumpyArray(np.array([[3.3, 2.2, 4.4], [1.1, 5.5, 3.3]]))\n\n assert ak.to_list(\n ak.sort(array2, axis=1, ascending=True, stable=False)\n ) == ak.to_list(np.sort(array2, axis=1))\n assert ak.to_list(\n ak.sort(array2, axis=0, ascending=True, stable=False)\n ) == ak.to_list(np.sort(array2, axis=0))\n\n assert ak.to_list(\n ak.argsort(array2, axis=1, ascending=True, stable=False)\n ) == 
ak.to_list(np.argsort(array2, 1))\n assert ak.to_list(\n ak.argsort(array2, axis=0, ascending=True, stable=False)\n ) == ak.to_list(np.argsort(array2, 0))\n\n with pytest.raises(ValueError) as err:\n ak.sort(array2, axis=2, ascending=True, stable=False)\n assert str(err.value).startswith(\n \"axis=2 exceeds the depth of the nested list structure (which is 2)\"\n )\n\n\ndef test_IndexedOptionArray():\n array = ak.Array(\n [\n [None, None, 2.2, 1.1, 3.3],\n [None, None, None],\n [4.4, None, 5.5],\n [5.5, None, None],\n [-4.4, -5.5, -6.6],\n ]\n )\n\n assert ak.to_list(ak.sort(array, axis=0, ascending=True, stable=False)) == [\n [-4.4, -5.5, -6.6, 1.1, 3.3],\n [4.4, None, 2.2],\n [5.5, None, 5.5],\n [None, None, None],\n [None, None, None],\n ]\n\n assert ak.to_list(ak.sort(array, axis=1, ascending=True, stable=False)) == [\n [1.1, 2.2, 3.3, None, None],\n [None, None, None],\n [4.4, 5.5, None],\n [5.5, None, None],\n [-6.6, -5.5, -4.4],\n ]\n\n assert ak.to_list(ak.sort(array, axis=1, ascending=False, stable=True)) == [\n [3.3, 2.2, 1.1, None, None],\n [None, None, None],\n [5.5, 4.4, None],\n [5.5, None, None],\n [-4.4, -5.5, -6.6],\n ]\n\n assert ak.to_list(ak.sort(array, axis=1, ascending=False, stable=False)) == [\n [3.3, 2.2, 1.1, None, None],\n [None, None, None],\n [5.5, 4.4, None],\n [5.5, None, None],\n [-4.4, -5.5, -6.6],\n ]\n\n assert ak.to_list(ak.argsort(array, axis=0, ascending=True, stable=True)) == [\n [4, 4, 4, 0, 0],\n [2, 0, 0],\n [3, 1, 2],\n [0, 2, 1],\n [1, 3, 3],\n ]\n\n assert ak.to_list(ak.argsort(array, axis=0, ascending=True, stable=False)) == [\n [4, 4, 4, 0, 0],\n [2, 0, 0],\n [3, 1, 2],\n [0, 2, 1],\n [1, 3, 3],\n ]\n\n assert ak.to_list(ak.argsort(array, axis=0, ascending=False, stable=True)) == [\n [3, 4, 2, 0, 0],\n [2, 0, 0],\n [4, 1, 4],\n [0, 2, 1],\n [1, 3, 3],\n ]\n assert ak.to_list(ak.argsort(array, axis=0, ascending=False, stable=False)) == [\n [3, 4, 2, 0, 0],\n [2, 0, 0],\n [4, 1, 4],\n [0, 2, 1],\n [1, 3, 3],\n ]\n\n assert ak.to_list(ak.argsort(array, axis=1, ascending=True, stable=True)) == [\n [3, 2, 4, 0, 1],\n [0, 1, 2],\n [0, 2, 1],\n [0, 1, 2],\n [2, 1, 0],\n ]\n\n assert ak.to_list(ak.argsort(array, axis=1, ascending=True, stable=False)) == [\n [3, 2, 4, 0, 1],\n [0, 1, 2],\n [0, 2, 1],\n [0, 1, 2],\n [2, 1, 0],\n ]\n\n assert ak.to_list(ak.argsort(array, axis=1, ascending=False, stable=True)) == [\n [4, 2, 3, 0, 1],\n [0, 1, 2],\n [2, 0, 1],\n [0, 1, 2],\n [0, 1, 2],\n ]\n\n array2 = ak.Array([None, None, 1, -1, 30])\n assert ak.to_list(ak.argsort(array2, axis=0, ascending=True, stable=True)) == [\n 3,\n 2,\n 4,\n 0,\n 1,\n ]\n\n array3 = ak.Array(\n [[2.2, 1.1, 3.3], [], [4.4, 5.5], [5.5], [-4.4, -5.5, -6.6]]\n ).layout\n\n assert ak.to_list(ak.sort(array3, axis=1, ascending=False, stable=False)) == [\n [3.3, 2.2, 1.1],\n [],\n [5.5, 4.4],\n [5.5],\n [-4.4, -5.5, -6.6],\n ]\n\n assert ak.to_list(ak.sort(array3, axis=0, ascending=True, stable=False)) == [\n [-4.4, -5.5, -6.6],\n [],\n [2.2, 1.1],\n [4.4],\n [5.5, 5.5, 3.3],\n ]\n\n # FIXME: Based on NumPy list sorting:\n #\n # array([list([2.2, 1.1, 3.3]), list([]), list([4.4, 5.5]), list([5.5]),\n # list([-4.4, -5.5, -6.6])], dtype=object)\n # np.sort(array, axis=0)\n # array([list([]), list([-4.4, -5.5, -6.6]), list([2.2, 1.1, 3.3]),\n # list([4.4, 5.5]), list([5.5])], dtype=object)\n #\n # the result should be:\n #\n # [[ -4.4, -5.5, -6.6 ],\n # [ 2.2, 1.1, 3.3 ],\n # [ 4.4, 5.5 ],\n # [ 5.5 ],\n # []]\n\n # This can be done following the steps: pad, sort,\n # and dropna to strip off 
the None's\n #\n array4 = array3.rpad(3, 1)\n assert ak.to_list(array4) == [\n [2.2, 1.1, 3.3],\n [None, None, None],\n [4.4, 5.5, None],\n [5.5, None, None],\n [-4.4, -5.5, -6.6],\n ]\n\n array5 = ak.sort(array4, axis=0, ascending=True, stable=False)\n assert ak.to_list(array5) == [\n [-4.4, -5.5, -6.6],\n [2.2, 1.1, 3.3],\n [4.4, 5.5, None],\n [5.5, None, None],\n [None, None, None],\n ]\n\n array4 = array3.rpad(5, 1)\n assert ak.to_list(array4) == [\n [2.2, 1.1, 3.3, None, None],\n [None, None, None, None, None],\n [4.4, 5.5, None, None, None],\n [5.5, None, None, None, None],\n [-4.4, -5.5, -6.6, None, None],\n ]\n\n array5 = ak.sort(array4, axis=0, ascending=True, stable=False)\n assert ak.to_list(array5) == [\n [-4.4, -5.5, -6.6, None, None],\n [2.2, 1.1, 3.3, None, None],\n [4.4, 5.5, None, None, None],\n [5.5, None, None, None, None],\n [None, None, None, None, None],\n ]\n\n array5 = ak.argsort(array4, axis=0, ascending=True, stable=False)\n assert ak.to_list(array5) == [\n [4, 4, 4, 0, 0],\n [0, 0, 0, 1, 1],\n [2, 2, 1, 2, 2],\n [3, 1, 2, 3, 3],\n [1, 3, 3, 4, 4],\n ]\n\n # FIXME: implement dropna to strip off the None's\n #\n # array6 = array5.dropna(0)\n # assert ak.to_list(array6) == [\n # [ -4.4, -5.5, -6.6 ],\n # [ 2.2, 1.1, 3.3 ],\n # [ 4.4, 5.5 ],\n # [ 5.5 ],\n # []]\n\n content = ak.layout.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5]))\n index1 = ak.layout.Index32(np.array([1, 2, 3, 4], dtype=np.int32))\n indexedarray1 = ak.layout.IndexedArray32(index1, content)\n assert ak.to_list(\n ak.argsort(indexedarray1, axis=0, ascending=True, stable=False)\n ) == [0, 1, 2, 3]\n\n index2 = ak.layout.Index64(np.array([1, 2, 3], dtype=np.int64))\n indexedarray2 = ak.layout.IndexedArray64(index2, indexedarray1)\n assert ak.to_list(\n ak.sort(indexedarray2, axis=0, ascending=False, stable=False)\n ) == [5.5, 4.4, 3.3]\n\n index3 = ak.layout.Index32(np.array([1, 2], dtype=np.int32))\n indexedarray3 = ak.layout.IndexedArray32(index3, indexedarray2)\n assert ak.to_list(ak.sort(indexedarray3, axis=0, ascending=True, stable=False)) == [\n 4.4,\n 5.5,\n ]\n\n\ndef test_3d():\n array = ak.layout.NumpyArray(\n np.array(\n [\n # axis 2: 0 1 2 3 4 # axis 1:\n [\n [1.1, 2.2, 3.3, 4.4, 5.5], # 0\n [6.6, 7.7, 8.8, 9.9, 10.10], # 1\n [11.11, 12.12, 13.13, 14.14, 15.15],\n ], # 2\n [\n [-1.1, -2.2, -3.3, -4.4, -5.5], # 3\n [-6.6, -7.7, -8.8, -9.9, -10.1], # 4\n [-11.11, -12.12, -13.13, -14.14, -15.15],\n ],\n ]\n )\n ) # 5\n assert ak.to_list(\n ak.argsort(array, axis=2, ascending=True, stable=False)\n ) == ak.to_list(np.argsort(array, 2))\n assert ak.to_list(\n ak.sort(array, axis=2, ascending=True, stable=False)\n ) == ak.to_list(np.sort(array, 2))\n assert ak.to_list(\n ak.argsort(array, axis=1, ascending=True, stable=False)\n ) == ak.to_list(np.argsort(array, 1))\n assert ak.to_list(\n ak.sort(array, axis=1, ascending=True, stable=False)\n ) == ak.to_list(np.sort(array, 1))\n\n assert ak.to_list(ak.sort(array, axis=1, ascending=False, stable=False)) == [\n [\n [11.11, 12.12, 13.13, 14.14, 15.15],\n [6.6, 7.7, 8.8, 9.9, 10.1],\n [1.1, 2.2, 3.3, 4.4, 5.5],\n ],\n [\n [-1.1, -2.2, -3.3, -4.4, -5.5],\n [-6.6, -7.7, -8.8, -9.9, -10.1],\n [-11.11, -12.12, -13.13, -14.14, -15.15],\n ],\n ]\n\n assert ak.to_list(\n ak.sort(array, axis=0, ascending=True, stable=False)\n ) == ak.to_list(np.sort(array, 0))\n assert ak.to_list(\n ak.argsort(array, axis=0, ascending=True, stable=False)\n ) == ak.to_list(np.argsort(array, 0))\n\n\ndef test_RecordArray():\n array = ak.Array(\n [\n {\"x\": 0.0, \"y\": []},\n 
{\"x\": 1.1, \"y\": [1]},\n {\"x\": 2.2, \"y\": [2, 2]},\n {\"x\": 3.3, \"y\": [3, 3, 3]},\n {\"x\": 4.4, \"y\": [4, 4, 4, 4]},\n {\"x\": 5.5, \"y\": [5, 5, 5]},\n {\"x\": 6.6, \"y\": [6, 6]},\n {\"x\": 7.7, \"y\": [7]},\n {\"x\": 8.8, \"y\": []},\n ]\n )\n assert ak.to_list(array) == [\n {\"x\": 0.0, \"y\": []},\n {\"x\": 1.1, \"y\": [1]},\n {\"x\": 2.2, \"y\": [2, 2]},\n {\"x\": 3.3, \"y\": [3, 3, 3]},\n {\"x\": 4.4, \"y\": [4, 4, 4, 4]},\n {\"x\": 5.5, \"y\": [5, 5, 5]},\n {\"x\": 6.6, \"y\": [6, 6]},\n {\"x\": 7.7, \"y\": [7]},\n {\"x\": 8.8, \"y\": []},\n ]\n\n assert ak.to_list(array.layout.sort(-1, True, False)) == {\n \"x\": [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8],\n \"y\": [[], [1], [2, 2], [3, 3, 3], [4, 4, 4, 4], [5, 5, 5], [6, 6], [7], []],\n }\n\n assert ak.to_list(array.layout.sort(-1, False, False)) == {\n \"x\": [8.8, 7.7, 6.6, 5.5, 4.4, 3.3, 2.2, 1.1, 0.0],\n \"y\": [[], [1], [2, 2], [3, 3, 3], [4, 4, 4, 4], [5, 5, 5], [6, 6], [7], []],\n }\n\n assert ak.to_list(array.layout.argsort(-1, True, False)) == {\n \"x\": [0, 1, 2, 3, 4, 5, 6, 7, 8],\n \"y\": [[], [0], [0, 1], [0, 1, 2], [0, 1, 2, 3], [0, 1, 2], [0, 1], [0], []],\n }\n\n assert ak.to_list(array.x.layout.argsort(0, True, False)) == [\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n ]\n assert ak.to_list(array.x.layout.argsort(0, False, False)) == [\n 8,\n 7,\n 6,\n 5,\n 4,\n 3,\n 2,\n 1,\n 0,\n ]\n\n array_y = array.y\n assert ak.to_list(array_y) == [\n [],\n [1],\n [2, 2],\n [3, 3, 3],\n [4, 4, 4, 4],\n [5, 5, 5],\n [6, 6],\n [7],\n [],\n ]\n assert ak.to_list(array.y.layout.argsort(0, True, False)) == [\n # FIXME?\n [],\n [1],\n [2, 2],\n [3, 3, 3],\n [4, 4, 4, 4],\n [5, 5, 5],\n [6, 6],\n [7],\n []\n # [],\n # [0],\n # [1, 0],\n # [2, 1, 0],\n # [3, 2, 1, 0],\n # [4, 3, 2],\n # [5, 4],\n # [6],\n # [],\n ]\n\n assert ak.to_list(array.y.layout.argsort(1, True, True)) == [\n [],\n [0],\n [0, 1],\n [0, 1, 2],\n [0, 1, 2, 3],\n [0, 1, 2],\n [0, 1],\n [0],\n [],\n ]\n\n\ndef test_ByteMaskedArray():\n content = ak.from_iter(\n [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]], highlevel=False\n )\n mask = ak.layout.Index8(np.array([0, 0, 1, 1, 0], dtype=np.int8))\n array = ak.layout.ByteMaskedArray(mask, content, valid_when=False)\n assert ak.to_list(ak.argsort(array, axis=0, ascending=True, stable=False)) == [\n [0, 0, 0],\n [],\n [2, 2, 2, 2],\n None,\n None,\n ]\n\n assert ak.to_list(ak.sort(array, axis=0, ascending=True, stable=False)) == [\n [0.0, 1.1, 2.2],\n [],\n [6.6, 7.7, 8.8, 9.9],\n None,\n None,\n ]\n\n assert ak.to_list(ak.sort(array, axis=0, ascending=False, stable=False)) == [\n [6.6, 7.7, 8.8],\n [],\n [0.0, 1.1, 2.2, 9.9],\n None,\n None,\n ]\n\n assert ak.to_list(ak.argsort(array, axis=1, ascending=True, stable=False)) == [\n [0, 1, 2],\n [],\n None,\n None,\n [0, 1, 2, 3],\n ]\n\n assert ak.to_list(array.sort(1, False, False)) == [\n [2.2, 1.1, 0.0],\n [],\n None,\n None,\n [9.9, 8.8, 7.7, 6.6],\n ]\n\n\ndef test_UnionArray():\n content0 = ak.from_iter([[1.1, 2.2, 3.3], [], [4.4, 5.5]], highlevel=False)\n content1 = ak.from_iter(\n [[\"one\"], [\"two\"], [\"three\"], [\"four\"], [\"five\"]], highlevel=False\n )\n tags = ak.layout.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n index = ak.layout.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n array = ak.layout.UnionArray8_32(tags, index, [content0, content1])\n\n with pytest.raises(ValueError) as err:\n ak.sort(array, axis=1, ascending=True, stable=False)\n assert str(err.value).startswith(\"cannot sort 
UnionArray8_32\")\n\n\ndef test_sort_strings():\n content1 = ak.from_iter([\"one\", \"two\", \"three\", \"four\", \"five\"], highlevel=False)\n assert ak.to_list(content1) == [\"one\", \"two\", \"three\", \"four\", \"five\"]\n\n assert ak.to_list(ak.sort(content1, axis=0, ascending=True, stable=False)) == [\n \"five\",\n \"four\",\n \"one\",\n \"three\",\n \"two\",\n ]\n assert ak.to_list(ak.sort(content1, axis=0, ascending=False, stable=False)) == [\n \"two\",\n \"three\",\n \"one\",\n \"four\",\n \"five\",\n ]\n\n\ndef test_sort_bytestrings():\n array = ak.from_iter(\n [b\"one\", b\"two\", b\"three\", b\"two\", b\"two\", b\"one\", b\"three\"], highlevel=False\n )\n assert ak.to_list(array) == [\n b\"one\",\n b\"two\",\n b\"three\",\n b\"two\",\n b\"two\",\n b\"one\",\n b\"three\",\n ]\n\n assert ak.to_list(ak.sort(array, axis=0, ascending=True, stable=False)) == [\n b\"one\",\n b\"one\",\n b\"three\",\n b\"three\",\n b\"two\",\n b\"two\",\n b\"two\",\n ]\n\n assert ak.to_list(ak.argsort(array, axis=0, ascending=True, stable=True)) == [\n 0,\n 5,\n 2,\n 6,\n 1,\n 3,\n 4,\n ]\n\n\ndef test_sort_zero_length_arrays():\n array = ak.layout.IndexedArray64(\n ak.layout.Index64([]), ak.layout.NumpyArray([1, 2, 3])\n )\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n content0 = ak.from_iter([[1.1, 2.2, 3.3], [], [4.4, 5.5]], highlevel=False)\n content1 = ak.from_iter([\"one\", \"two\", \"three\", \"four\", \"five\"], highlevel=False)\n tags = ak.layout.Index8([])\n index = ak.layout.Index32([])\n array = ak.layout.UnionArray8_32(tags, index, [content0, content1])\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n content = ak.from_iter(\n [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]], highlevel=False\n )\n mask = ak.layout.Index8([])\n array = ak.layout.ByteMaskedArray(mask, content, valid_when=False)\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n array = ak.layout.NumpyArray([])\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n array = ak.layout.RecordArray([])\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n content = ak.layout.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n starts1 = ak.layout.Index64([])\n stops1 = ak.layout.Index64([])\n offsets1 = ak.layout.Index64(np.array([0]))\n array = ak.layout.ListArray64(starts1, stops1, content)\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n\n array = ak.layout.ListOffsetArray64(offsets1, content)\n assert ak.to_list(array) == []\n assert ak.to_list(ak.sort(array)) == []\n assert ak.to_list(ak.argsort(array)) == []\n" ]
[ [ "numpy.array", "numpy.asarray" ], [ "numpy.array", "numpy.sort", "numpy.argsort" ] ]
naoyam/lbann
[ "d30e053b6f86d1cf8cca1d61c94bbbdbfc4945c4" ]
[ "bamboo/unit_tests/test_unit_layer_squared_difference.py" ]
[ "import functools\nimport operator\nimport os\nimport os.path\nimport sys\nimport numpy as np\n\n# Bamboo utilities\ncurrent_file = os.path.realpath(__file__)\ncurrent_dir = os.path.dirname(current_file)\nsys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))\nimport tools\n\n# ==============================================\n# Objects for Python data reader\n# ==============================================\n# Note: The Python data reader imports this file as a module and calls\n# the functions below to ingest data.\n\n# Data\nnp.random.seed(2019102415)\n_samples = np.random.normal(size=(23,2,7)).astype(np.float32)\n\n# Sample access functions\ndef get_sample(index):\n return _samples[index].reshape(-1)\ndef num_samples():\n return _samples.shape[0]\ndef sample_dims():\n return (2*_samples.shape[-1],)\n\n# ==============================================\n# Setup LBANN experiment\n# ==============================================\n\ndef setup_experiment(lbann):\n \"\"\"Construct LBANN experiment.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n trainer = lbann.Trainer()\n model = construct_model(lbann)\n data_reader = construct_data_reader(lbann)\n optimizer = lbann.NoOptimizer()\n return trainer, model, data_reader, optimizer\n\ndef construct_model(lbann):\n \"\"\"Construct LBANN model.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n\n # Input data\n # Note: Sum with weights layers so that gradient checking will\n # verify that error signals are correct.\n slice_size = _samples.shape[-1]\n x0_weights = lbann.Weights(optimizer=lbann.SGD(),\n initializer=lbann.ConstantInitializer(value=0.0),\n name='input0_weights')\n x1_weights = lbann.Weights(optimizer=lbann.SGD(),\n initializer=lbann.ConstantInitializer(value=0.0),\n name='input1_weights')\n x_slice = lbann.Slice(lbann.Input(),\n slice_points=tools.str_list([0, slice_size, 2*slice_size]))\n x0 = lbann.Sum(x_slice,\n lbann.WeightsLayer(weights=x0_weights, dims=str(slice_size)))\n x1 = lbann.Sum(x_slice,\n lbann.WeightsLayer(weights=x1_weights, dims=str(slice_size)))\n x0_lbann = x0\n x1_lbann = x1\n\n # Objects for LBANN model\n obj = []\n metrics = []\n callbacks = []\n\n # ------------------------------------------\n # Data-parallel layout\n # ------------------------------------------\n\n # LBANN implementation\n x0 = x0_lbann\n x1 = x1_lbann\n y = lbann.SquaredDifference(x0, x1, data_layout='data_parallel')\n z = lbann.L2Norm2(y)\n obj.append(z)\n metrics.append(lbann.Metric(z, name='data-parallel layout'))\n\n # NumPy implementation\n vals = []\n for i in range(num_samples()):\n x = get_sample(i).astype(np.float64)\n x0 = x[:slice_size]\n x1 = x[slice_size:]\n y = (x1-x0)**2\n z = tools.numpy_l2norm2(y)\n vals.append(z)\n val = np.mean(vals)\n tol = 8 * val * np.finfo(np.float32).eps\n callbacks.append(lbann.CallbackCheckMetric(\n metric=metrics[-1].name,\n lower_bound=val-tol,\n upper_bound=val+tol,\n error_on_failure=True,\n execution_modes='test'))\n\n # ------------------------------------------\n # Model-parallel layout\n # ------------------------------------------\n\n # LBANN implementation\n x0 = x0_lbann\n x1 = x1_lbann\n y = lbann.SquaredDifference(x0, x1, data_layout='model_parallel')\n z = lbann.L2Norm2(y)\n obj.append(z)\n metrics.append(lbann.Metric(z, name='model-parallel layout, unbiased'))\n\n # NumPy implementation\n vals = []\n for i in range(num_samples()):\n x = get_sample(i).astype(np.float64)\n x0 = x[:slice_size]\n x1 = x[slice_size:]\n 
y = (x1-x0)**2\n z = tools.numpy_l2norm2(y)\n vals.append(z)\n val = np.mean(vals)\n tol = 8 * val * np.finfo(np.float32).eps\n callbacks.append(lbann.CallbackCheckMetric(\n metric=metrics[-1].name,\n lower_bound=val-tol,\n upper_bound=val+tol,\n error_on_failure=True,\n execution_modes='test'))\n\n # ------------------------------------------\n # Gradient checking\n # ------------------------------------------\n\n callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))\n\n # ------------------------------------------\n # Construct model\n # ------------------------------------------\n\n mini_batch_size = num_samples() // 2\n num_epochs = 0\n return lbann.Model(mini_batch_size,\n num_epochs,\n layers=lbann.traverse_layer_graph(x0_lbann),\n objective_function=obj,\n metrics=metrics,\n callbacks=callbacks)\n\ndef construct_data_reader(lbann):\n \"\"\"Construct Protobuf message for Python data reader.\n\n The Python data reader will import the current Python file to\n access the sample access functions.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n\n # Note: The training data reader should be removed when\n # https://github.com/LLNL/lbann/issues/1098 is resolved.\n message = lbann.reader_pb2.DataReader()\n message.reader.extend([\n tools.create_python_data_reader(\n lbann,\n current_file,\n 'get_sample',\n 'num_samples',\n 'sample_dims',\n 'train'\n )\n ])\n message.reader.extend([\n tools.create_python_data_reader(\n lbann,\n current_file,\n 'get_sample',\n 'num_samples',\n 'sample_dims',\n 'test'\n )\n ])\n return message\n\n# ==============================================\n# Setup PyTest\n# ==============================================\n\n# Create test functions that can interact with PyTest\nfor test in tools.create_tests(setup_experiment, __file__):\n globals()[test.__name__] = test\n" ]
[ [ "numpy.random.normal", "numpy.finfo", "numpy.random.seed", "numpy.mean" ] ]
hizb-resume/LTD-local-track-to-detect-for-VID
[ "7147ac7c6cd4b22a956aaaabaa151e5ed5410c68" ]
[ "projects/adnet/mains/ADNet2.py" ]
[ "import _init_paths\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\nfrom trainers.adnet_train_sl import adnet_train_sl\nimport argparse\nfrom options.general2 import opts\nfrom models.ADNet import adnet\nfrom utils.get_train_videos import get_train_videos\nfrom trainers.adnet_train_rl import adnet_train_rl\nimport torch\ntorch.multiprocessing.set_start_method('spawn', force=True)\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport os\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\nparser = argparse.ArgumentParser(\n description='ADNet training')\n# parser.add_argument('--resume', default='weights/ADNet_SL_backup.pth', type=str, help='Resume from checkpoint')\n# parser.add_argument('--resume', default='weights/ADNet_RL_2epoch8_backup.pth', type=str, help='Resume from checkpoint')\n# parser.add_argument('--resume', default='weights/ADNet_SL_epoch27_final.pth', type=str, help='Resume from checkpoint')\nparser.add_argument('--resume', default='models/weights_mul_step3_new/ADNet_SL_.pth', type=str, help='Resume from checkpoint')\nparser.add_argument('--num_workers', default=6, type=int, help='Number of workers used in dataloading')\nparser.add_argument('--start_iter', default=2, type=int, help='Begin counting iterations starting from this value (should be used with resume)')\nparser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda to train model')\nparser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD')\nparser.add_argument('--visualize', default=True, type=str2bool, help='Use tensorboardx to for loss visualization')\nparser.add_argument('--send_images_to_visualization', type=str2bool, default=False, help='Sample a random image from each 10th batch, send it to visdom after augmentations step')\nparser.add_argument('--save_folder', default='models/weights_del', help='Location to save checkpoint models')\nparser.add_argument('--tensorlogdir', default='logs/tensorboardx_log_del', help='Location to save tensorboardx_log')\nparser.add_argument('--train_consecutive', default=False, type=str2bool, help='Whether to train consecutive frames')\nparser.add_argument('--train_mul_step', default=False, type=str2bool, help='Whether to train multiple steps')\n\nparser.add_argument('--save_file', default='ADNet_SL_', type=str, help='save file part of file name for SL')\nparser.add_argument('--save_file_RL', default='ADNet_RL_', type=str, help='save file part of file name for RL')\nparser.add_argument('--start_epoch', default=0, type=int, help='Begin counting epochs starting from this value')\n\nparser.add_argument('--run_supervised', default=True, type=str2bool, help='Whether to run supervised learning or not')\n\nparser.add_argument('--multidomain', default=False, type=str2bool, help='Separating weight for each videos (default) or not')\n\nparser.add_argument('--save_result_images', default=False, type=str2bool, help='Whether to save the results or not. 
Save folder: images/')\nparser.add_argument('--display_images', default=False, type=str2bool, help='Whether to display images or not')\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n # Supervised Learning part\n if args.run_supervised:\n #opts['minibatch_size'] = 128\n opts['minibatch_size'] = 256\n # train with supervised learning\n _, _, train_videos = adnet_train_sl(args, opts)\n args.resume = os.path.join(args.save_folder, args.save_file) + '.pth'\n\n # reinitialize the network with network from SL\n net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=True,\n multidomain=args.multidomain)\n\n args.start_epoch = 0\n args.start_iter = 0\n\n else:\n assert args.resume is not None, \\\n \"Please put result of supervised learning or reinforcement learning with --resume (filename)\"\n train_videos = get_train_videos(opts)\n if train_videos == None:\n opts['num_videos'] = 1\n else:\n opts['num_videos'] = len(train_videos['video_names'])\n\n if args.start_iter == 0: # means the weight came from the SL\n # net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=True, multidomain=args.multidomain)\n net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=False,\n multidomain=args.multidomain)\n else: # resume the adnet\n net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=False, multidomain=args.multidomain)\n\n if args.cuda:\n net = nn.DataParallel(net)\n cudnn.benchmark = True\n\n net = net.cuda()\n\n # Reinforcement Learning part\n #opts['minibatch_size'] = 32\n opts['minibatch_size'] = 128\n\n net = adnet_train_rl(net, domain_specific_nets, train_videos, opts, args)\n\n\n\n" ]
[ [ "torch.nn.DataParallel", "torch.multiprocessing.set_start_method", "tensorflow.compat.v1.logging.set_verbosity" ] ]
ruclion/Fantasy_Mix-Lingual_Tacotron_Version_7_NOVAE-Phoneme-HCSI-NOLID_DBMIX
[ "baa4c8c3f3ba80acf68cec88aed53084a97c8aa1" ]
[ "tacotron/models/tacotron.py" ]
[ "import tensorflow as tf \nfrom tacotron.utils.symbols import symbols\nfrom tacotron.utils.symbols import tone_stress_symbols_max_no\nfrom tacotron.utils.symbols import symbols_tag\nfrom infolog import log\nfrom tacotron.models.helpers import TacoTrainingHelper, TacoTestHelper\nfrom tacotron.models.modules import *\nfrom tensorflow.contrib.seq2seq import dynamic_decode\nfrom tacotron.models.Architecture_wrappers import TacotronEncoderCell, VAECell, TacotronDecoderCell\nfrom tacotron.models.custom_decoder import CustomDecoder\nfrom tacotron.models.attention import LocationSensitiveAttention\n\nimport numpy as np\n\nassert symbols_tag == 'MIX_Phoneme_Version'\n\ndef split_func(x, split_pos):\n\trst = []\n\tstart = 0\n\t# x will be a numpy array with the contents of the placeholder below\n\tfor i in range(split_pos.shape[0]):\n\t\trst.append(x[:,start:start+split_pos[i]])\n\t\tstart += split_pos[i]\n\treturn rst\n\nclass Tacotron():\n\t\"\"\"Tacotron-2 Feature prediction Model.\n\t\"\"\"\n\tdef __init__(self, hparams):\n\t\tself._hparams = hparams\n\n\tdef initialize(self, inputs, inputs_tone_stress, speaker_labels, language_labels, input_lengths, mel_targets=None, stop_token_targets=None, linear_targets=None, targets_lengths=None, gta=False,\n\t\t\tglobal_step=None, is_training=False, is_evaluating=False, split_infos=None):\n\t\t\"\"\"\n\t\tInitializes the model for inference\n\t\tsets \"mel_outputs\" and \"alignments\" fields.\n\t\tArgs:\n\t\t\t- inputs: int32 Tensor with shape [N, T_in] where N is batch size, T_in is number of\n\t\t\t steps in the input time series, and values are character IDs\n\t\t\t speaker_labels: note the speaker id\n\t\t\t language_labels:note the language id\n\t\t\t- input_lengths: int32 Tensor with shape [N] where N is batch size and values are the lengths\n\t\t\tof each sequence in inputs.\n\t\t\t- mel_targets: float32 Tensor with shape [N, T_out, M] where N is batch size, T_out is number\n\t\t\tof steps in the output time series, M is num_mels, and values are entries in the mel\n\t\t\tspectrogram. 
Only needed for training.\n\t\t\"\"\"\n\t\tif mel_targets is None and stop_token_targets is not None:\n\t\t\traise ValueError('no multi targets were provided but token_targets were given')\n\t\tif mel_targets is not None and stop_token_targets is None and not gta:\n\t\t\traise ValueError('Mel targets are provided without corresponding token_targets')\n\t\tif not gta and self._hparams.predict_linear==True and linear_targets is None and is_training:\n\t\t\traise ValueError('Model is set to use post processing to predict linear spectrograms in training but no linear targets given!')\n\t\tif gta and linear_targets is not None:\n\t\t\traise ValueError('Linear spectrogram prediction is not supported in GTA mode!')\n\t\tif is_training and self._hparams.mask_decoder and targets_lengths is None:\n\t\t\traise RuntimeError('Model set to mask paddings but no targets lengths provided for the mask!')\n\t\tif is_training and is_evaluating:\n\t\t\traise RuntimeError('Model can not be in training and evaluation modes at the same time!')\n\n\t\t# self.inputs_printout = inputs\n\t\t# self.inputs_tone_stress_printout = inputs_tone_stress\n\n\t\tsplit_device = '/cpu:0' if self._hparams.tacotron_num_gpus > 1 or self._hparams.split_on_cpu else '/gpu:{}'.format(self._hparams.tacotron_gpu_start_idx)\n\t\twith tf.device(split_device):\n\t\t\thp = self._hparams\n\t\t\tlout_int = [tf.int32]*hp.tacotron_num_gpus\n\t\t\tlout_float = [tf.float32]*hp.tacotron_num_gpus\n\n\t\t\ttower_input_lengths = tf.split(input_lengths, num_or_size_splits=hp.tacotron_num_gpus, axis=0)\n\t\t\ttower_targets_lengths = tf.split(targets_lengths, num_or_size_splits=hp.tacotron_num_gpus, axis=0) if targets_lengths is not None else targets_lengths\n\t\t\ttower_speaker_labels = tf.split(speaker_labels, num_or_size_splits=hp.tacotron_num_gpus, axis=0)\n\t\t\ttower_language_labels = tf.split(language_labels, num_or_size_splits=hp.tacotron_num_gpus, axis=0)\n\n\t\t\tp_inputs = tf.py_func(split_func, [inputs, split_infos[:, 0]], lout_int)\n\t\t\tp_inputs_tone_stress = tf.py_func(split_func, [inputs_tone_stress, split_infos[:, 0]], lout_int)\n\t\t\tp_mel_targets = tf.py_func(split_func, [mel_targets, split_infos[:,1]], lout_float) if mel_targets is not None else mel_targets\n\t\t\tp_stop_token_targets = tf.py_func(split_func, [stop_token_targets, split_infos[:,2]], lout_float) if stop_token_targets is not None else stop_token_targets\n\t\t\tp_linear_targets = tf.py_func(split_func, [linear_targets, split_infos[:, 3]], lout_float) if linear_targets is not None else linear_targets\n\n\t\t\ttower_inputs = []\n\t\t\ttower_inputs_tone_stress = []\n\t\t\ttower_mel_targets = []\n\t\t\ttower_stop_token_targets = []\n\t\t\ttower_linear_targets = []\n\n\n\t\t\tbatch_size = tf.shape(inputs)[0]\n\t\t\tmel_channels = hp.num_mels\n\t\t\tlinear_channels = hp.num_freq\n\t\t\tfor i in range (hp.tacotron_num_gpus):\n\t\t\t\ttower_inputs.append(tf.reshape(p_inputs[i], [batch_size, -1]))\n\t\t\t\ttower_inputs_tone_stress.append(tf.reshape(p_inputs_tone_stress[i], [batch_size, -1]))\n\t\t\t\tif p_mel_targets is not None:\n\t\t\t\t\ttower_mel_targets.append(tf.reshape(p_mel_targets[i], [batch_size, -1, mel_channels]))\n\t\t\t\tif p_stop_token_targets is not None:\n\t\t\t\t\ttower_stop_token_targets.append(tf.reshape(p_stop_token_targets[i], [batch_size, -1]))\n\t\t\t\tif p_linear_targets is not None:\n\t\t\t\t\ttower_linear_targets.append(tf.reshape(p_linear_targets[i], [batch_size, -1, linear_channels]))\n\n\t\tself.tower_decoder_output = []\n\t\tself.tower_alignments = 
[]\n\t\tself.tower_stop_token_prediction = []\n\t\tself.tower_mel_outputs = []\n\t\tself.tower_linear_outputs = []\n\t\tself.tower_predict_speaker_labels = []\n\n\t\t# 添加分别的phoneme embedding和 声调重读embedding 和 concat的inputs embedding\n\t\ttower_embedded_inputs_phoneme = []\n\t\ttower_embedded_inputs_tone_stress = []\n\t\ttower_embedded_inputs_concat = []\n\t\ttower_enc_conv_output_shape = []\n\t\ttower_encoder_outputs = []\n\t\ttower_residual = []\n\t\ttower_projected_residual = []\n\n\t\t# 1. Declare GPU Devices\n\t\tgpus = [\"/gpu:{}\".format(i) for i in range(hp.tacotron_gpu_start_idx, hp.tacotron_gpu_start_idx+hp.tacotron_num_gpus)]\n\t\tfor i in range(hp.tacotron_num_gpus):\n\t\t\twith tf.device(tf.train.replica_device_setter(ps_tasks=1,ps_device=\"/cpu:0\",worker_device=gpus[i])):\n\t\t\t\twith tf.variable_scope('inference') as scope:\n\t\t\t\t\tassert hp.tacotron_teacher_forcing_mode in ('constant', 'scheduled')\n\t\t\t\t\tif hp.tacotron_teacher_forcing_mode == 'scheduled' and is_training:\n\t\t\t\t\t\tassert global_step is not None\n\n\t\t\t\t\t#GTA is only used for predicting mels to train Wavenet vocoder, so we ommit post processing when doing GTA synthesis\n\t\t\t\t\tpost_condition = hp.predict_linear and not gta\n\n\t\t\t\t\t# tf.print(tower_inputs[i])\n\t\t\t\t\t# tf.print(tower_inputs[i])\n\n\t\t\t\t\t# phoneme Embeddings ==> [batch_size, sequence_length, embedding_dim], 512\n\t\t\t\t\tself.phoneme_embedding_table = tf.get_variable(\n\t\t\t\t\t\t'inputs_phoneme_embedding', [len(symbols), hp.phoneme_embedding_dim], dtype=tf.float32)\n\t\t\t\t\tembedded_inputs_phoneme = tf.nn.embedding_lookup(self.phoneme_embedding_table, tower_inputs[i])\n\n\t\t\t\t\t# tone and stress Embeddings ==> [batch_size, sequence_length, embedding_dim], 16\n\t\t\t\t\tself.tone_stress_embedding_table = tf.get_variable(\n\t\t\t\t\t\t'inputs_tone_stress_embedding', [tone_stress_symbols_max_no, hp.tone_stress_embedding_dim], dtype=tf.float32)\n\t\t\t\t\tembedded_inputs_tone_stress = tf.nn.embedding_lookup(self.tone_stress_embedding_table, tower_inputs_tone_stress[i])\n\n\t\t\t\t\t# 拼接, 512 + 16\n\t\t\t\t\tembedded_inputs_concat = tf.concat([embedded_inputs_phoneme, embedded_inputs_tone_stress], axis=-1)\n\n\n\n\t\t\t\t\tself.speaker_embedding_table = tf.get_variable(\n\t\t\t\t\t\t'speaker_embedding', [hp.speaker_num, hp.speaker_dim], dtype=tf.float32)\n\t\t\t\t\tembedded_speaker_label = tf.nn.embedding_lookup(self.speaker_embedding_table, tower_speaker_labels[i])\n\t\t\t\t\t\n\t\t\t\t\t# phoneme天然分开语言, 先不使用LID\n\t\t\t\t\t# self.language_embedding_table = tf.get_variable(\n\t\t\t\t\t# \t'language_embedding', [hp.language_num, hp.language_dim], dtype=tf.float32)\n\t\t\t\t\t# embedded_language_label = tf.nn.embedding_lookup(self.language_embedding_table, tower_language_labels[i])\n\n\t\t\t\t\t#Encoder Cell ==> [batch_size, encoder_steps, encoder_lstm_units]\n\t\t\t\t\tencoder_cell = TacotronEncoderCell(\n\t\t\t\t\t\tEncoderConvolutions(is_training, hparams=hp, scope='encoder_convolutions'),\n\t\t\t\t\t\tEncoderRNN(is_training, size=hp.encoder_lstm_units,\n\t\t\t\t\t\t\tzoneout=hp.tacotron_zoneout_rate, scope='encoder_LSTM'))\n\n\t\t\t\t\tencoder_outputs = encoder_cell(embedded_inputs_concat, tower_input_lengths[i])\n\n\t\t\t\t\t#For shape visualization purpose\n\t\t\t\t\tenc_conv_output_shape = encoder_cell.conv_output_shape\n\n\t\t\t\t\t# Adversarial Speaker-Classifiers,\tinput:encoder_output,output:predicted speaker_label\n\t\t\t\t\tspeaker_classify = Speaker_Classifier(is_training, 
layer_size=hp.softmax_hidden_layer,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t speaker_size=hp.speaker_num)\n\t\t\t\t\tpredict_speaker_labels = speaker_classify(encoder_outputs, hp.grad_rev_scale)\n\n\t\t\t\t\t# # Variational AutoEncoder\n\t\t\t\t\t# if is_training:\n\t\t\t\t\t# \tVAE_cell = VAECell(VAEConvolutions(is_training, hparams=hp, scope='VAE_convolutions'),\n\t\t\t\t\t# \t\t\t\t\t VAERNN(is_training, layers=hp.VAE_lstm_num_layers,\n\t\t\t\t\t# \t\t\t\t\t\t\t size=hp.VAE_lstm_layer_size,\n\t\t\t\t\t# \t\t\t\t\t\t\t zoneout=hp.tacotron_zoneout_rate, scope='VAE_LSTM'), hp.VAE_pool_size, hp.VAE_D_size)\n\t\t\t\t\t# \tresidual_encoding, self.kl_div, self.D_mean, self.D_var = VAE_cell(tower_mel_targets[i], hp.tacotron_batch_size)\n\n\t\t\t\t\t# elif is_evaluating:\n\t\t\t\t\t# \tresidual_encoding,self.kl_div = tf.zeros([hp.tacotron_batch_size, hp.VAE_D_size], dtype=tf.float32), 0\n\t\t\t\t\t# else:\n\t\t\t\t\t# \tresidual_encoding = tf.zeros([hp.tacotron_synthesis_batch_size, hp.VAE_D_size],\n\t\t\t\t\t# \t\t\t\t\t\t\t\t dtype=tf.float32)\n\t\t\t\t\t# self.residual_encoding=residual_encoding\n\n\t\t\t\t\t#Decoder Parts\n\t\t\t\t\t#Attention Decoder Prenet\n\t\t\t\t\tprenet = Prenet(is_training, layers_sizes=hp.prenet_layers, drop_rate=hp.tacotron_dropout_rate, scope='decoder_prenet')\n\t\t\t\t\t#Attention Mechanism\n\t\t\t\t\tattention_mechanism = LocationSensitiveAttention(hp.attention_dim, encoder_outputs, hparams=hp,\n\t\t\t\t\t\tmask_encoder=hp.mask_encoder, memory_sequence_length=tf.reshape(tower_input_lengths[i], [-1]), smoothing=hp.smoothing,\n\t\t\t\t\t\tcumulate_weights=hp.cumulative_weights)\n\t\t\t\t\t#Decoder LSTM Cells\n\t\t\t\t\tdecoder_lstm = DecoderRNN(is_training, layers=hp.decoder_layers,\n\t\t\t\t\t\tsize=hp.decoder_lstm_units, zoneout=hp.tacotron_zoneout_rate, scope='decoder_LSTM')\n\t\t\t\t\t#Frames Projection layer\n\t\t\t\t\tframe_projection = FrameProjection(hp.num_mels * hp.outputs_per_step, scope='linear_transform_projection')\n\t\t\t\t\t#<stop_token> projection layer\n\t\t\t\t\tstop_projection = StopProjection(is_training or is_evaluating, shape=hp.outputs_per_step, scope='stop_token_projection')\n\n\t\t\t\t\t#Decoder Cell ==> [batch_size, decoder_steps, num_mels * r] (after decoding)\n\t\t\t\t\tdecoder_cell = TacotronDecoderCell(\n\t\t\t\t\t\tprenet,\n\t\t\t\t\t\tattention_mechanism,\n\t\t\t\t\t\tdecoder_lstm,\n\t\t\t\t\t\tembedded_speaker_label,\n\t\t\t\t\t\t# embedded_language_label,\n\t\t\t\t\t\t# residual_encoding,\n\t\t\t\t\t\tframe_projection,\n\t\t\t\t\t\tstop_projection)\n\n\n\t\t\t\t\t#Define the helper for our decoder\n\t\t\t\t\tif is_training or is_evaluating or gta:\n\t\t\t\t\t\tself.helper = TacoTrainingHelper(batch_size, tower_mel_targets[i], hp, gta, is_evaluating, global_step)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.helper = TacoTestHelper(batch_size, hp)\n\n\n\t\t\t\t\t#initial decoder state\n\t\t\t\t\tdecoder_init_state = decoder_cell.zero_state(batch_size=batch_size, dtype=tf.float32)\n\n\t\t\t\t\t#Only use max iterations at synthesis time\n\t\t\t\t\tmax_iters = hp.max_iters if not (is_training or is_evaluating) else None\n\n\t\t\t\t\t#Decode\n\t\t\t\t\t(frames_prediction, stop_token_prediction, _), final_decoder_state, _ = dynamic_decode(\n\t\t\t\t\t\tCustomDecoder(decoder_cell, self.helper, decoder_init_state),\n\t\t\t\t\t\timpute_finished=False,\n\t\t\t\t\t\tmaximum_iterations=max_iters,\n\t\t\t\t\t\tswap_memory=hp.tacotron_swap_with_cpu)\n\n\n\t\t\t\t\t# Reshape outputs to be one output per entry \n\t\t\t\t\t#==> [batch_size, non_reduced_decoder_steps 
(decoder_steps * r), num_mels]\n\t\t\t\t\tdecoder_output = tf.reshape(frames_prediction, [batch_size, -1, hp.num_mels])\n\t\t\t\t\tstop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])\n\n\t\t\t\t\t#Postnet\n\t\t\t\t\tpostnet = Postnet(is_training, hparams=hp, scope='postnet_convolutions')\n\n\t\t\t\t\t#Compute residual using post-net ==> [batch_size, decoder_steps * r, postnet_channels]\n\t\t\t\t\tresidual = postnet(decoder_output)\n\n\t\t\t\t\t#Project residual to same dimension as mel spectrogram \n\t\t\t\t\t#==> [batch_size, decoder_steps * r, num_mels]\n\t\t\t\t\tresidual_projection = FrameProjection(hp.num_mels, scope='postnet_projection')\n\t\t\t\t\tprojected_residual = residual_projection(residual)\n\n\n\t\t\t\t\t#Compute the mel spectrogram\n\t\t\t\t\tmel_outputs = decoder_output + projected_residual\n\n\n\t\t\t\t\tif post_condition:\n\t\t\t\t\t\t# Add post-processing CBHG. This does a great job at extracting features from mels before projection to Linear specs.\n\t\t\t\t\t\tpost_cbhg = CBHG(hp.cbhg_kernels, hp.cbhg_conv_channels, hp.cbhg_pool_size, [hp.cbhg_projection, hp.num_mels],\n\t\t\t\t\t\t\thp.cbhg_projection_kernel_size, hp.cbhg_highwaynet_layers, \n\t\t\t\t\t\t\thp.cbhg_highway_units, hp.cbhg_rnn_units, is_training, name='CBHG_postnet')\n\n\t\t\t\t\t\t#[batch_size, decoder_steps(mel_frames), cbhg_channels]\n\t\t\t\t\t\tpost_outputs = post_cbhg(mel_outputs, None)\n\n\t\t\t\t\t\t#Linear projection of extracted features to make linear spectrogram\n\t\t\t\t\t\tlinear_specs_projection = FrameProjection(hp.num_freq, scope='cbhg_linear_specs_projection')\n\n\t\t\t\t\t\t#[batch_size, decoder_steps(linear_frames), num_freq]\n\t\t\t\t\t\tlinear_outputs = linear_specs_projection(post_outputs)\n\n\t\t\t\t\t#Grab alignments from the final decoder state\n\t\t\t\t\talignments = tf.transpose(final_decoder_state.alignment_history.stack(), [1, 2, 0])\n\n\t\t\t\t\tself.tower_decoder_output.append(decoder_output)\n\t\t\t\t\tself.tower_alignments.append(alignments)\n\t\t\t\t\tself.tower_stop_token_prediction.append(stop_token_prediction)\n\t\t\t\t\tself.tower_mel_outputs.append(mel_outputs)\n\t\t\t\t\tself.tower_predict_speaker_labels.append(predict_speaker_labels)\n\t\t\t\t\ttower_embedded_inputs_phoneme.append(embedded_inputs_phoneme)\n\t\t\t\t\ttower_embedded_inputs_tone_stress.append(embedded_inputs_tone_stress)\n\t\t\t\t\ttower_embedded_inputs_concat.append(embedded_inputs_concat)\n\t\t\t\t\ttower_enc_conv_output_shape.append(enc_conv_output_shape)\n\t\t\t\t\ttower_encoder_outputs.append(encoder_outputs)\n\t\t\t\t\ttower_residual.append(residual)\n\t\t\t\t\ttower_projected_residual.append(projected_residual)\n\n\n\t\t\t\t\tif post_condition:\n\t\t\t\t\t\tself.tower_linear_outputs.append(linear_outputs)\n\t\t\tlog('initialisation done {}'.format(gpus[i]))\n\n\n\t\tif is_training:\n\t\t\tself.ratio = self.helper._ratio\n\t\tself.tower_inputs = tower_inputs\n\t\tself.tower_inputs_tone_stress = tower_inputs_tone_stress\n\t\tself.tower_input_lengths = tower_input_lengths\n\t\tself.tower_mel_targets = tower_mel_targets\n\t\tself.tower_linear_targets = tower_linear_targets\n\t\tself.tower_targets_lengths = tower_targets_lengths\n\t\tself.tower_stop_token_targets = tower_stop_token_targets\n\t\tself.tower_speaker_labels = tower_speaker_labels\n\t\tself.tower_language_labels = tower_language_labels\n\t\tself.all_vars = tf.trainable_variables()\n\n\t\tlog('Initialized Tacotron model. Dimensions (? 
= dynamic shape): ')\n\t\tlog(' Train mode: {}'.format(is_training))\n\t\tlog(' Eval mode: {}'.format(is_evaluating))\n\t\tlog(' GTA mode: {}'.format(gta))\n\t\tlog(' Synthesis mode: {}'.format(not (is_training or is_evaluating)))\n\t\tlog(' Input: {}'.format(inputs.shape))\n\t\tfor i in range(hp.tacotron_num_gpus+hp.tacotron_gpu_start_idx):\n\t\t\tlog(' device: {}'.format(i))\n\t\t\tlog(' phoneme embedding: {}'.format(tower_embedded_inputs_phoneme[i].shape))\n\t\t\tlog(' tone stress embedding: {}'.format(tower_embedded_inputs_tone_stress[i].shape))\n\t\t\tlog(' concat embedding: {}'.format(tower_embedded_inputs_concat[i].shape))\n\t\t\tlog(' enc conv out: {}'.format(tower_enc_conv_output_shape[i]))\n\t\t\tlog(' encoder out: {}'.format(tower_encoder_outputs[i].shape))\n\t\t\tlog(' decoder out: {}'.format(self.tower_decoder_output[i].shape))\n\t\t\tlog(' residual out: {}'.format(tower_residual[i].shape))\n\t\t\tlog(' projected residual out: {}'.format(tower_projected_residual[i].shape))\n\t\t\tlog(' mel out: {}'.format(self.tower_mel_outputs[i].shape))\n\t\t\tif post_condition:\n\t\t\t\tlog(' linear out: {}'.format(self.tower_linear_outputs[i].shape))\n\t\t\tlog(' <stop_token> out: {}'.format(self.tower_stop_token_prediction[i].shape))\n\n\t\t\t#1_000_000 is causing syntax problems for some people?! Python please :)\n\t\t\tlog(' Tacotron Parameters {:.3f} Million.'.format(np.sum([np.prod(v.get_shape().as_list()) for v in self.all_vars]) / 1000000))\n\n\tdef add_loss(self):\n\t\t'''Adds loss to the model. Sets \"loss\" field. initialize must have been called.'''\n\t\thp = self._hparams\n\n\t\tself.tower_before_loss = []\n\t\tself.tower_after_loss= []\n\t\tself.tower_stop_token_loss = []\n\t\tself.tower_regularization_loss = []\n\t\tself.tower_linear_loss = []\n\t\tself.tower_adversarial_loss = []\n\t\tself.tower_loss = []\n\n\t\ttotal_before_loss = 0\n\t\ttotal_after_loss= 0\n\t\ttotal_stop_token_loss = 0\n\t\ttotal_regularization_loss = 0\n\t\ttotal_linear_loss = 0\n\t\ttotal_adversarial_loss = 0\n\t\ttotal_loss = 0\n\n\t\tgpus = [\"/gpu:{}\".format(i) for i in range(hp.tacotron_gpu_start_idx, hp.tacotron_gpu_start_idx+hp.tacotron_num_gpus)]\n\n\t\tfor i in range(hp.tacotron_num_gpus):\n\t\t\twith tf.device(tf.train.replica_device_setter(ps_tasks=1,ps_device=\"/cpu:0\",worker_device=gpus[i])):\n\t\t\t\twith tf.variable_scope('loss') as scope:\n\t\t\t\t\tif hp.mask_decoder:\n\t\t\t\t\t\t# Compute loss of predictions before postnet\n\t\t\t\t\t\tbefore = MaskedMSE(self.tower_mel_targets[i], self.tower_decoder_output[i], self.tower_targets_lengths[i],\n\t\t\t\t\t\t\thparams=self._hparams)\n\t\t\t\t\t\t# Compute loss after postnet\n\t\t\t\t\t\tafter = MaskedMSE(self.tower_mel_targets[i], self.tower_mel_outputs[i], self.tower_targets_lengths[i],\n\t\t\t\t\t\t\thparams=self._hparams)\n\t\t\t\t\t\t#Compute <stop_token> loss (for learning dynamic generation stop)\n\t\t\t\t\t\tstop_token_loss = MaskedSigmoidCrossEntropy(self.tower_stop_token_targets[i],\n\t\t\t\t\t\t\tself.tower_stop_token_prediction[i], self.tower_targets_lengths[i], hparams=self._hparams)\n\t\t\t\t\t\t#Compute masked linear loss\n\t\t\t\t\t\tif hp.predict_linear:\n\t\t\t\t\t\t\t#Compute Linear L1 mask loss (priority to low frequencies)\n\t\t\t\t\t\t\tlinear_loss = MaskedLinearLoss(self.tower_linear_targets[i], self.tower_linear_outputs[i],\n\t\t\t\t\t\t\t\tself.targets_lengths, hparams=self._hparams)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlinear_loss=0.\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Compute loss of predictions before 
postnet\n\t\t\t\t\t\tbefore = tf.losses.mean_squared_error(self.tower_mel_targets[i], self.tower_decoder_output[i])\n\t\t\t\t\t\t# Compute loss after postnet\n\t\t\t\t\t\tafter = tf.losses.mean_squared_error(self.tower_mel_targets[i], self.tower_mel_outputs[i])\n\t\t\t\t\t\t#Compute <stop_token> loss (for learning dynamic generation stop)\n\t\t\t\t\t\tstop_token_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n\t\t\t\t\t\t\tlabels=self.tower_stop_token_targets[i],\n\t\t\t\t\t\t\tlogits=self.tower_stop_token_prediction[i]))\n\t\t\t\t\t\tspeaker_loss=tf.losses\n\t\t\t\t\t\tif hp.predict_linear:\n\t\t\t\t\t\t\t#Compute linear loss\n\t\t\t\t\t\t\t#From https://github.com/keithito/tacotron/blob/tacotron2-work-in-progress/models/tacotron.py\n\t\t\t\t\t\t\t#Prioritize loss for frequencies under 2000 Hz.\n\t\t\t\t\t\t\tl1 = tf.abs(self.tower_linear_targets[i] - self.tower_linear_outputs[i])\n\t\t\t\t\t\t\tn_priority_freq = int(2000 / (hp.sample_rate * 0.5) * hp.num_freq)\n\t\t\t\t\t\t\tlinear_loss = 0.5 * tf.reduce_mean(l1) + 0.5 * tf.reduce_mean(l1[:,:,0:n_priority_freq])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlinear_loss = 0.\n\n\t\t\t\t\t# Compute the regularization weight\n\t\t\t\t\tif hp.tacotron_scale_regularization:\n\t\t\t\t\t\treg_weight_scaler = 1. / (2 * hp.max_abs_value) if hp.symmetric_mels else 1. / (hp.max_abs_value)\n\t\t\t\t\t\treg_weight = hp.tacotron_reg_weight * reg_weight_scaler\n\t\t\t\t\telse:\n\t\t\t\t\t\treg_weight = hp.tacotron_reg_weight\n\n\t\t\t\t\t# Regularize variables\n\t\t\t\t\t# Exclude all types of bias, RNN (Bengio et al. On the difficulty of training recurrent neural networks), embeddings and prediction projection layers.\n\t\t\t\t\t# Note that we consider attention mechanism v_a weights as a prediction projection layer and we don't regularize it. 
(This gave better stability)\n\t\t\t\t\tregularization = tf.add_n([tf.nn.l2_loss(v) for v in self.all_vars\n\t\t\t\t\t\tif not('bias' in v.name or 'Bias' in v.name or '_projection' in v.name or 'inputs_embedding' in v.name\n\t\t\t\t\t\t\tor 'RNN' in v.name or 'LSTM' in v.name)]) * reg_weight\n\t\t\t\t\t# Compute the speaker adversarial training loss\n\t\t\t\t\t# speaker_prediction: predicted speaker label for each time step of input, with shape [N, T_in, speaker_num]\n\t\t\t\t\t# speaker_targets: one-hot speaker label of current input, from shape [N, speaker_num] to [N, 1, speaker_num] to [N, T_in, speaker_num]\n\t\t\t\t\tseq_len = tf.shape(self.tower_predict_speaker_labels[i])[1]\n\t\t\t\t\tspeaker_targets = tf.one_hot(self.tower_speaker_labels[i], hp.speaker_num, dtype=tf.float32)\n\t\t\t\t\tspeaker_targets = tf.tile(tf.reshape(speaker_targets, shape=[-1, 1, hp.speaker_num]),\n\t\t\t\t\t\t\t\t\t\t\t multiples=[1, seq_len, 1])\n\t\t\t\t\tadversarial_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n\t\t\t\t\t\tlabels=speaker_targets,\n\t\t\t\t\t\tlogits=self.tower_predict_speaker_labels[i]))\n\n\t\t\t\t\t# Compute final loss term\n\t\t\t\t\tself.tower_before_loss.append(before)\n\t\t\t\t\tself.tower_after_loss.append(after)\n\t\t\t\t\tself.tower_stop_token_loss.append(stop_token_loss)\n\t\t\t\t\tself.tower_regularization_loss.append(regularization)\n\t\t\t\t\tself.tower_linear_loss.append(linear_loss)\n\t\t\t\t\tself.tower_adversarial_loss.append(adversarial_loss)\n\t\t\t\t\t# loss = before + after + stop_token_loss + regularization + linear_loss + hp.loss_weight * adversarial_loss + self.kl_div\n\t\t\t\t\tloss = before + after + stop_token_loss + regularization + linear_loss + hp.loss_weight * adversarial_loss\n\t\t\t\t\tself.tower_loss.append(loss)\n\n\t\tfor i in range(hp.tacotron_num_gpus):\n\t\t\ttotal_before_loss += self.tower_before_loss[i] \n\t\t\ttotal_after_loss += self.tower_after_loss[i]\n\t\t\ttotal_stop_token_loss += self.tower_stop_token_loss[i]\n\t\t\ttotal_regularization_loss += self.tower_regularization_loss[i]\n\t\t\ttotal_linear_loss += self.tower_linear_loss[i]\n\t\t\ttotal_adversarial_loss +=self.tower_adversarial_loss[i]\n\t\t\ttotal_loss += self.tower_loss[i]\n\n\t\tself.before_loss = total_before_loss / hp.tacotron_num_gpus\n\t\tself.after_loss = total_after_loss / hp.tacotron_num_gpus\n\t\tself.stop_token_loss = total_stop_token_loss / hp.tacotron_num_gpus\n\t\tself.regularization_loss = total_regularization_loss / hp.tacotron_num_gpus\n\t\tself.linear_loss = total_linear_loss / hp.tacotron_num_gpus\n\t\tself.adversarial_loss = total_adversarial_loss / hp.tacotron_num_gpus\n\t\tself.loss = total_loss / hp.tacotron_num_gpus\n\n\tdef add_optimizer(self, global_step):\n\t\t'''Adds optimizer. Sets \"gradients\" and \"optimize\" fields. add_loss must have been called.\n\t\tArgs:\n\t\t\tglobal_step: int32 scalar Tensor representing current global step in training\n\t\t'''\n\t\thp = self._hparams\n\t\ttower_gradients = []\n\n\t\t# 1. 
Declare GPU Devices\n\t\tgpus = [\"/gpu:{}\".format(i) for i in range(hp.tacotron_gpu_start_idx, hp.tacotron_gpu_start_idx + hp.tacotron_num_gpus)]\n\n\t\tgrad_device = '/cpu:0' if hp.tacotron_num_gpus > 1 else gpus[0]\n\n\t\twith tf.device(grad_device):\n\t\t\twith tf.variable_scope('optimizer') as scope:\n\t\t\t\tif hp.tacotron_decay_learning_rate:\n\t\t\t\t\tself.decay_steps = hp.tacotron_decay_steps\n\t\t\t\t\tself.decay_rate = hp.tacotron_decay_rate\n\t\t\t\t\tself.learning_rate = self._learning_rate_decay(hp.tacotron_initial_learning_rate, global_step)\n\t\t\t\telse:\n\t\t\t\t\tself.learning_rate = tf.convert_to_tensor(hp.tacotron_initial_learning_rate)\n\n\t\t\t\toptimizer = tf.train.AdamOptimizer(self.learning_rate, hp.tacotron_adam_beta1,\n\t\t\t\t\thp.tacotron_adam_beta2, hp.tacotron_adam_epsilon)\n\n\t\t# 2. Compute Gradient\n\t\tfor i in range(hp.tacotron_num_gpus):\n\t\t\t# Device placement\n\t\t\twith tf.device(tf.train.replica_device_setter(ps_tasks=1,ps_device=\"/cpu:0\",worker_device=gpus[i])) :\n\t\t\t\t#agg_loss += self.tower_loss[i]\n\t\t\t\twith tf.variable_scope('optimizer') as scope:\n\t\t\t\t\tgradients = optimizer.compute_gradients(self.tower_loss[i])\n\t\t\t\t\ttower_gradients.append(gradients)\n\n\t\t# 3. Average Gradient\n\t\twith tf.device(grad_device) :\n\t\t\tavg_grads = []\n\t\t\tvars = []\n\t\t\tfor grad_and_vars in zip(*tower_gradients):\n\t\t\t\t# grads_vars = [(grad1, var), (grad2, var), ...]\n\t\t\t\tgrads = []\n\t\t\t\tfor g,_ in grad_and_vars:\n\t\t\t\t\texpanded_g = tf.expand_dims(g, 0)\n\t\t\t\t\t# Append on a 'tower' dimension which we will average over below.\n\t\t\t\t\tgrads.append(expanded_g)\n\t\t\t\t\t# Average over the 'tower' dimension.\n\t\t\t\tgrad = tf.concat(axis=0, values=grads)\n\t\t\t\tgrad = tf.reduce_mean(grad, 0)\n\n\t\t\t\tv = grad_and_vars[0][1]\n\t\t\t\tavg_grads.append(grad)\n\t\t\t\tvars.append(v)\n\n\t\t\tself.gradients = avg_grads\n\t\t\t#Just for causion\n\t\t\t#https://github.com/Rayhane-mamah/Tacotron-2/issues/11\n\t\t\tif hp.tacotron_clip_gradients:\n\t\t\t\tclipped_gradients, _ = tf.clip_by_global_norm(avg_grads, 1.) # __mark 0.5 refer\n\t\t\telse:\n\t\t\t\tclipped_gradients = avg_grads\n\n\t\t\t# Add dependency on UPDATE_OPS; otherwise batchnorm won't work correctly. See:\n\t\t\t# https://github.com/tensorflow/tensorflow/issues/1122\n\t\t\twith tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n\t\t\t\tself.optimize = optimizer.apply_gradients(zip(clipped_gradients, vars),\n\t\t\t\t\tglobal_step=global_step)\n\n\tdef _learning_rate_decay(self, init_lr, global_step):\n\t\t#################################################################\n\t\t# Narrow Exponential Decay:\n\n\t\t# Phase 1: lr = 1e-3\n\t\t# We only start learning rate decay after 50k steps\n\n\t\t# Phase 2: lr in ]1e-5, 1e-3[\n\t\t# decay reach minimal value at step 310k\n\n\t\t# Phase 3: lr = 1e-5\n\t\t# clip by minimal learning rate value (step > 310k)\n\t\t#################################################################\n\t\thp = self._hparams\n\n\t\t#Compute natural exponential decay\n\t\tlr = tf.train.exponential_decay(init_lr, \n\t\t\tglobal_step - hp.tacotron_start_decay, #lr = 1e-3 at step 50k\n\t\t\tself.decay_steps, \n\t\t\tself.decay_rate, #lr = 1e-5 around step 310k\n\t\t\tname='lr_exponential_decay')\n\n\n\t\t#clip learning rate by max and min values (initial and final values)\n\t\treturn tf.minimum(tf.maximum(lr, hp.tacotron_final_learning_rate), init_lr)" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.nn.l2_loss", "tensorflow.abs", "tensorflow.one_hot", "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.train.replica_device_setter", "tensorflow.split", "tensorflow.clip_by_global_norm", "tensorflow.device", "tensorflow.train.exponential_decay", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.shape", "tensorflow.get_collection", "tensorflow.expand_dims", "tensorflow.losses.mean_squared_error", "tensorflow.nn.embedding_lookup", "tensorflow.py_func", "tensorflow.train.AdamOptimizer", "tensorflow.reduce_mean", "tensorflow.trainable_variables", "tensorflow.get_variable", "tensorflow.maximum" ] ]
blthayer/ecen-667
[ "cf609fa230b94e5b98af7afe554250a0824c2e11" ]
[ "hw6.py" ]
[ "import numpy as np\n\n\ndef main():\n p1()\n p2()\n p6()\n\n\ndef p1():\n # Do part 1.\n print('*' * 80)\n print('Problem 8.8, Part 1')\n a1 = np.array([\n [3, 8],\n [2, 3]\n ])\n\n _get_participation(a1)\n\n # Now part 2.\n print('*' * 80)\n print('Problem 8.8, Part 2')\n a2 = np.array([\n [1, 2, 1],\n [0, 3, 1],\n [0, 5, -1]\n ])\n\n _get_participation(a2)\n\n\ndef _get_participation(a1):\n # Get right eigenvectors.\n lambda1, v1 = np.linalg.eig(a1)\n\n # Get left eigenvectors.\n lambda_left, w1 = np.linalg.eig(a1.T)\n\n # Sort so that our eigenvectors line up.\n sort_1 = np.argsort(lambda1)\n sort_2 = np.argsort(lambda_left)\n\n # Check.\n np.testing.assert_allclose(lambda1[sort_1],\n lambda_left[sort_2])\n\n print(f'Eigenvalues: {lambda1[sort_1]}')\n\n v1 = v1[:, sort_1]\n w1 = w1[:, sort_2]\n\n # Scale left eigenvectors so that w_i^t * v_i = 1.\n for idx in range(w1.shape[0]):\n w_i = w1[:, idx]\n v_i = v1[:, idx]\n p = np.matmul(w_i, v_i)\n if p == 0:\n continue\n c = 1 / np.matmul(w_i, v_i)\n w1[:, idx] = w1[:, idx] * c\n\n # Check.\n # Commenting this out since it doesn't work well with values very\n # near zero (e.g. 1e-17).\n # np.testing.assert_allclose(np.matmul(w1.T, v1), np.identity(a1.shape[0]))\n\n # The participation factors are simple elementwise multiplication.\n p_1 = v1 * w1\n print(f'Participation Factors:\\n{p_1}')\n\n\ndef p2():\n print('*' * 80)\n print('Problem 2')\n # Given parameters\n m = 0.0133\n p_m = 0.91\n p_e = 3.24\n\n # Compute delta^s\n d_s = np.arcsin(p_m / p_e)\n\n # Compute V_cr\n v_cr = -p_m * (np.pi - 2 * d_s) + 2 * p_e * np.cos(d_s)\n\n # Initialize variables.\n t = 0\n dt = 0.005\n delta = d_s\n w = 0\n\n # Function for computing w(t)\n def w_t():\n # Consider w_0 to be 0, since we're in the \"delta w\" frame.\n return p_m * t / m\n\n # Function for computing delta(t)\n def d_t():\n # Again, consider w_0 to be 0.\n return 0.5 * p_m * t**2 / m + d_s\n\n # Energy function.\n def v():\n return 0.5 * m * w**2 - p_m * (delta - d_s) - \\\n p_e * (np.cos(delta) - np.cos(d_s))\n\n # Compute initial v\n v_t = v()\n v_list = [v_t]\n i = 0\n while v_t <= v_cr and i < 1000:\n t += dt\n # Compute delta and omega.\n delta = d_t()\n w = w_t()\n\n # Compute energy.\n v_t = v()\n v_list.append(v_t)\n\n i += 1\n\n if i >= 100:\n raise UserWarning('Maxed iterations.')\n\n print(f't_cr: {t:.3f}')\n\n\ndef p6():\n print('*' * 80)\n print('Problem 6')\n # Phase angles of vstab and speed\n vstab = -30.925\n speed = -45.306\n phi_deg = vstab + 360 - speed\n print(f'phi_deg: {phi_deg:.3f}')\n # Convert to radians, subtract 180 degrees, divide by 2.\n phi = (phi_deg - 180) / 2 * np.pi / 180\n\n # Frequency of our mode\n f = 1.67\n\n # Compute alpha\n alpha = (1 - np.sin(phi)) / (1 + np.sin(phi))\n print(f'alpha: {alpha:.3f}')\n\n # Now compute t1 and t2.\n t1 = 1 / (2 * np.pi * f * np.sqrt(alpha))\n t2 = alpha * t1\n print(f't1: {t1:.3f}')\n print(f't2: {t2:.3f}')\n pass\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.sqrt", "numpy.matmul", "numpy.arcsin", "numpy.argsort", "numpy.cos", "numpy.testing.assert_allclose", "numpy.array", "numpy.sin", "numpy.linalg.eig" ] ]
nhutnamhcmus/datacamp-playground
[ "25457e813b1145e1d335562286715eeddd1c1a7b" ]
[ "introduction-to-data-visualization-in-python/4. Analyzing time series and images/script_1.py" ]
[ "# Multiple time series on common axes\r\n\r\n# Import matplotlib.pyplot as plt\r\nimport matplotlib.pyplot as plt\r\n\r\n# Plot the aapl time series in blue\r\nplt.plot(aapl, color='blue', label='AAPL')\r\n\r\n# Plot the ibm time series in green\r\nplt.plot(ibm, color='green', label='IBM')\r\n\r\n# Plot the csco time series in red\r\nplt.plot(csco, color='red', label='CSCO')\r\n\r\n# Plot the msft time series in magenta\r\nplt.plot(msft, color='magenta', label='MSFT')\r\n\r\n# Add a legend in the top left corner of the plot\r\nplt.legend(loc='upper left')\r\n\r\n# Specify the orientation of the xticks\r\nplt.xticks(rotation=60)\r\n\r\n# Display the plot\r\nplt.show()\r\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "matplotlib.pyplot.legend", "matplotlib.pyplot.xticks" ] ]
dimon58/miptlabs
[ "538f6c410210a6e3405ca5b61dc7bc41d251cdf8" ]
[ "src/miptlabs/interpolators/interpolators.py" ]
[ "from numpy import linspace\nfrom scipy.interpolate import interp1d\n\n\nclass Interpolator:\n \"\"\"\n Базовый класс интерполятора\n \"\"\"\n\n def __init__(self, points=100):\n self.points = points\n\n def interpolate(self, x, y):\n pass\n\n\nclass Quadratic(Interpolator):\n \"\"\"\n Квдратичный интерполятор\n \"\"\"\n\n def gen_x_axis(self, start, end):\n \"\"\"\n Генерирует набор точек по оси абсцисс\n :param start:\n :param end:\n :return:\n \"\"\"\n return linspace(start, end, self.points)\n\n def interpolate(self, x, y):\n \"\"\"\n Производит квадратическую интерполяцию, подробнее в `документации numpy`_\n\n .. _`документации numpy`: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html\n\n :param x: координаты по оси x\n :param y: координаты по оси y\n\n :return: набор координат по оси x и набор координат по оси y после интерполяции\n \"\"\"\n points = interp1d(x, y, kind='quadratic')\n x = linspace(min(x), max(x), self.points)\n y = points(x)\n\n return x, y\n" ]
[ [ "numpy.linspace", "scipy.interpolate.interp1d" ] ]
NeuroML/Documentation
[ "06e355a8268c848b872b4e4c44d990b77b1fcb37" ]
[ "source/Userdocs/NML2_examples/izhikevich-single-neuron.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nSimulating a regular spiking Izhikevich neuron with NeuroML.\n\nFile: izhikevich-single-neuron.py\n\"\"\"\n\nfrom neuroml import NeuroMLDocument\nfrom neuroml import Izhikevich2007Cell\nfrom neuroml import Population\nfrom neuroml import Network\nfrom neuroml import PulseGenerator\nfrom neuroml import ExplicitInput\nimport neuroml.writers as writers\nfrom neuroml.utils import validate_neuroml2\nfrom pyneuroml import pynml\nfrom pyneuroml.lems import LEMSSimulation\nimport numpy as np\n\n\n# Create a new NeuroML model document\nnml_doc = NeuroMLDocument(id=\"IzhSingleNeuron\")\n\n# Define the Izhikevich cell and add it to the model in the document\nizh0 = Izhikevich2007Cell(\n id=\"izh2007RS0\", v0=\"-60mV\", C=\"100pF\", k=\"0.7nS_per_mV\", vr=\"-60mV\",\n vt=\"-40mV\", vpeak=\"35mV\", a=\"0.03per_ms\", b=\"-2nS\", c=\"-50.0mV\", d=\"100pA\")\nnml_doc.izhikevich2007_cells.append(izh0)\n\n# Create a network and add it to the model\nnet = Network(id=\"IzhNet\")\nnml_doc.networks.append(net)\n\n# Create a population of defined cells and add it to the model\nsize0 = 1\npop0 = Population(id=\"IzhPop0\", component=izh0.id, size=size0)\nnet.populations.append(pop0)\n\n# Define an external stimulus and add it to the model\npg = PulseGenerator(\n id=\"pulseGen_%i\" % 0, delay=\"0ms\", duration=\"1000ms\",\n amplitude=\"0.07 nA\"\n)\nnml_doc.pulse_generators.append(pg)\nexp_input = ExplicitInput(target=\"%s[%i]\" % (pop0.id, 0), input=pg.id)\nnet.explicit_inputs.append(exp_input)\n\n# Write the NeuroML model to a file\nnml_file = 'izhikevich2007_single_cell_network.nml'\nwriters.NeuroMLWriter.write(nml_doc, nml_file)\nprint(\"Written network file to: \" + nml_file)\n\n# Validate the NeuroML model against the NeuroML schema\nvalidate_neuroml2(nml_file)\n\n################################################################################\n## The NeuroML file has now been created and validated. The rest of the code\n## involves writing a LEMS simulation file to run the model\n\n# Create a simulation instance of the model\nsimulation_id = \"example-single-izhikevich2007cell-sim\"\nsimulation = LEMSSimulation(sim_id=simulation_id,\n duration=1000, dt=0.1, simulation_seed=123)\nsimulation.assign_simulation_target(net.id)\nsimulation.include_neuroml2_file(nml_file)\n\n# Define the output file to store simulation outputs\n# we record the neuron's membrane potential\nsimulation.create_output_file(\n \"output0\", \"%s.v.dat\" % simulation_id\n)\nsimulation.add_column_to_output_file(\"output0\", 'IzhPop0[0]', 'IzhPop0[0]/v')\n\n# Save the simulation to a file\nlems_simulation_file = simulation.save_to_file()\n\n# Run the simulation using the jNeuroML simulator\npynml.run_lems_with_jneuroml(\n lems_simulation_file, max_memory=\"2G\", nogui=True, plot=False\n)\n\n# Load the data from the file and plot the graph for the membrane potential\n# using the pynml generate_plot utility function.\ndata_array = np.loadtxt(\"%s.v.dat\" % simulation_id)\npynml.generate_plot(\n [data_array[:, 0]], [data_array[:, 1]],\n \"Membrane potential\", show_plot_already=False,\n save_figure_to=\"%s-v.png\" % simulation_id,\n xaxis=\"time (s)\", yaxis=\"membrane potential (V)\"\n)\n" ]
[ [ "numpy.loadtxt" ] ]
WesleyBatista/fklearn
[ "7a606d246545de5ab68b2d9f38d0fdbeec6ca630" ]
[ "src/fklearn/metrics/pd_extractors.py" ]
[ "import collections\nfrom datetime import datetime\nfrom itertools import chain, repeat\n\nimport pandas as pd\nfrom toolz import curry\nfrom numpy import nan\n\n\n@curry\ndef evaluator_extractor(result, evaluator_name):\n metric_value = result[evaluator_name] if result else nan\n return pd.DataFrame({evaluator_name: [metric_value]})\n\n\n@curry\ndef combined_evaluator_extractor(result, base_extractors):\n return pd.concat([x(result) for x in base_extractors], axis=1)\n\n\n@curry\ndef split_evaluator_extractor_iteration(split_value, result, split_col, base_extractor):\n key = 'split_evaluator__' + split_col + '_' + str(split_value)\n return (base_extractor(result.get(key, {}))\n .assign(**{'split_evaluator__' + split_col: split_value}))\n\n\n@curry\ndef split_evaluator_extractor(result, split_col, split_values, base_extractor):\n return pd.concat(\n list(map(split_evaluator_extractor_iteration(result=result, split_col=split_col, base_extractor=base_extractor),\n split_values)))\n\n\n@curry\ndef temporal_split_evaluator_extractor(result, time_col, base_extractor, time_format=\"%Y-%m\", eval_name=None):\n if eval_name is None:\n eval_name = 'split_evaluator__' + time_col\n\n split_keys = [key for key in result.keys() if eval_name in key]\n split_values = []\n for key in split_keys:\n date = key.split(eval_name)[1][1:]\n try:\n # just check time format\n datetime.strptime(date, time_format)\n split_values.append(date)\n except ValueError:\n # this might happen if result has temporal splitters using different data formats\n pass\n\n return split_evaluator_extractor(result, time_col, split_values, base_extractor)\n\n\n@curry\ndef learning_curve_evaluator_extractor(result, base_extractor):\n return base_extractor(result).assign(lc_period_end=result['lc_period_end'])\n\n\n@curry\ndef reverse_learning_curve_evaluator_extractor(result, base_extractor):\n return base_extractor(result).assign(reverse_lc_period_start=result['reverse_lc_period_start'])\n\n\n@curry\ndef stability_curve_evaluator_extractor(result, base_extractor):\n return base_extractor(result).assign(sc_period=result['sc_period'])\n\n\n@curry\ndef repeat_split_log(split_log, results_len):\n if isinstance(split_log, collections.Iterable):\n n_repeat = results_len // len(split_log)\n # The logic below makes [1, 2, 3] into [1, 1, 1, 2, 2, 2, 3, 3, 3] for n_repeat=3\n return list(chain.from_iterable(zip(*repeat(split_log, n_repeat))))\n else:\n return split_log\n\n\n@curry\ndef extract_base_iteration(result, extractor):\n extracted_results = pd.concat(list(map(extractor, result['eval_results'])))\n repeat_fn = repeat_split_log(results_len=len(extracted_results))\n\n keys = result['split_log'].keys()\n assignments = {k: repeat_fn(result['split_log'][k]) for k in keys}\n\n return (extracted_results\n .assign(fold_num=result['fold_num'])\n .assign(**assignments))\n\n\n@curry\ndef extract(validator_results, extractor):\n return pd.concat(list(map(extract_base_iteration(extractor=extractor), validator_results)))\n\n\n@curry\ndef extract_lc(validator_results, extractor):\n return extract(validator_results, learning_curve_evaluator_extractor(base_extractor=extractor))\n\n\n@curry\ndef extract_reverse_lc(validator_results, extractor):\n return extract(validator_results, reverse_learning_curve_evaluator_extractor(base_extractor=extractor))\n\n\n@curry\ndef extract_sc(validator_results, extractor):\n return extract(validator_results, stability_curve_evaluator_extractor(base_extractor=extractor))\n\n\n@curry\ndef 
extract_param_tuning_iteration(iteration, tuning_log, base_extractor, model_learner_name):\n iter_df = base_extractor(tuning_log[iteration][\"validator_log\"])\n return iter_df.assign(**tuning_log[iteration][\"train_log\"][model_learner_name][\"parameters\"])\n\n\n@curry\ndef extract_tuning(tuning_log, base_extractor, model_learner_name):\n iter_fn = extract_param_tuning_iteration(tuning_log=tuning_log, base_extractor=base_extractor,\n model_learner_name=model_learner_name)\n return pd.concat(list(map(iter_fn, range(len(tuning_log)))))\n\n\n@curry\ndef permutation_extractor(results, base_extractor):\n df = pd.concat(base_extractor(r) for r in results['permutation_importance'].values())\n df.index = results['permutation_importance'].keys()\n if 'permutation_importance_baseline' in results: # With baseline comparison\n baseline = base_extractor(results['permutation_importance_baseline'])\n baseline.index = [\"baseline\"]\n df = pd.concat((df, baseline))\n for c in baseline.columns:\n df[c + '_delta_from_baseline'] = baseline[c].iloc[0] - df[c]\n return df\n" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
JustinACoder/H22-GR3-UnrealAI
[ "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "1fa4cd6a566c8745f455fc3d2273208f21f88ced", "1fa4cd6a566c8745f455fc3d2273208f21f88ced" ]
[ "Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/timeseries/python/timeseries/input_pipeline.py", "Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/ops/random_ops.py", "Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/training/checkpoint_management.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Defines ways of splicing and re-arranging time series.\r\n\r\nThis file provides methods for reading, parsing, and re-arranging a time\r\nseries. The main departure from standard TensorFlow input pipelines is a focus\r\non \"chunking\" a time series, i.e. slicing it into small contiguous windows which\r\nare then batched together for training, a form of truncated\r\nbackpropagation. This typically provides a significant speedup compared to\r\nlooping over the whole series sequentially, by exploiting data parallelism and\r\nby reducing redundant contributions to gradients (due to redundant information\r\nin the series itself).\r\n\r\nA series, consisting of times (an increasing vector of integers) and values (one\r\nor more floating point values for each time) along with any exogenous features,\r\nis stored either in memory or on disk in various formats (e.g. \"one record per\r\ntimestep\" on disk, or as a dictionary of Numpy arrays in memory). The location\r\nand format is specified by configuring a `TimeSeriesReader` object\r\n(e.g. `NumpyReader`, `CSVReader`), which reads the data into the TensorFlow\r\ngraph. A `TimeSeriesInputFn` object (typically `RandomWindowInputFn`) then\r\nperforms windowing and batching.\r\n\r\nTime series are passed through this pipeline as dictionaries mapping feature\r\nnames to their values. For training and evaluation, these require at minimum\r\n`TrainEvalFeatures.TIMES` (scalar integers, one per timestep) and\r\n`TrainEvalFeatures.VALUES` (may be either univariate or multivariate). Exogenous\r\nfeatures may have any shape, but are likewise associated with a timestep. Times\r\nthemselves need not be contiguous or regular (although smaller/fewer gaps are\r\ngenerally better), but each timestep must have all `VALUES` and any exogenous\r\nfeatures (i.e. times may be missing, but given that a time is specified, every\r\nother feature must also be specified for that step; some models may support\r\nmaking exogenous updates conditional).\r\n\r\nThe expected use case of a `TimeSeriesInputFn` is that it is first configured\r\n(for example setting a batch or window size) and passed a reader (a\r\n`TimeSeriesReader` object). 
The `TimeSeriesInputFn` can then be passed as the\r\ninput_fn of an Estimator.\r\n\r\nFor example, `RandomWindowInputFn` is useful for creating batches of random\r\nchunks of a series for training:\r\n\r\n```\r\n # Read data in the default \"time,value\" CSV format with no header\r\n reader = input_pipeline.CSVReader(csv_file_name)\r\n # Set up windowing and batching for training\r\n train_input_fn = input_pipeline.RandomWindowInputFn(\r\n reader, batch_size=16, window_size=16)\r\n # Fit model parameters to data\r\n estimator.train(input_fn=train_input_fn, steps=150)\r\n```\r\n\r\n`RandomWindowInputFn` is the primary tool for training and quantitative\r\nevaluation of time series. `WholeDatasetInputFn`, which reads a whole series\r\ninto memory, is useful for qualitative evaluation and preparing to make\r\npredictions with `predict_continuation_input_fn`.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport abc\r\n\r\nimport numpy\r\n\r\nfrom tensorflow.contrib.timeseries.python.timeseries import feature_keys\r\nfrom tensorflow.contrib.timeseries.python.timeseries import model_utils\r\n\r\nfrom tensorflow.python.estimator import estimator_lib\r\nfrom tensorflow.python.framework import constant_op\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import io_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import nn\r\nfrom tensorflow.python.ops import parsing_ops\r\nfrom tensorflow.python.ops import random_ops\r\nfrom tensorflow.python.ops import state_ops\r\nfrom tensorflow.python.ops import tensor_array_ops\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.training import input as input_lib\r\nfrom tensorflow.python.training import training\r\nfrom tensorflow.python.util import nest\r\n\r\n\r\ndef predict_continuation_input_fn(\r\n evaluation, steps=None, times=None, exogenous_features=None):\r\n \"\"\"An Estimator input_fn for running predict() after evaluate().\r\n\r\n If the call to evaluate() we are making predictions based on had a batch_size\r\n greater than one, predictions will start after each of these windows\r\n (i.e. will have the same batch dimension).\r\n\r\n Args:\r\n evaluation: The dictionary returned by `Estimator.evaluate`, with keys\r\n FilteringResults.STATE_TUPLE and FilteringResults.TIMES.\r\n steps: The number of steps to predict (scalar), starting after the\r\n evaluation. If `times` is specified, `steps` must not be; one is required.\r\n times: A [batch_size x window_size] array of integers (not a Tensor)\r\n indicating times to make predictions for. These times must be after the\r\n corresponding evaluation. If `steps` is specified, `times` must not be;\r\n one is required. If the batch dimension is omitted, it is assumed to be 1.\r\n exogenous_features: Optional dictionary. If specified, indicates exogenous\r\n features for the model to use while making the predictions. 
Values must\r\n have shape [batch_size x window_size x ...], where `batch_size` matches\r\n the batch dimension used when creating `evaluation`, and `window_size` is\r\n either the `steps` argument or the `window_size` of the `times` argument\r\n (depending on which was specified).\r\n Returns:\r\n An `input_fn` suitable for passing to the `predict` function of a time\r\n series `Estimator`.\r\n Raises:\r\n ValueError: If `times` or `steps` are misspecified.\r\n \"\"\"\r\n if exogenous_features is None:\r\n exogenous_features = {}\r\n predict_times = model_utils.canonicalize_times_or_steps_from_output(\r\n times=times, steps=steps, previous_model_output=evaluation)\r\n features = {\r\n feature_keys.PredictionFeatures.STATE_TUPLE:\r\n evaluation[feature_keys.FilteringResults.STATE_TUPLE],\r\n feature_keys.PredictionFeatures.TIMES:\r\n predict_times\r\n }\r\n features.update(exogenous_features)\r\n def _predict_input_fn():\r\n \"\"\"An input_fn for predict().\"\"\"\r\n # Prevents infinite iteration with a constant output in an Estimator's\r\n # predict().\r\n limited_features = {}\r\n for key, values in features.items():\r\n limited_values = nest.map_structure(\r\n lambda value: training.limit_epochs(value, num_epochs=1), values)\r\n limited_features[key] = limited_values\r\n return (limited_features, None)\r\n return _predict_input_fn\r\n\r\n\r\nclass TimeSeriesReader(object):\r\n \"\"\"Reads from and parses a data source for a `TimeSeriesInputFn`.\r\n\r\n This class provides methods that read a few records (`read`) or the full data\r\n set at once (`read_full`), and returns them as dictionaries mapping feature\r\n names to feature Tensors. Please see note at the top of the file for the\r\n structure of these dictionaries. The output is generally chunked by a\r\n `TimeSeriesInputFn` before being passed to the model.\r\n \"\"\"\r\n\r\n def check_dataset_size(self, minimum_dataset_size):\r\n \"\"\"When possible, raises an error if the dataset is too small.\r\n\r\n This method allows TimeSeriesReaders to raise informative error messages if\r\n the user has selected a window size in their TimeSeriesInputFn which is\r\n larger than the dataset size. However, many TimeSeriesReaders will not have\r\n access to a dataset size, in which case they do not need to override this\r\n method.\r\n\r\n Args:\r\n minimum_dataset_size: The minimum number of records which should be\r\n contained in the dataset. Readers should attempt to raise an error when\r\n possible if an epoch of data contains fewer records.\r\n \"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def read(self):\r\n \"\"\"Parses one or more records into a feature dictionary.\r\n\r\n This method is expected to be called by a `TimeSeriesInputFn` object, and is\r\n not for use with models directly.\r\n\r\n A `TimeSeriesReader` object reads multiple records at a single time for\r\n efficiency; the size of these batches is an implementation detail internal\r\n to the input pipeline. These records should generally be sequential,\r\n although some out-of-order records due to file wraparounds are expected and\r\n must be handled by callers.\r\n\r\n Returns:\r\n A dictionary mapping feature names to `Tensor` values, each with an\r\n arbitrary batch dimension (for efficiency) as their first dimension.\r\n \"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def read_full(self):\r\n \"\"\"Return the full dataset.\r\n\r\n Largely for interactive use/plotting (or evaluation on small\r\n datasets). Generally not very efficient. 
Not recommended for training.\r\n\r\n Returns:\r\n Same return type as `read`, but with the full dataset rather than an\r\n arbitrary chunk of it. A dictionary mapping feature names to `Tensor`\r\n values, where the size of the first dimension of each `Tensor` is the\r\n number of samples in the entire dataset. These `Tensor`s should be\r\n constant across graph invocations, assuming that the underlying data\r\n remains constant. Current implementations re-read data on each graph\r\n invocation, although this may change in the future.\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass NumpyReader(TimeSeriesReader):\r\n \"\"\"A time series parser for feeding Numpy arrays to a `TimeSeriesInputFn`.\r\n\r\n Avoids embedding data in the graph as constants.\r\n \"\"\"\r\n\r\n def __init__(self, data, read_num_records_hint=4096):\r\n \"\"\"Numpy array input for a `TimeSeriesInputFn`.\r\n\r\n Args:\r\n data: A dictionary mapping feature names to Numpy arrays, with two\r\n possible shapes (requires keys `TrainEvalFeatures.TIMES` and\r\n `TrainEvalFeatures.VALUES`):\r\n Univariate; `TIMES` and `VALUES` are both vectors of shape [series\r\n length]\r\n Multivariate; `TIMES` is a vector of shape [series length], `VALUES`\r\n has shape [series length x number of features].\r\n In any case, `VALUES` and any exogenous features must have their shapes\r\n prefixed by the shape of the value corresponding to the `TIMES` key.\r\n read_num_records_hint: The maximum number of samples to read at one time,\r\n for efficiency.\r\n \"\"\"\r\n self._features = _canonicalize_numpy_data(\r\n data, require_single_batch=True)\r\n self._read_num_records_hint = read_num_records_hint\r\n\r\n def check_dataset_size(self, minimum_dataset_size):\r\n \"\"\"Raise an error if the dataset is too small.\"\"\"\r\n dataset_size = self._features[feature_keys.TrainEvalFeatures.TIMES].shape[1]\r\n if dataset_size < minimum_dataset_size:\r\n raise ValueError(\r\n (\"A TimeSeriesInputFn is configured to create windows of size {}, \"\r\n \"but only {} records were available in the dataset. Either decrease \"\r\n \"the window size or provide more records.\").format(\r\n minimum_dataset_size, dataset_size))\r\n\r\n def read(self):\r\n \"\"\"Returns a large chunk of the Numpy arrays for later re-chunking.\"\"\"\r\n # Remove the batch dimension from all features\r\n features = {key: numpy.squeeze(value, axis=0)\r\n for key, value in self._features.items()}\r\n return estimator_lib.inputs.numpy_input_fn(\r\n x=features,\r\n # The first dimensions of features are the series length, since we have\r\n # removed the batch dimension above. 
We now pull out\r\n # self._read_num_records_hint steps of this single time series to pass\r\n # to the TimeSeriesInputFn.\r\n batch_size=self._read_num_records_hint,\r\n num_epochs=None,\r\n shuffle=False)()\r\n\r\n def read_full(self):\r\n \"\"\"Returns `Tensor` versions of the full Numpy arrays.\"\"\"\r\n features = estimator_lib.inputs.numpy_input_fn(\r\n x=self._features,\r\n batch_size=1,\r\n num_epochs=None,\r\n queue_capacity=2, # Each queue element is a full copy of the dataset\r\n shuffle=False)()\r\n # TimeSeriesInputFn expect just a batch dimension\r\n return {feature_name: array_ops.squeeze(feature_value, axis=0)\r\n for feature_name, feature_value in features.items()}\r\n\r\n\r\nclass ReaderBaseTimeSeriesParser(TimeSeriesReader):\r\n \"\"\"Base for time series readers which wrap a `tf.ReaderBase`.\"\"\"\r\n\r\n def __init__(self, filenames, read_num_records_hint=4096):\r\n \"\"\"Configure the time series reader.\r\n\r\n Args:\r\n filenames: A string or list of strings indicating files to read records\r\n from.\r\n read_num_records_hint: When not reading a full dataset, indicates the\r\n number of records to transfer in a single chunk (for efficiency). The\r\n actual number transferred at one time may vary.\r\n \"\"\"\r\n self._filenames = filenames\r\n self._read_num_records_hint = read_num_records_hint\r\n\r\n @abc.abstractmethod\r\n def _get_reader(self):\r\n \"\"\"Get an instance of the tf.ReaderBase associated with this class.\"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def _process_records(self, lines):\r\n \"\"\"Given string items, return a processed dictionary of Tensors.\r\n\r\n Args:\r\n lines: A 1-dimensional string Tensor, each representing a record to parse\r\n (source dependent, e.g. a line of a file, or a serialized protocol\r\n buffer).\r\n\r\n Returns:\r\n A dictionary mapping feature names to their values. The batch dimensions\r\n should match the length of `lines`.\r\n \"\"\"\r\n pass\r\n\r\n def _get_filename_queue(self, epoch_limit):\r\n \"\"\"Constructs a filename queue with an epoch limit.\r\n\r\n `epoch_limit` is intended as an error checking fallback to prevent a reader\r\n from infinitely looping in its requests for more work items if none are\r\n available in any file. It should be set high enough that it is never reached\r\n assuming at least one record exists in some file.\r\n\r\n Args:\r\n epoch_limit: The maximum number of times to read through the complete list\r\n of files before throwing an OutOfRangeError.\r\n Returns:\r\n A tuple of (filename_queue, epoch_limiter):\r\n filename_queue: A FIFOQueue with filename work items.\r\n epoch_limiter: The local variable used for epoch limitation. This should\r\n be set to zero before a reader is passed `filename_queue` in order to\r\n reset the epoch limiter's state.\r\n \"\"\"\r\n epoch_limiter = variable_scope.variable(\r\n initial_value=constant_op.constant(0, dtype=dtypes.int64),\r\n name=\"epoch_limiter\",\r\n trainable=False,\r\n collections=[ops.GraphKeys.LOCAL_VARIABLES])\r\n filenames_tensor = array_ops.reshape(\r\n ops.convert_to_tensor(self._filenames), [-1])\r\n # We can't rely on epoch_limiter being initialized, since queue runners are\r\n # started before local variables are initialized. Instead, we ignore epoch\r\n # limits before variable initialization. This means that prior to variable\r\n # initialization, a QueueRunner may cause a reader to enter an un-checked\r\n # infinite loop. 
However, as soon as local variables are initialized, we\r\n # will start incrementing and checking epoch_limiter, which will interrupt\r\n # any in-progress loops.\r\n conditional_count_up_to = control_flow_ops.cond(\r\n state_ops.is_variable_initialized(epoch_limiter),\r\n lambda: epoch_limiter.count_up_to(epoch_limit),\r\n lambda: constant_op.constant(0, dtype=dtypes.int64))\r\n with ops.control_dependencies([conditional_count_up_to]):\r\n filenames_tensor = array_ops.identity(filenames_tensor)\r\n filename_queue = input_lib.string_input_producer(\r\n filenames_tensor, shuffle=False, capacity=1)\r\n return filename_queue, epoch_limiter\r\n\r\n def read(self):\r\n \"\"\"Reads a chunk of data from the `tf.ReaderBase` for later re-chunking.\"\"\"\r\n # Assuming there is at least one item to be read among all of the files in\r\n # self._filenames, we will not need to go through more than\r\n # self._read_num_records_hint epochs to get a batch of\r\n # self._read_num_records_hint records. Setting this limit and resetting it\r\n # before each reader.read_up_to call prevents infinite looping when there\r\n # are no records available in any of the files.\r\n filename_queue, epoch_limiter = self._get_filename_queue(\r\n epoch_limit=self._read_num_records_hint)\r\n reader = self._get_reader()\r\n epoch_reset_op = state_ops.assign(epoch_limiter, 0)\r\n with ops.control_dependencies([epoch_reset_op]):\r\n _, records = reader.read_up_to(\r\n filename_queue, self._read_num_records_hint)\r\n return self._process_records(records)\r\n\r\n def read_full(self):\r\n \"\"\"Reads a full epoch of data into memory.\"\"\"\r\n reader = self._get_reader()\r\n # Set a hard limit of 2 epochs through self._filenames. If there are any\r\n # records available, we should only end up reading the first record in the\r\n # second epoch before exiting the while loop and subsequently resetting the\r\n # epoch limit. 
If there are no records available in any of the files, this\r\n # hard limit prevents the reader.read_up_to call from looping infinitely.\r\n filename_queue, epoch_limiter = self._get_filename_queue(epoch_limit=2)\r\n epoch_reset_op = state_ops.assign(epoch_limiter, 0)\r\n with ops.control_dependencies([epoch_reset_op]):\r\n first_key, first_value = reader.read_up_to(filename_queue, 1)\r\n # Read until we get a duplicate key (one epoch)\r\n def _while_condition(\r\n current_key, current_value, current_index, collected_records):\r\n del current_value, current_index, collected_records # unused\r\n return math_ops.not_equal(array_ops.squeeze(current_key, axis=0),\r\n array_ops.squeeze(first_key, axis=0))\r\n\r\n def _while_body(\r\n current_key, current_value, current_index, collected_records):\r\n del current_key # unused\r\n new_key, new_value = reader.read_up_to(filename_queue, 1)\r\n new_key.set_shape([1])\r\n new_value.set_shape([1])\r\n return (new_key,\r\n new_value,\r\n current_index + 1,\r\n collected_records.write(current_index, current_value))\r\n _, _, _, records_ta = control_flow_ops.while_loop(\r\n _while_condition,\r\n _while_body,\r\n [constant_op.constant([\"\"]), first_value,\r\n 0, # current_index starting value\r\n tensor_array_ops.TensorArray( # collected_records\r\n dtype=dtypes.string, size=0, dynamic_size=True)])\r\n records = records_ta.concat()\r\n # Reset the reader when we're done so that subsequent requests for data get\r\n # the dataset in the proper order.\r\n with ops.control_dependencies([records]):\r\n reader_reset_op = reader.reset()\r\n with ops.control_dependencies([reader_reset_op]):\r\n records = array_ops.identity(records)\r\n return self._process_records(records)\r\n\r\n\r\nclass CSVReader(ReaderBaseTimeSeriesParser):\r\n \"\"\"Reads from a collection of CSV-formatted files.\"\"\"\r\n\r\n def __init__(self,\r\n filenames,\r\n column_names=(feature_keys.TrainEvalFeatures.TIMES,\r\n feature_keys.TrainEvalFeatures.VALUES),\r\n column_dtypes=None,\r\n skip_header_lines=None,\r\n read_num_records_hint=4096):\r\n \"\"\"CSV-parsing reader for a `TimeSeriesInputFn`.\r\n\r\n Args:\r\n filenames: A filename or list of filenames to read the time series\r\n from. Each line must have columns corresponding to `column_names`.\r\n column_names: A list indicating names for each\r\n feature. `TrainEvalFeatures.TIMES` and `TrainEvalFeatures.VALUES` are\r\n required; `VALUES` may be repeated to indicate a multivariate series.\r\n column_dtypes: If provided, must be a list with the same length as\r\n `column_names`, indicating dtypes for each column. Defaults to\r\n `tf.int64` for `TrainEvalFeatures.TIMES` and `tf.float32` for\r\n everything else.\r\n skip_header_lines: Passed on to `tf.TextLineReader`; skips this number of\r\n lines at the beginning of each file.\r\n read_num_records_hint: When not reading a full dataset, indicates the\r\n number of records to parse/transfer in a single chunk (for\r\n efficiency). 
The actual number transferred at one time may be more or\r\n less.\r\n Raises:\r\n ValueError: If required column names are not specified, or if lengths do\r\n not match.\r\n \"\"\"\r\n if feature_keys.TrainEvalFeatures.TIMES not in column_names:\r\n raise ValueError(\"'{}' is a required column.\".format(\r\n feature_keys.TrainEvalFeatures.TIMES))\r\n if feature_keys.TrainEvalFeatures.VALUES not in column_names:\r\n raise ValueError(\"'{}' is a required column.\".format(\r\n feature_keys.TrainEvalFeatures.VALUES))\r\n if column_dtypes is not None and len(column_dtypes) != len(column_names):\r\n raise ValueError(\r\n (\"If specified, the length of column_dtypes must match the length of \"\r\n \"column_names (got column_dtypes={} and column_names={}).\").format(\r\n column_dtypes, column_names))\r\n if sum(1 for column_name in column_names\r\n if column_name == feature_keys.TrainEvalFeatures.TIMES) != 1:\r\n raise ValueError(\r\n \"Got more than one times column ('{}'), but exactly \"\r\n \"one is required.\".format(feature_keys.TrainEvalFeatures.TIMES))\r\n self._column_names = column_names\r\n self._column_dtypes = column_dtypes\r\n self._skip_header_lines = skip_header_lines\r\n super(CSVReader, self).__init__(\r\n filenames=filenames, read_num_records_hint=read_num_records_hint)\r\n\r\n def _get_reader(self):\r\n return io_ops.TextLineReader(skip_header_lines=self._skip_header_lines)\r\n\r\n def _process_records(self, lines):\r\n \"\"\"Parse `lines` as CSV records.\"\"\"\r\n if self._column_dtypes is None:\r\n default_values = [(array_ops.zeros([], dtypes.int64),)\r\n if column_name == feature_keys.TrainEvalFeatures.TIMES\r\n else () for column_name in self._column_names]\r\n else:\r\n default_values = [(array_ops.zeros([], dtype),)\r\n for dtype in self._column_dtypes]\r\n columns = parsing_ops.decode_csv(lines, default_values)\r\n features_lists = {}\r\n for column_name, value in zip(self._column_names, columns):\r\n features_lists.setdefault(column_name, []).append(value)\r\n features = {}\r\n for column_name, values in features_lists.items():\r\n if column_name == feature_keys.TrainEvalFeatures.TIMES:\r\n features[column_name] = values[0]\r\n else:\r\n features[column_name] = array_ops.stack(values, axis=1)\r\n return features\r\n\r\n\r\nclass TFExampleReader(ReaderBaseTimeSeriesParser):\r\n \"\"\"Reads and parses `tf.Example`s from a TFRecords file.\"\"\"\r\n\r\n def __init__(self,\r\n filenames,\r\n features):\r\n \"\"\"Configure `tf.Example` parsing.\r\n\r\n Args:\r\n filenames: A filename or list of filenames to read the time series\r\n from. Each line must have columns corresponding to `column_names`.\r\n features: A dictionary mapping from feature keys to `tf.FixedLenFeature`\r\n objects. 
Must include `TrainEvalFeatures.TIMES` (scalar integer) and\r\n `TrainEvalFeatures.VALUES` (floating point vector) features.\r\n Raises:\r\n ValueError: If required times/values features are not present.\r\n \"\"\"\r\n if feature_keys.TrainEvalFeatures.TIMES not in features:\r\n raise ValueError(\"'{}' is a required column.\".format(\r\n feature_keys.TrainEvalFeatures.TIMES))\r\n if feature_keys.TrainEvalFeatures.VALUES not in features:\r\n raise ValueError(\"'{}' is a required column.\".format(\r\n feature_keys.TrainEvalFeatures.VALUES))\r\n self._features = features\r\n super(TFExampleReader, self).__init__(filenames=filenames)\r\n\r\n def _get_reader(self):\r\n return io_ops.TFRecordReader()\r\n\r\n def _process_records(self, examples):\r\n \"\"\"Parse `tf.Example`s into `Tensors`.\"\"\"\r\n return parsing_ops.parse_example(\r\n serialized=examples, features=self._features)\r\n\r\n\r\nclass TimeSeriesInputFn(object):\r\n \"\"\"Base for classes which create batches of windows from a time series.\"\"\"\r\n\r\n @abc.abstractmethod\r\n def create_batch(self):\r\n \"\"\"Creates chunked Tensors from times, values, and other features.\r\n\r\n Suitable for use as the input_fn argument of a tf.estimator.Estimator's\r\n fit() or evaluate() method.\r\n\r\n Returns:\r\n A tuple of (features, targets):\r\n features: A dictionary with `TrainEvalFeatures.TIMES` and\r\n `TrainEvalFeatures.VALUES` as keys, `TIMES` having an associated value\r\n with shape [batch size x window length], `VALUES` with shape [batch\r\n size x window length x number of features]. Any other features will\r\n also have shapes prefixed with [batch size x window length].\r\n targets: Not used, but must have a value for compatibility with the\r\n Estimator API. That value should be None.\r\n \"\"\"\r\n pass\r\n\r\n def __call__(self):\r\n # Allow a TimeSeriesInputFn to be used as an input function directly\r\n return self.create_batch()\r\n\r\n\r\nclass WholeDatasetInputFn(TimeSeriesInputFn):\r\n \"\"\"Supports passing a full time series to a model for evaluation/inference.\r\n\r\n Note that this `TimeSeriesInputFn` is not designed for high throughput, and\r\n should not be used for training. It allows for sequential evaluation on a full\r\n dataset (with sequential in-sample predictions), which then feeds naturally\r\n into `predict_continuation_input_fn` for making out-of-sample\r\n predictions. While this is useful for plotting and interactive use,\r\n `RandomWindowInputFn` is better suited to training and quantitative\r\n evaluation.\r\n \"\"\"\r\n # TODO(allenl): A SequentialWindowInputFn for getting model end state without\r\n # loading the whole dataset into memory (or for quantitative evaluation of\r\n # sequential models). Note that an Estimator using such a TimeSeriesInputFn\r\n # won't return in-sample predictions for the whole dataset, which means it\r\n # won't be terribly useful for interactive use/plotting (unless the user\r\n # passes in concat metrics). 
Also need to be careful about state saving for\r\n # sequential models, particularly the gaps between chunks.\r\n\r\n def __init__(self, time_series_reader):\r\n \"\"\"Initialize the `TimeSeriesInputFn`.\r\n\r\n Args:\r\n time_series_reader: A TimeSeriesReader object.\r\n \"\"\"\r\n self._reader = time_series_reader\r\n super(WholeDatasetInputFn, self).__init__()\r\n\r\n def create_batch(self):\r\n \"\"\"A suitable `input_fn` for an `Estimator`'s `evaluate()`.\r\n\r\n Returns:\r\n A dictionary mapping feature names to `Tensors`, each shape\r\n prefixed by [1, data set size] (i.e. a batch size of 1).\r\n \"\"\"\r\n features = self._reader.read_full()\r\n # Add a batch dimension of one to each feature.\r\n return ({feature_name: feature_value[None, ...]\r\n for feature_name, feature_value in features.items()},\r\n None)\r\n\r\n\r\nclass RandomWindowInputFn(TimeSeriesInputFn):\r\n \"\"\"Wraps a `TimeSeriesReader` to create random batches of windows.\r\n\r\n Tensors are first collected into sequential windows (in a windowing queue\r\n created by `tf.train.batch`, based on the order returned from\r\n `time_series_reader`), then these windows are randomly batched (in a\r\n `RandomShuffleQueue`), the Tensors returned by `create_batch` having shapes\r\n prefixed by [`batch_size`, `window_size`].\r\n\r\n This `TimeSeriesInputFn` is useful for both training and quantitative\r\n evaluation (but be sure to run several epochs for sequential models such as\r\n `StructuralEnsembleRegressor` to completely flush stale state left over from\r\n training). For qualitative evaluation or when preparing for predictions, use\r\n `WholeDatasetInputFn`.\r\n \"\"\"\r\n\r\n def __init__(\r\n self, time_series_reader, window_size, batch_size,\r\n queue_capacity_multiplier=1000, shuffle_min_after_dequeue_multiplier=2,\r\n discard_out_of_order=True, discard_consecutive_batches_limit=1000,\r\n jitter=True, num_threads=2, shuffle_seed=None):\r\n \"\"\"Configure the RandomWindowInputFn.\r\n\r\n Args:\r\n time_series_reader: A TimeSeriesReader object.\r\n window_size: The number of examples to keep together sequentially. This\r\n controls the length of truncated backpropagation: smaller values mean\r\n less sequential computation, which can lead to faster training, but\r\n create a coarser approximation to the gradient (which would ideally be\r\n computed by a forward pass over the entire sequence in order).\r\n batch_size: The number of windows to place together in a batch. Larger\r\n values will lead to more stable gradients during training.\r\n queue_capacity_multiplier: The capacity for the queues used to create\r\n batches, specified as a multiple of `batch_size` (for\r\n RandomShuffleQueue) and `batch_size * window_size` (for the\r\n FIFOQueue). Controls the maximum number of windows stored. Should be\r\n greater than `shuffle_min_after_dequeue_multiplier`.\r\n shuffle_min_after_dequeue_multiplier: The minimum number of windows in the\r\n RandomShuffleQueue after a dequeue, which controls the amount of entropy\r\n introduced during batching. Specified as a multiple of `batch_size`.\r\n discard_out_of_order: If True, windows of data which have times which\r\n decrease (a higher time followed by a lower time) are discarded. If\r\n False, the window and associated features are instead sorted so that\r\n times are non-decreasing. Discarding is typically faster, as models do\r\n not have to deal with artificial gaps in the data. 
However, discarding\r\n does create a bias where the beginnings and endings of files are\r\n under-sampled.\r\n discard_consecutive_batches_limit: Raise an OutOfRangeError if more than\r\n this number of batches are discarded without a single non-discarded\r\n window (prevents infinite looping when the dataset is too small).\r\n jitter: If True, randomly discards examples between some windows in order\r\n to avoid deterministic chunking patterns. This is important for models\r\n like AR which may otherwise overfit a fixed chunking.\r\n num_threads: Use this number of threads for queues. Setting a value of 1\r\n removes one source of non-determinism (and in combination with\r\n shuffle_seed should provide deterministic windowing).\r\n shuffle_seed: A seed for window shuffling. The default value of None\r\n provides random behavior. With `shuffle_seed` set and\r\n `num_threads=1`, provides deterministic behavior.\r\n \"\"\"\r\n self._reader = time_series_reader\r\n self._window_size = window_size\r\n self._reader.check_dataset_size(minimum_dataset_size=self._window_size)\r\n self._batch_size = batch_size\r\n self._queue_capacity_multiplier = queue_capacity_multiplier\r\n self._shuffle_min_after_dequeue_multiplier = (\r\n shuffle_min_after_dequeue_multiplier)\r\n self._discard_out_of_order = discard_out_of_order\r\n self._discard_limit = discard_consecutive_batches_limit\r\n self._jitter = jitter\r\n if num_threads is None:\r\n self._num_threads = self._batch_size\r\n else:\r\n self._num_threads = num_threads\r\n self._shuffle_seed = shuffle_seed\r\n super(RandomWindowInputFn, self).__init__()\r\n\r\n def create_batch(self):\r\n \"\"\"Create queues to window and batch time series data.\r\n\r\n Returns:\r\n A dictionary of Tensors corresponding to the output of `self._reader`\r\n (from the `time_series_reader` constructor argument), each with shapes\r\n prefixed by [`batch_size`, `window_size`].\r\n \"\"\"\r\n features = self._reader.read()\r\n if self._jitter:\r\n # TODO(agarwal, allenl): Figure out if more jitter is needed here.\r\n jitter = random_ops.random_uniform(shape=[], maxval=2, dtype=dtypes.int32)\r\n else:\r\n jitter = 0\r\n # To keep things efficient, we pass from the windowing batcher to the\r\n # batch-of-windows batcher in batches. 
This avoids the need for huge numbers\r\n # of threads, but does mean that jitter is only applied occasionally.\r\n # TODO(allenl): Experiment with different internal passing sizes.\r\n internal_passing_size = self._batch_size\r\n features_windowed = input_lib.batch(\r\n features,\r\n batch_size=self._window_size * internal_passing_size + jitter,\r\n enqueue_many=True,\r\n capacity=(self._queue_capacity_multiplier\r\n * internal_passing_size * self._window_size),\r\n num_threads=self._num_threads)\r\n raw_features_windowed = features_windowed\r\n if self._jitter:\r\n features_windowed = {\r\n key: value[jitter:]\r\n for key, value in features_windowed.items()}\r\n features_windowed = {\r\n key: array_ops.reshape(\r\n value,\r\n array_ops.concat(\r\n [[internal_passing_size, self._window_size],\r\n array_ops.shape(value)[1:]],\r\n axis=0))\r\n for key, value in features_windowed.items()}\r\n batch_and_window_shape = tensor_shape.TensorShape(\r\n [internal_passing_size, self._window_size])\r\n for key in features_windowed.keys():\r\n features_windowed[key].set_shape(\r\n batch_and_window_shape.concatenate(\r\n raw_features_windowed[key].get_shape()[1:]))\r\n # When switching files, we may end up with windows where the time is not\r\n # decreasing, even if times within each file are sorted (and even if those\r\n # files are visited in order, when looping back around to the beginning of\r\n # the first file). This is hard for models to deal with, so we either\r\n # discard such examples, creating a bias where the beginning and end of the\r\n # series is under-sampled, or we sort the window, creating large gaps.\r\n times = features_windowed[feature_keys.TrainEvalFeatures.TIMES]\r\n if self._discard_out_of_order:\r\n non_decreasing = math_ops.reduce_all(\r\n times[:, 1:] >= times[:, :-1], axis=1)\r\n # Ensure that no more than self._discard_limit complete batches are\r\n # discarded contiguously (resetting the count when we find a single clean\r\n # window). 
This prevents infinite looping when the dataset is smaller than\r\n # the window size.\r\n # TODO(allenl): Figure out a way to return informative errors from\r\n # count_up_to.\r\n discarded_windows_limiter = variable_scope.variable(\r\n initial_value=constant_op.constant(0, dtype=dtypes.int64),\r\n name=\"discarded_windows_limiter\",\r\n trainable=False,\r\n collections=[ops.GraphKeys.LOCAL_VARIABLES])\r\n def _initialized_limit_check():\r\n return control_flow_ops.cond(\r\n math_ops.reduce_any(non_decreasing),\r\n lambda: state_ops.assign(discarded_windows_limiter, 0),\r\n lambda: discarded_windows_limiter.count_up_to(self._discard_limit))\r\n discard_limit_op = control_flow_ops.cond(\r\n state_ops.is_variable_initialized(discarded_windows_limiter),\r\n _initialized_limit_check,\r\n lambda: constant_op.constant(0, dtype=dtypes.int64))\r\n with ops.control_dependencies([discard_limit_op]):\r\n non_decreasing = array_ops.identity(non_decreasing)\r\n else:\r\n _, indices_descending = nn.top_k(\r\n times, k=array_ops.shape(times)[-1], sorted=True)\r\n indices = array_ops.reverse(indices_descending, axis=[0])\r\n features_windowed = {\r\n key: array_ops.gather(params=value, indices=indices)\r\n for key, value in features_windowed.items()\r\n }\r\n non_decreasing = True\r\n features_batched = input_lib.maybe_shuffle_batch(\r\n features_windowed,\r\n num_threads=self._num_threads,\r\n seed=self._shuffle_seed,\r\n batch_size=self._batch_size,\r\n capacity=self._queue_capacity_multiplier * self._batch_size,\r\n min_after_dequeue=(self._shuffle_min_after_dequeue_multiplier *\r\n self._batch_size),\r\n keep_input=non_decreasing,\r\n enqueue_many=True)\r\n return (features_batched, None)\r\n\r\n\r\ndef _canonicalize_numpy_data(data, require_single_batch):\r\n \"\"\"Do basic checking and reshaping for Numpy data.\r\n\r\n Args:\r\n data: A dictionary mapping keys to Numpy arrays, with several possible\r\n shapes (requires keys `TrainEvalFeatures.TIMES` and\r\n `TrainEvalFeatures.VALUES`):\r\n Single example; `TIMES` is a scalar and `VALUES` is either a scalar or a\r\n vector of length [number of features].\r\n Sequence; `TIMES` is a vector of shape [series length], `VALUES` either\r\n has shape [series length] (univariate) or [series length x number of\r\n features] (multivariate).\r\n Batch of sequences; `TIMES` is a vector of shape [batch size x series\r\n length], `VALUES` has shape [batch size x series length] or [batch\r\n size x series length x number of features].\r\n In any case, `VALUES` and any exogenous features must have their shapes\r\n prefixed by the shape of the value corresponding to the `TIMES` key.\r\n require_single_batch: If True, raises an error if the provided data has a\r\n batch dimension > 1.\r\n Returns:\r\n A dictionary with features normalized to have shapes prefixed with [batch\r\n size x series length]. 
The sizes of dimensions which were omitted in the\r\n inputs are 1.\r\n Raises:\r\n ValueError: If dimensions are incorrect or do not match, or required\r\n features are missing.\r\n \"\"\"\r\n features = {key: numpy.array(value) for key, value in data.items()}\r\n if (feature_keys.TrainEvalFeatures.TIMES not in features or\r\n feature_keys.TrainEvalFeatures.VALUES not in features):\r\n raise ValueError(\"{} and {} are required features.\".format(\r\n feature_keys.TrainEvalFeatures.TIMES,\r\n feature_keys.TrainEvalFeatures.VALUES))\r\n times = features[feature_keys.TrainEvalFeatures.TIMES]\r\n for key, value in features.items():\r\n if value.shape[:len(times.shape)] != times.shape:\r\n raise ValueError(\r\n (\"All features must have their shapes prefixed by the shape of the\"\r\n \" times feature. Got shape {} for feature '{}', but shape {} for\"\r\n \" '{}'\").format(value.shape, key, times.shape,\r\n feature_keys.TrainEvalFeatures.TIMES))\r\n if not times.shape: # a single example\r\n if not features[feature_keys.TrainEvalFeatures.VALUES].shape: # univariate\r\n # Add a feature dimension (with one feature)\r\n features[feature_keys.TrainEvalFeatures.VALUES] = features[\r\n feature_keys.TrainEvalFeatures.VALUES][..., None]\r\n elif len(features[feature_keys.TrainEvalFeatures.VALUES].shape) > 1:\r\n raise ValueError(\r\n (\"Got an unexpected number of dimensions for the '{}' feature.\"\r\n \" Was expecting at most 1 dimension\"\r\n \" ([number of features]) since '{}' does not \"\r\n \"have a batch or time dimension, but got shape {}\").format(\r\n feature_keys.TrainEvalFeatures.VALUES,\r\n feature_keys.TrainEvalFeatures.TIMES,\r\n features[feature_keys.TrainEvalFeatures.VALUES].shape))\r\n # Add trivial batch and time dimensions for every feature\r\n features = {key: value[None, None, ...] for key, value in features.items()}\r\n if len(times.shape) == 1: # shape [series length]\r\n if len(features[feature_keys.TrainEvalFeatures.VALUES]\r\n .shape) == 1: # shape [series length]\r\n # Add a feature dimension (with one feature)\r\n features[feature_keys.TrainEvalFeatures.VALUES] = features[\r\n feature_keys.TrainEvalFeatures.VALUES][..., None]\r\n elif len(features[feature_keys.TrainEvalFeatures.VALUES].shape) > 2:\r\n raise ValueError(\r\n (\"Got an unexpected number of dimensions for the '{}' feature.\"\r\n \" Was expecting at most 2 dimensions\"\r\n \" ([series length, number of features]) since '{}' does not \"\r\n \"have a batch dimension, but got shape {}\").format(\r\n feature_keys.TrainEvalFeatures.VALUES,\r\n feature_keys.TrainEvalFeatures.TIMES,\r\n features[feature_keys.TrainEvalFeatures.VALUES].shape))\r\n # Add trivial batch dimensions for every feature\r\n features = {key: value[None, ...] for key, value in features.items()}\r\n elif len(features[feature_keys.TrainEvalFeatures.TIMES]\r\n .shape) != 2: # shape [batch size, series length]\r\n raise ValueError(\r\n (\"Got an unexpected number of dimensions for times. Was expecting at \"\r\n \"most two ([batch size, series length]), but got shape {}.\").format(\r\n times.shape))\r\n if require_single_batch:\r\n # We don't expect input to be already batched; batching is done later\r\n if features[feature_keys.TrainEvalFeatures.TIMES].shape[0] != 1:\r\n raise ValueError(\"Got batch input, was expecting unbatched input.\")\r\n return features\r\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Operations for generating random numbers.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.framework import random_seed\r\nfrom tensorflow.python.ops import array_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import gen_random_ops\r\nfrom tensorflow.python.ops import math_ops\r\n# go/tf-wildcard-import\r\n# pylint: disable=wildcard-import\r\nfrom tensorflow.python.ops.gen_random_ops import *\r\nfrom tensorflow.python.util import deprecation\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n# pylint: enable=wildcard-import\r\n\r\n\r\ndef _ShapeTensor(shape):\r\n \"\"\"Convert to an int32 or int64 tensor, defaulting to int32 if empty.\"\"\"\r\n if isinstance(shape, (tuple, list)) and not shape:\r\n dtype = dtypes.int32\r\n else:\r\n dtype = None\r\n return ops.convert_to_tensor(shape, dtype=dtype, name=\"shape\")\r\n\r\n\r\n@tf_export(\"random.normal\", \"random_normal\")\r\ndef random_normal(shape,\r\n mean=0.0,\r\n stddev=1.0,\r\n dtype=dtypes.float32,\r\n seed=None,\r\n name=None):\r\n \"\"\"Outputs random values from a normal distribution.\r\n\r\n Args:\r\n shape: A 1-D integer Tensor or Python array. The shape of the output tensor.\r\n mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal\r\n distribution.\r\n stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation\r\n of the normal distribution.\r\n dtype: The type of the output.\r\n seed: A Python integer. 
Used to create a random seed for the distribution.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tensor of the specified shape filled with random normal values.\r\n \"\"\"\r\n with ops.name_scope(name, \"random_normal\", [shape, mean, stddev]) as name:\r\n shape_tensor = _ShapeTensor(shape)\r\n mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name=\"mean\")\r\n stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name=\"stddev\")\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n rnd = gen_random_ops.random_standard_normal(\r\n shape_tensor, dtype, seed=seed1, seed2=seed2)\r\n mul = rnd * stddev_tensor\r\n value = math_ops.add(mul, mean_tensor, name=name)\r\n return value\r\n\r\n\r\nops.NotDifferentiable(\"RandomStandardNormal\")\r\n\r\n\r\ndef parameterized_truncated_normal(shape,\r\n means=0.0,\r\n stddevs=1.0,\r\n minvals=-2.0,\r\n maxvals=2.0,\r\n dtype=dtypes.float32,\r\n seed=None,\r\n name=None):\r\n \"\"\"Outputs random values from a truncated normal distribution.\r\n\r\n The generated values follow a normal distribution with specified mean and\r\n standard deviation, except that values whose magnitude is more than 2 standard\r\n deviations from the mean are dropped and re-picked.\r\n\r\n Args:\r\n shape: A 1-D integer Tensor or Python array. The shape of the output tensor.\r\n means: A 0-D Tensor or Python value of type `dtype`. The mean of the\r\n truncated normal distribution.\r\n stddevs: A 0-D Tensor or Python value of type `dtype`. The standard\r\n deviation of the truncated normal distribution.\r\n minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of\r\n the truncated normal distribution.\r\n maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of\r\n the truncated normal distribution.\r\n dtype: The type of the output.\r\n seed: A Python integer. Used to create a random seed for the distribution.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tensor of the specified shape filled with random truncated normal values.\r\n \"\"\"\r\n with ops.name_scope(name, \"parameterized_truncated_normal\",\r\n [shape, means, stddevs, minvals, maxvals]) as name:\r\n shape_tensor = _ShapeTensor(shape)\r\n means_tensor = ops.convert_to_tensor(means, dtype=dtype, name=\"means\")\r\n stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name=\"stddevs\")\r\n minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name=\"minvals\")\r\n maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name=\"maxvals\")\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n rnd = gen_random_ops.parameterized_truncated_normal(\r\n shape_tensor,\r\n means_tensor,\r\n stddevs_tensor,\r\n minvals_tensor,\r\n maxvals_tensor,\r\n seed=seed1,\r\n seed2=seed2)\r\n return rnd\r\n\r\n\r\n@tf_export(\"random.truncated_normal\", \"truncated_normal\")\r\ndef truncated_normal(shape,\r\n mean=0.0,\r\n stddev=1.0,\r\n dtype=dtypes.float32,\r\n seed=None,\r\n name=None):\r\n \"\"\"Outputs random values from a truncated normal distribution.\r\n\r\n The generated values follow a normal distribution with specified mean and\r\n standard deviation, except that values whose magnitude is more than 2 standard\r\n deviations from the mean are dropped and re-picked.\r\n\r\n Args:\r\n shape: A 1-D integer Tensor or Python array. The shape of the output tensor.\r\n mean: A 0-D Tensor or Python value of type `dtype`. 
The mean of the\r\n truncated normal distribution.\r\n stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation\r\n of the normal distribution, before truncation.\r\n dtype: The type of the output.\r\n seed: A Python integer. Used to create a random seed for the distribution.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tensor of the specified shape filled with random truncated normal values.\r\n \"\"\"\r\n with ops.name_scope(name, \"truncated_normal\", [shape, mean, stddev]) as name:\r\n shape_tensor = _ShapeTensor(shape)\r\n mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name=\"mean\")\r\n stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name=\"stddev\")\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n rnd = gen_random_ops.truncated_normal(\r\n shape_tensor, dtype, seed=seed1, seed2=seed2)\r\n mul = rnd * stddev_tensor\r\n value = math_ops.add(mul, mean_tensor, name=name)\r\n return value\r\n\r\n\r\nops.NotDifferentiable(\"ParameterizedTruncatedNormal\")\r\nops.NotDifferentiable(\"TruncatedNormal\")\r\n\r\n\r\n@tf_export(\"random.uniform\", \"random_uniform\")\r\ndef random_uniform(shape,\r\n minval=0,\r\n maxval=None,\r\n dtype=dtypes.float32,\r\n seed=None,\r\n name=None):\r\n \"\"\"Outputs random values from a uniform distribution.\r\n\r\n The generated values follow a uniform distribution in the range\r\n `[minval, maxval)`. The lower bound `minval` is included in the range, while\r\n the upper bound `maxval` is excluded.\r\n\r\n For floats, the default range is `[0, 1)`. For ints, at least `maxval` must\r\n be specified explicitly.\r\n\r\n In the integer case, the random integers are slightly biased unless\r\n `maxval - minval` is an exact power of two. The bias is small for values of\r\n `maxval - minval` significantly smaller than the range of the output (either\r\n `2**32` or `2**64`).\r\n\r\n Args:\r\n shape: A 1-D integer Tensor or Python array. The shape of the output tensor.\r\n minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the\r\n range of random values to generate. Defaults to 0.\r\n maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on\r\n the range of random values to generate. Defaults to 1 if `dtype` is\r\n floating point.\r\n dtype: The type of the output: `float16`, `float32`, `float64`, `int32`,\r\n or `int64`.\r\n seed: A Python integer. 
Used to create a random seed for the distribution.\r\n See `tf.set_random_seed`\r\n for behavior.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tensor of the specified shape filled with random uniform values.\r\n\r\n Raises:\r\n ValueError: If `dtype` is integral and `maxval` is not specified.\r\n \"\"\"\r\n dtype = dtypes.as_dtype(dtype)\r\n if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32,\r\n dtypes.float64, dtypes.int32, dtypes.int64):\r\n raise ValueError(\"Invalid dtype %r\" % dtype)\r\n if maxval is None:\r\n if dtype.is_integer:\r\n raise ValueError(\"Must specify maxval for integer dtype %r\" % dtype)\r\n maxval = 1\r\n with ops.name_scope(name, \"random_uniform\", [shape, minval, maxval]) as name:\r\n shape = _ShapeTensor(shape)\r\n minval = ops.convert_to_tensor(minval, dtype=dtype, name=\"min\")\r\n maxval = ops.convert_to_tensor(maxval, dtype=dtype, name=\"max\")\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n if dtype.is_integer:\r\n return gen_random_ops.random_uniform_int(\r\n shape, minval, maxval, seed=seed1, seed2=seed2, name=name)\r\n else:\r\n rnd = gen_random_ops.random_uniform(shape, dtype, seed=seed1, seed2=seed2)\r\n return math_ops.add(rnd * (maxval - minval), minval, name=name)\r\n\r\n\r\nops.NotDifferentiable(\"RandomUniform\")\r\n\r\n\r\n@tf_export(\"random.shuffle\", \"random_shuffle\")\r\ndef random_shuffle(value, seed=None, name=None):\r\n \"\"\"Randomly shuffles a tensor along its first dimension.\r\n\r\n The tensor is shuffled along dimension 0, such that each `value[j]` is mapped\r\n to one and only one `output[i]`. For example, a mapping that might occur for a\r\n 3x2 tensor is:\r\n\r\n ```python\r\n [[1, 2], [[5, 6],\r\n [3, 4], ==> [1, 2],\r\n [5, 6]] [3, 4]]\r\n ```\r\n\r\n Args:\r\n value: A Tensor to be shuffled.\r\n seed: A Python integer. Used to create a random seed for the distribution.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tensor of same shape and type as `value`, shuffled along its first\r\n dimension.\r\n \"\"\"\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n return gen_random_ops.random_shuffle(\r\n value, seed=seed1, seed2=seed2, name=name)\r\n\r\n\r\n@tf_export(\"image.random_crop\", \"random_crop\")\r\ndef random_crop(value, size, seed=None, name=None):\r\n \"\"\"Randomly crops a tensor to a given size.\r\n\r\n Slices a shape `size` portion out of `value` at a uniformly chosen offset.\r\n Requires `value.shape >= size`.\r\n\r\n If a dimension should not be cropped, pass the full size of that dimension.\r\n For example, RGB images can be cropped with\r\n `size = [crop_height, crop_width, 3]`.\r\n\r\n Args:\r\n value: Input tensor to crop.\r\n size: 1-D tensor with size the rank of `value`.\r\n seed: Python integer. Used to create a random seed. 
See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: A name for this operation (optional).\r\n\r\n Returns:\r\n A cropped tensor of the same rank as `value` and shape `size`.\r\n \"\"\"\r\n # TODO(shlens): Implement edge case to guarantee output size dimensions.\r\n # If size > value.shape, zero pad the result so that it always has shape\r\n # exactly size.\r\n with ops.name_scope(name, \"random_crop\", [value, size]) as name:\r\n value = ops.convert_to_tensor(value, name=\"value\")\r\n size = ops.convert_to_tensor(size, dtype=dtypes.int32, name=\"size\")\r\n shape = array_ops.shape(value)\r\n check = control_flow_ops.Assert(\r\n math_ops.reduce_all(shape >= size),\r\n [\"Need value.shape >= size, got \", shape, size],\r\n summarize=1000)\r\n shape = control_flow_ops.with_dependencies([check], shape)\r\n limit = shape - size + 1\r\n offset = random_uniform(\r\n array_ops.shape(shape),\r\n dtype=size.dtype,\r\n maxval=size.dtype.max,\r\n seed=seed) % limit\r\n return array_ops.slice(value, offset, size, name=name)\r\n\r\n\r\n@tf_export(\"random.multinomial\", \"multinomial\")\r\ndef multinomial(logits, num_samples, seed=None, name=None, output_dtype=None):\r\n \"\"\"Draws samples from a multinomial distribution.\r\n\r\n Example:\r\n\r\n ```python\r\n # samples has shape [1, 5], where each value is either 0 or 1 with equal\r\n # probability.\r\n samples = tf.multinomial(tf.log([[10., 10.]]), 5)\r\n ```\r\n\r\n Args:\r\n logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice\r\n `[i, :]` represents the unnormalized log-probabilities for all classes.\r\n num_samples: 0-D. Number of independent samples to draw for each row slice.\r\n seed: A Python integer. Used to create a random seed for the distribution.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: Optional name for the operation.\r\n output_dtype: integer type to use for the output. Defaults to int64.\r\n\r\n Returns:\r\n The drawn samples of shape `[batch_size, num_samples]`.\r\n \"\"\"\r\n with ops.name_scope(name, \"multinomial\", [logits]):\r\n logits = ops.convert_to_tensor(logits, name=\"logits\")\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n return gen_random_ops.multinomial(\r\n logits, num_samples, seed=seed1, seed2=seed2, output_dtype=output_dtype)\r\n\r\n\r\nops.NotDifferentiable(\"Multinomial\")\r\n\r\n\r\n@tf_export(\"random.gamma\", \"random_gamma\")\r\[email protected]_endpoints(\"random_gamma\")\r\ndef random_gamma(shape,\r\n alpha,\r\n beta=None,\r\n dtype=dtypes.float32,\r\n seed=None,\r\n name=None):\r\n \"\"\"Draws `shape` samples from each of the given Gamma distribution(s).\r\n\r\n `alpha` is the shape parameter describing the distribution(s), and `beta` is\r\n the inverse scale parameter(s).\r\n\r\n Note: Because internal calculations are done using `float64` and casting has\r\n `floor` semantics, we must manually map zero outcomes to the smallest\r\n possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This\r\n means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise\r\n should. This bias can only happen for small values of `alpha`, i.e.,\r\n `alpha << 1` or large values of `beta`, i.e., `beta >> 1`.\r\n\r\n The samples are differentiable w.r.t. 
alpha and beta.\r\n The derivatives are computed using the approach described in the paper\r\n\r\n [Michael Figurnov, Shakir Mohamed, Andriy Mnih.\r\n Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)\r\n\r\n Example:\r\n\r\n ```python\r\n samples = tf.random_gamma([10], [0.5, 1.5])\r\n # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents\r\n # the samples drawn from each distribution\r\n\r\n samples = tf.random_gamma([7, 5], [0.5, 1.5])\r\n # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]\r\n # represents the 7x5 samples drawn from each of the two distributions\r\n\r\n alpha = tf.constant([[1.],[3.],[5.]])\r\n beta = tf.constant([[3., 4.]])\r\n samples = tf.random_gamma([30], alpha=alpha, beta=beta)\r\n # samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions.\r\n\r\n loss = tf.reduce_mean(tf.square(samples))\r\n dloss_dalpha, dloss_dbeta = tf.gradients(loss, [alpha, beta])\r\n # unbiased stochastic derivatives of the loss function\r\n alpha.shape == dloss_dalpha.shape # True\r\n beta.shape == dloss_dbeta.shape # True\r\n ```\r\n\r\n Args:\r\n shape: A 1-D integer Tensor or Python array. The shape of the output samples\r\n to be drawn per alpha/beta-parameterized distribution.\r\n alpha: A Tensor or Python value or N-D array of type `dtype`. `alpha`\r\n provides the shape parameter(s) describing the gamma distribution(s) to\r\n sample. Must be broadcastable with `beta`.\r\n beta: A Tensor or Python value or N-D array of type `dtype`. Defaults to 1.\r\n `beta` provides the inverse scale parameter(s) of the gamma\r\n distribution(s) to sample. Must be broadcastable with `alpha`.\r\n dtype: The type of alpha, beta, and the output: `float16`, `float32`, or\r\n `float64`.\r\n seed: A Python integer. 
Used to create a random seed for the distributions.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: Optional name for the operation.\r\n\r\n Returns:\r\n samples: a `Tensor` of shape\r\n `tf.concat([shape, tf.shape(alpha + beta)], axis=0)` with values of type\r\n `dtype`.\r\n \"\"\"\r\n with ops.name_scope(name, \"random_gamma\", [shape, alpha, beta]):\r\n shape = ops.convert_to_tensor(shape, name=\"shape\", dtype=dtypes.int32)\r\n alpha = ops.convert_to_tensor(alpha, name=\"alpha\", dtype=dtype)\r\n beta = ops.convert_to_tensor(\r\n beta if beta is not None else 1, name=\"beta\", dtype=dtype)\r\n alpha_broadcast = alpha + array_ops.zeros_like(beta)\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n return math_ops.maximum(\r\n np.finfo(dtype.as_numpy_dtype).tiny,\r\n gen_random_ops.random_gamma(\r\n shape, alpha_broadcast, seed=seed1, seed2=seed2) / beta)\r\n\r\n\r\n@tf_export(\"random.poisson\", \"random_poisson\")\r\[email protected]_endpoints(\"random_poisson\")\r\ndef random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None):\r\n \"\"\"Draws `shape` samples from each of the given Poisson distribution(s).\r\n\r\n `lam` is the rate parameter describing the distribution(s).\r\n\r\n Example:\r\n\r\n ```python\r\n samples = tf.random_poisson([0.5, 1.5], [10])\r\n # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents\r\n # the samples drawn from each distribution\r\n\r\n samples = tf.random_poisson([12.2, 3.3], [7, 5])\r\n # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]\r\n # represents the 7x5 samples drawn from each of the two distributions\r\n ```\r\n\r\n Args:\r\n lam: A Tensor or Python value or N-D array of type `dtype`.\r\n `lam` provides the rate parameter(s) describing the poisson\r\n distribution(s) to sample.\r\n shape: A 1-D integer Tensor or Python array. The shape of the output samples\r\n to be drawn per \"rate\"-parameterized distribution.\r\n dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or\r\n `int64`.\r\n seed: A Python integer. Used to create a random seed for the distributions.\r\n See\r\n `tf.set_random_seed`\r\n for behavior.\r\n name: Optional name for the operation.\r\n\r\n Returns:\r\n samples: a `Tensor` of shape `tf.concat([shape, tf.shape(lam)], axis=0)`\r\n with values of type `dtype`.\r\n \"\"\"\r\n with ops.name_scope(name, \"random_poisson\", [lam, shape]):\r\n shape = ops.convert_to_tensor(shape, name=\"shape\", dtype=dtypes.int32)\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n return gen_random_ops.random_poisson_v2(\r\n shape, lam, dtype=dtype, seed=seed1, seed2=seed2)\r\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n# pylint: disable=invalid-name\r\n\"\"\"Save and restore variables.\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport os.path\r\nimport re\r\nimport time\r\n\r\nfrom google.protobuf import text_format\r\n\r\nfrom tensorflow.core.protobuf import saver_pb2\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.framework import errors\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.lib.io import file_io\r\nfrom tensorflow.python.ops import variable_scope\r\nfrom tensorflow.python.platform import tf_logging as logging\r\nfrom tensorflow.python.training import training_util\r\nfrom tensorflow.python.training.checkpoint_state_pb2 import CheckpointState\r\nfrom tensorflow.python.util import compat\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\ndef _GetCheckpointFilename(save_dir, latest_filename):\r\n \"\"\"Returns a filename for storing the CheckpointState.\r\n\r\n Args:\r\n save_dir: The directory for saving and restoring checkpoints.\r\n latest_filename: Name of the file in 'save_dir' that is used\r\n to store the CheckpointState.\r\n\r\n Returns:\r\n The path of the file that contains the CheckpointState proto.\r\n \"\"\"\r\n if latest_filename is None:\r\n latest_filename = \"checkpoint\"\r\n return os.path.join(save_dir, latest_filename)\r\n\r\n\r\n@tf_export(\"train.generate_checkpoint_state_proto\")\r\ndef generate_checkpoint_state_proto(save_dir,\r\n model_checkpoint_path,\r\n all_model_checkpoint_paths=None,\r\n all_model_checkpoint_timestamps=None,\r\n last_preserved_timestamp=None):\r\n \"\"\"Generates a checkpoint state proto.\r\n\r\n Args:\r\n save_dir: Directory where the model was saved.\r\n model_checkpoint_path: The checkpoint file.\r\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\r\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\r\n the last element must be equal to model_checkpoint_path. These paths\r\n are also saved in the CheckpointState proto.\r\n all_model_checkpoint_timestamps: A list of floats, indicating the number of\r\n seconds since the Epoch when each checkpoint was generated.\r\n last_preserved_timestamp: A float, indicating the number of seconds since\r\n the Epoch when the last preserved checkpoint was written, e.g. 
due to a\r\n `keep_checkpoint_every_n_hours` parameter (see\r\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\r\n Returns:\r\n CheckpointState proto with model_checkpoint_path and\r\n all_model_checkpoint_paths updated to either absolute paths or\r\n relative paths to the current save_dir.\r\n\r\n Raises:\r\n ValueError: If `all_model_checkpoint_timestamps` was provided but its length\r\n does not match `all_model_checkpoint_paths`.\r\n \"\"\"\r\n if all_model_checkpoint_paths is None:\r\n all_model_checkpoint_paths = []\r\n\r\n if (not all_model_checkpoint_paths or\r\n all_model_checkpoint_paths[-1] != model_checkpoint_path):\r\n logging.info(\"%s is not in all_model_checkpoint_paths. Manually adding it.\",\r\n model_checkpoint_path)\r\n all_model_checkpoint_paths.append(model_checkpoint_path)\r\n\r\n if (all_model_checkpoint_timestamps\r\n and (len(all_model_checkpoint_timestamps)\r\n != len(all_model_checkpoint_paths))):\r\n raise ValueError(\r\n (\"Checkpoint timestamps, if provided, must match checkpoint paths (got \"\r\n \"paths %s and timestamps %s)\")\r\n % (all_model_checkpoint_paths, all_model_checkpoint_timestamps))\r\n\r\n # Relative paths need to be rewritten to be relative to the \"save_dir\"\r\n # if model_checkpoint_path already contains \"save_dir\".\r\n if not os.path.isabs(save_dir):\r\n if not os.path.isabs(model_checkpoint_path):\r\n model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)\r\n for i in range(len(all_model_checkpoint_paths)):\r\n p = all_model_checkpoint_paths[i]\r\n if not os.path.isabs(p):\r\n all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)\r\n\r\n coord_checkpoint_proto = CheckpointState(\r\n model_checkpoint_path=model_checkpoint_path,\r\n all_model_checkpoint_paths=all_model_checkpoint_paths,\r\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\r\n last_preserved_timestamp=last_preserved_timestamp)\r\n\r\n return coord_checkpoint_proto\r\n\r\n\r\n@tf_export(\"train.update_checkpoint_state\")\r\ndef update_checkpoint_state(save_dir,\r\n model_checkpoint_path,\r\n all_model_checkpoint_paths=None,\r\n latest_filename=None,\r\n all_model_checkpoint_timestamps=None,\r\n last_preserved_timestamp=None):\r\n \"\"\"Updates the content of the 'checkpoint' file.\r\n\r\n This updates the checkpoint file containing a CheckpointState\r\n proto.\r\n\r\n Args:\r\n save_dir: Directory where the model was saved.\r\n model_checkpoint_path: The checkpoint file.\r\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\r\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\r\n the last element must be equal to model_checkpoint_path. These paths\r\n are also saved in the CheckpointState proto.\r\n latest_filename: Optional name of the checkpoint file. Default to\r\n 'checkpoint'.\r\n all_model_checkpoint_timestamps: Optional list of timestamps (floats,\r\n seconds since the Epoch) indicating when the checkpoints in\r\n `all_model_checkpoint_paths` were created.\r\n last_preserved_timestamp: A float, indicating the number of seconds since\r\n the Epoch when the last preserved checkpoint was written, e.g. 
due to a\r\n `keep_checkpoint_every_n_hours` parameter (see\r\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\r\n Raises:\r\n RuntimeError: If any of the model checkpoint paths conflict with the file\r\n containing CheckpointSate.\r\n \"\"\"\r\n update_checkpoint_state_internal(\r\n save_dir=save_dir,\r\n model_checkpoint_path=model_checkpoint_path,\r\n all_model_checkpoint_paths=all_model_checkpoint_paths,\r\n latest_filename=latest_filename,\r\n save_relative_paths=False,\r\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\r\n last_preserved_timestamp=last_preserved_timestamp)\r\n\r\n\r\ndef update_checkpoint_state_internal(save_dir,\r\n model_checkpoint_path,\r\n all_model_checkpoint_paths=None,\r\n latest_filename=None,\r\n save_relative_paths=False,\r\n all_model_checkpoint_timestamps=None,\r\n last_preserved_timestamp=None):\r\n \"\"\"Updates the content of the 'checkpoint' file.\r\n\r\n This updates the checkpoint file containing a CheckpointState\r\n proto.\r\n\r\n Args:\r\n save_dir: Directory where the model was saved.\r\n model_checkpoint_path: The checkpoint file.\r\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\r\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\r\n the last element must be equal to model_checkpoint_path. These paths\r\n are also saved in the CheckpointState proto.\r\n latest_filename: Optional name of the checkpoint file. Default to\r\n 'checkpoint'.\r\n save_relative_paths: If `True`, will write relative paths to the checkpoint\r\n state file.\r\n all_model_checkpoint_timestamps: Optional list of timestamps (floats,\r\n seconds since the Epoch) indicating when the checkpoints in\r\n `all_model_checkpoint_paths` were created.\r\n last_preserved_timestamp: A float, indicating the number of seconds since\r\n the Epoch when the last preserved checkpoint was written, e.g. due to a\r\n `keep_checkpoint_every_n_hours` parameter (see\r\n `tf.contrib.checkpoint.CheckpointManager` for an implementation).\r\n\r\n Raises:\r\n RuntimeError: If any of the model checkpoint paths conflict with the file\r\n containing CheckpointSate.\r\n \"\"\"\r\n # Writes the \"checkpoint\" file for the coordinator for later restoration.\r\n coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)\r\n if save_relative_paths:\r\n if os.path.isabs(model_checkpoint_path):\r\n rel_model_checkpoint_path = os.path.relpath(\r\n model_checkpoint_path, save_dir)\r\n else:\r\n rel_model_checkpoint_path = model_checkpoint_path\r\n rel_all_model_checkpoint_paths = []\r\n for p in all_model_checkpoint_paths:\r\n if os.path.isabs(p):\r\n rel_all_model_checkpoint_paths.append(os.path.relpath(p, save_dir))\r\n else:\r\n rel_all_model_checkpoint_paths.append(p)\r\n ckpt = generate_checkpoint_state_proto(\r\n save_dir,\r\n rel_model_checkpoint_path,\r\n all_model_checkpoint_paths=rel_all_model_checkpoint_paths,\r\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\r\n last_preserved_timestamp=last_preserved_timestamp)\r\n else:\r\n ckpt = generate_checkpoint_state_proto(\r\n save_dir,\r\n model_checkpoint_path,\r\n all_model_checkpoint_paths=all_model_checkpoint_paths,\r\n all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,\r\n last_preserved_timestamp=last_preserved_timestamp)\r\n\r\n if coord_checkpoint_filename == ckpt.model_checkpoint_path:\r\n raise RuntimeError(\"Save path '%s' conflicts with path used for \"\r\n \"checkpoint state. 
Please use a different save path.\" %\r\n model_checkpoint_path)\r\n\r\n # Preventing potential read/write race condition by *atomically* writing to a\r\n # file.\r\n file_io.atomic_write_string_to_file(coord_checkpoint_filename,\r\n text_format.MessageToString(ckpt))\r\n\r\n\r\n@tf_export(\"train.get_checkpoint_state\")\r\ndef get_checkpoint_state(checkpoint_dir, latest_filename=None):\r\n \"\"\"Returns CheckpointState proto from the \"checkpoint\" file.\r\n\r\n If the \"checkpoint\" file contains a valid CheckpointState\r\n proto, returns it.\r\n\r\n Args:\r\n checkpoint_dir: The directory of checkpoints.\r\n latest_filename: Optional name of the checkpoint file. Default to\r\n 'checkpoint'.\r\n\r\n Returns:\r\n A CheckpointState if the state was available, None\r\n otherwise.\r\n\r\n Raises:\r\n ValueError: if the checkpoint read doesn't have model_checkpoint_path set.\r\n \"\"\"\r\n ckpt = None\r\n coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,\r\n latest_filename)\r\n f = None\r\n try:\r\n # Check that the file exists before opening it to avoid\r\n # many lines of errors from colossus in the logs.\r\n if file_io.file_exists(coord_checkpoint_filename):\r\n file_content = file_io.read_file_to_string(\r\n coord_checkpoint_filename)\r\n ckpt = CheckpointState()\r\n text_format.Merge(file_content, ckpt)\r\n if not ckpt.model_checkpoint_path:\r\n raise ValueError(\"Invalid checkpoint state loaded from \"\r\n + checkpoint_dir)\r\n # For relative model_checkpoint_path and all_model_checkpoint_paths,\r\n # prepend checkpoint_dir.\r\n if not os.path.isabs(ckpt.model_checkpoint_path):\r\n ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,\r\n ckpt.model_checkpoint_path)\r\n for i in range(len(ckpt.all_model_checkpoint_paths)):\r\n p = ckpt.all_model_checkpoint_paths[i]\r\n if not os.path.isabs(p):\r\n ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)\r\n except errors.OpError as e:\r\n # It's ok if the file cannot be read\r\n logging.warning(\"%s: %s\", type(e).__name__, e)\r\n logging.warning(\"%s: Checkpoint ignored\", coord_checkpoint_filename)\r\n return None\r\n except text_format.ParseError as e:\r\n logging.warning(\"%s: %s\", type(e).__name__, e)\r\n logging.warning(\"%s: Checkpoint ignored\", coord_checkpoint_filename)\r\n return None\r\n finally:\r\n if f:\r\n f.close()\r\n return ckpt\r\n\r\n\r\ndef _prefix_to_checkpoint_path(prefix, format_version):\r\n \"\"\"Returns the pathname of a checkpoint file, given the checkpoint prefix.\r\n\r\n For V1 checkpoint, simply returns the prefix itself (the data file). 
For V2,\r\n returns the pathname to the index file.\r\n\r\n Args:\r\n prefix: a string, the prefix of a checkpoint.\r\n format_version: the checkpoint format version that corresponds to the\r\n prefix.\r\n Returns:\r\n The pathname of a checkpoint file, taking into account the checkpoint\r\n format version.\r\n \"\"\"\r\n if format_version == saver_pb2.SaverDef.V2:\r\n return prefix + \".index\" # The index file identifies a checkpoint.\r\n return prefix # Just the data file.\r\n\r\n\r\n@tf_export(\"train.latest_checkpoint\")\r\ndef latest_checkpoint(checkpoint_dir, latest_filename=None):\r\n \"\"\"Finds the filename of latest saved checkpoint file.\r\n\r\n Args:\r\n checkpoint_dir: Directory where the variables were saved.\r\n latest_filename: Optional name for the protocol buffer file that\r\n contains the list of most recent checkpoint filenames.\r\n See the corresponding argument to `Saver.save()`.\r\n\r\n Returns:\r\n The full path to the latest checkpoint or `None` if no checkpoint was found.\r\n \"\"\"\r\n # Pick the latest checkpoint based on checkpoint state.\r\n ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)\r\n if ckpt and ckpt.model_checkpoint_path:\r\n # Look for either a V2 path or a V1 path, with priority for V2.\r\n v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,\r\n saver_pb2.SaverDef.V2)\r\n v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,\r\n saver_pb2.SaverDef.V1)\r\n if file_io.get_matching_files(v2_path) or file_io.get_matching_files(\r\n v1_path):\r\n return ckpt.model_checkpoint_path\r\n else:\r\n logging.error(\"Couldn't match files for checkpoint %s\",\r\n ckpt.model_checkpoint_path)\r\n return None\r\n\r\n\r\n@tf_export(\"train.checkpoint_exists\")\r\ndef checkpoint_exists(checkpoint_prefix):\r\n \"\"\"Checks whether a V1 or V2 checkpoint exists with the specified prefix.\r\n\r\n This is the recommended way to check if a checkpoint exists, since it takes\r\n into account the naming difference between V1 and V2 formats.\r\n\r\n Args:\r\n checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking\r\n priority. Typically the result of `Saver.save()` or that of\r\n `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or\r\n V1/V2.\r\n Returns:\r\n A bool, true iff a checkpoint referred to by `checkpoint_prefix` exists.\r\n \"\"\"\r\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix,\r\n saver_pb2.SaverDef.V2)\r\n if file_io.get_matching_files(pathname):\r\n return True\r\n elif file_io.get_matching_files(checkpoint_prefix):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n@tf_export(\"train.get_checkpoint_mtimes\")\r\ndef get_checkpoint_mtimes(checkpoint_prefixes):\r\n \"\"\"Returns the mtimes (modification timestamps) of the checkpoints.\r\n\r\n Globs for the checkpoints pointed to by `checkpoint_prefixes`. If the files\r\n exist, collect their mtime. 
Both V2 and V1 checkpoints are considered, in\r\n that priority.\r\n\r\n This is the recommended way to get the mtimes, since it takes into account\r\n the naming difference between V1 and V2 formats.\r\n\r\n Args:\r\n checkpoint_prefixes: a list of checkpoint paths, typically the results of\r\n `Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of\r\n sharded/non-sharded or V1/V2.\r\n Returns:\r\n A list of mtimes (in microseconds) of the found checkpoints.\r\n \"\"\"\r\n mtimes = []\r\n\r\n def match_maybe_append(pathname):\r\n fnames = file_io.get_matching_files(pathname)\r\n if fnames:\r\n mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1e9)\r\n return True\r\n return False\r\n\r\n for checkpoint_prefix in checkpoint_prefixes:\r\n # Tries V2's metadata file first.\r\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix,\r\n saver_pb2.SaverDef.V2)\r\n if match_maybe_append(pathname):\r\n continue\r\n # Otherwise, tries V1, where the prefix is the complete pathname.\r\n match_maybe_append(checkpoint_prefix)\r\n\r\n return mtimes\r\n\r\n\r\n@tf_export(\"train.remove_checkpoint\")\r\ndef remove_checkpoint(checkpoint_prefix,\r\n checkpoint_format_version=saver_pb2.SaverDef.V2,\r\n meta_graph_suffix=\"meta\"):\r\n \"\"\"Removes a checkpoint given by `checkpoint_prefix`.\r\n\r\n Args:\r\n checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the result\r\n of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of\r\n sharded/non-sharded or V1/V2.\r\n checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to\r\n `SaverDef.V2`.\r\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.\r\n \"\"\"\r\n _delete_file_if_exists(\r\n meta_graph_filename(checkpoint_prefix, meta_graph_suffix))\r\n if checkpoint_format_version == saver_pb2.SaverDef.V2:\r\n # V2 has a metadata file and some data files.\r\n _delete_file_if_exists(checkpoint_prefix + \".index\")\r\n _delete_file_if_exists(checkpoint_prefix + \".data-?????-of-?????\")\r\n else:\r\n # V1, Legacy. Exact match on the data file.\r\n _delete_file_if_exists(checkpoint_prefix)\r\n\r\n\r\ndef _delete_file_if_exists(filespec):\r\n \"\"\"Deletes files matching `filespec`.\"\"\"\r\n for pathname in file_io.get_matching_files(filespec):\r\n file_io.delete_file(pathname)\r\n\r\n\r\ndef meta_graph_filename(checkpoint_filename, meta_graph_suffix=\"meta\"):\r\n \"\"\"Returns the meta graph filename.\r\n\r\n Args:\r\n checkpoint_filename: Name of the checkpoint file.\r\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.\r\n\r\n Returns:\r\n MetaGraph file name.\r\n \"\"\"\r\n # If the checkpoint_filename is sharded, the checkpoint_filename could\r\n # be of format model.ckpt-step#-?????-of-shard#. 
For example,\r\n # model.ckpt-123456-?????-of-00005, or model.ckpt-123456-00001-of-00002.\r\n basename = re.sub(r\"-[\\d\\?]+-of-\\d+$\", \"\", checkpoint_filename)\r\n suffixed_filename = \".\".join([basename, meta_graph_suffix])\r\n return suffixed_filename\r\n\r\n\r\n# TODO(allenl): Allow tf.keras.Model instances in the constructor directly?\r\nclass CheckpointManager(object):\r\n \"\"\"Deletes old checkpoints.\r\n\r\n Example usage:\r\n ```python\r\n import tensorflow as tf\r\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\r\n manager = tf.contrib.checkpoint.CheckpointManager(\r\n checkpoint, directory=\"/tmp/model\", max_to_keep=5)\r\n status = checkpoint.restore(manager.latest_checkpoint)\r\n while True:\r\n # train\r\n manager.save()\r\n ```\r\n\r\n `CheckpointManager` preserves its own state across instantiations (see the\r\n `__init__` documentation for details). Only one should be active in a\r\n particular directory at a time.\r\n \"\"\"\r\n\r\n def __init__(self, checkpoint, directory,\r\n max_to_keep, keep_checkpoint_every_n_hours=None):\r\n \"\"\"Configure a `CheckpointManager` for use in `directory`.\r\n\r\n If a `CheckpointManager` was previously used in `directory`, its\r\n state will be restored. This includes the list of managed checkpoints and\r\n the timestamp bookkeeping necessary to support\r\n `keep_checkpoint_every_n_hours`. The behavior of the new `CheckpointManager`\r\n will be the same as the previous `CheckpointManager`, including cleaning up\r\n existing checkpoints if appropriate.\r\n\r\n Checkpoints are only considered for deletion just after a new checkpoint has\r\n been added. At that point, `max_to_keep` checkpoints will remain in an\r\n \"active set\". Once a checkpoint is preserved by\r\n `keep_checkpoint_every_n_hours` it will not be deleted by this\r\n `CheckpointManager` or any future `CheckpointManager` instantiated in\r\n `directory` (regardless of the new setting of\r\n `keep_checkpoint_every_n_hours`). The `max_to_keep` checkpoints in the\r\n active set may be deleted by this `CheckpointManager` or a future\r\n `CheckpointManager` instantiated in `directory` (subject to its\r\n `max_to_keep` and `keep_checkpoint_every_n_hours` settings).\r\n\r\n Args:\r\n checkpoint: The `tf.train.Checkpoint` instance to save and manage\r\n checkpoints for.\r\n directory: The path to a directory in which to write checkpoints. A\r\n special file named \"checkpoint\" is also written to this directory (in a\r\n human-readable text format) which contains the state of the\r\n `CheckpointManager`.\r\n max_to_keep: An integer, the number of checkpoints to keep. Unless\r\n preserved by `keep_checkpoint_every_n_hours`, checkpoints will be\r\n deleted from the active set, oldest first, until only `max_to_keep`\r\n checkpoints remain. If `None`, no checkpoints are deleted and everything\r\n stays in the active set. Note that `max_to_keep=None` will keep all\r\n checkpoint paths in memory and in the checkpoint state protocol buffer\r\n on disk.\r\n keep_checkpoint_every_n_hours: Upon removal from the active set, a\r\n checkpoint will be preserved if it has been at least\r\n `keep_checkpoint_every_n_hours` since the last preserved checkpoint. 
The\r\n default setting of `None` does not preserve any checkpoints in this way.\r\n\r\n Raises:\r\n ValueError: If `max_to_keep` is not a positive integer.\r\n \"\"\"\r\n self._checkpoint = checkpoint\r\n self._save_counter_assign = None\r\n if max_to_keep is not None and max_to_keep <= 0:\r\n raise ValueError(\r\n (\"Expected a positive integer or `None` for `max_to_max_to_keep`, \"\r\n \"got %d.\")\r\n % (max_to_keep,))\r\n self._max_to_keep = max_to_keep\r\n self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours\r\n self._directory = directory\r\n self._checkpoint_prefix = os.path.join(directory, \"ckpt\")\r\n recovered_state = get_checkpoint_state(directory)\r\n current_clock = time.time()\r\n self._maybe_delete = collections.OrderedDict()\r\n if recovered_state is None:\r\n self._latest_checkpoint = None\r\n # Set the clock back slightly to avoid race conditions when quckly\r\n # re-creating a CheckpointManager.\r\n self._last_preserved_timestamp = current_clock - 1.\r\n else:\r\n self._latest_checkpoint = recovered_state.model_checkpoint_path\r\n self._last_preserved_timestamp = recovered_state.last_preserved_timestamp\r\n if current_clock < self._last_preserved_timestamp:\r\n # Time seems to have reversed itself. In addition to this warning, we'll\r\n # min() saved checkpoint timestamps with the current time to ensure that\r\n # old checkpoints don't get deleted accidentally.\r\n logging.warning(\r\n (\"time.time() returned a value %f seconds behind the last \"\r\n \"preserved checkpoint timestamp.\")\r\n % (self._last_preserved_timestamp - current_clock,))\r\n self._last_preserved_timestamp = current_clock\r\n all_timestamps = recovered_state.all_model_checkpoint_timestamps\r\n all_paths = recovered_state.all_model_checkpoint_paths\r\n del recovered_state # Uses modified values from now on\r\n if not all_timestamps:\r\n all_timestamps = [self._last_preserved_timestamp] * len(all_paths)\r\n\r\n for filename, timestamp in zip(all_paths, all_timestamps):\r\n timestamp = min(timestamp, current_clock)\r\n if timestamp > self._last_preserved_timestamp:\r\n self._maybe_delete[filename] = timestamp\r\n\r\n @property\r\n def latest_checkpoint(self):\r\n \"\"\"The prefix of the most recent checkpoint in `directory`.\r\n\r\n Equivalent to `tf.train.latest_checkpoint(directory)` where `directory` is\r\n the constructor argument to `CheckpointManager`.\r\n\r\n Suitable for passing to `tf.train.Checkpoint.restore` to resume training.\r\n\r\n Returns:\r\n The checkpoint prefix. 
If there are no checkpoints, returns `None`.\r\n \"\"\"\r\n return self._latest_checkpoint\r\n\r\n @property\r\n def checkpoints(self):\r\n \"\"\"A list of managed checkpoints.\r\n\r\n Note that checkpoints saved due to `keep_checkpoint_every_n_hours` will not\r\n show up in this list (to avoid ever-growing filename lists).\r\n\r\n Returns:\r\n A list of filenames, sorted from oldest to newest.\r\n \"\"\"\r\n return list(self._maybe_delete.keys())\r\n\r\n def _sweep(self):\r\n \"\"\"Deletes or preserves managed checkpoints.\"\"\"\r\n if not self._max_to_keep:\r\n # Does not update self._last_preserved_timestamp, since everything is kept\r\n # in the active set.\r\n return\r\n while len(self._maybe_delete) > self._max_to_keep:\r\n filename, timestamp = self._maybe_delete.popitem(last=False)\r\n # Even if we're keeping this checkpoint due to\r\n # keep_checkpoint_every_n_hours, we won't reference it to avoid\r\n # infinitely-growing CheckpointState protos.\r\n if (self._keep_checkpoint_every_n_hours\r\n and (timestamp - self._keep_checkpoint_every_n_hours * 3600.\r\n >= self._last_preserved_timestamp)):\r\n self._last_preserved_timestamp = timestamp\r\n continue\r\n remove_checkpoint(filename)\r\n\r\n def _record_state(self):\r\n \"\"\"Saves the `CheckpointManager`'s state in `directory`.\"\"\"\r\n filenames, timestamps = zip(*self._maybe_delete.items())\r\n update_checkpoint_state_internal(\r\n self._directory,\r\n model_checkpoint_path=self.latest_checkpoint,\r\n all_model_checkpoint_paths=filenames,\r\n all_model_checkpoint_timestamps=timestamps,\r\n last_preserved_timestamp=self._last_preserved_timestamp,\r\n save_relative_paths=True)\r\n\r\n @property\r\n def _prefix(self):\r\n \"\"\"A common prefix for all checkpoints saved with this manager.\r\n\r\n For example, if `directory` (a constructor argument) were `\"/tmp/tf-model\"`,\r\n `prefix` would be `\"/tmp/tf-model/ckpt\"` and checkpoints would generally be\r\n numbered `\"/tmp/tf-model/ckpt-1\"`, `\"/tmp/tf-model/ckpt-2\"`, and so on. Each\r\n checkpoint has several associated files\r\n (e.g. `\"/tmp/tf-model/ckpt-2.index\"`).\r\n\r\n Returns:\r\n A string prefix.\r\n \"\"\"\r\n return self._checkpoint_prefix\r\n\r\n def save(self, session=None, checkpoint_number=None):\r\n \"\"\"Creates a new checkpoint and manages it.\r\n\r\n Args:\r\n session: The session to evaluate variables in. Ignored when executing\r\n eagerly. If not provided when graph building, the default session is\r\n used.\r\n checkpoint_number: An optional integer, or an integer-dtype `Variable` or\r\n `Tensor`, used to number the checkpoint. If `None` (default),\r\n checkpoints are numbered using `checkpoint.save_counter`. Even if\r\n `checkpoint_number` is provided, `save_counter` is still incremented. A\r\n user-provided `checkpoint_number` is not incremented even if it is a\r\n `Variable`.\r\n\r\n Returns:\r\n The path to the new checkpoint. 
It is also recorded in the `checkpoints`\r\n and `latest_checkpoint` properies.\r\n \"\"\"\r\n # Save counter logic duplicated from tf.train.Checkpoint, soon to diverge\r\n # slightly with a custom numbering option.\r\n if context.executing_eagerly():\r\n save_counter = self._checkpoint.save_counter\r\n save_counter.assign_add(1)\r\n else:\r\n if session is None:\r\n session = ops.get_default_session()\r\n\r\n def _initializing_creator(next_creator, **kwargs):\r\n \"\"\"Initialize the save counter if it has been newly created.\"\"\"\r\n v = next_creator(**kwargs)\r\n session.run(v.initializer)\r\n return v\r\n\r\n with variable_scope.variable_creator_scope(_initializing_creator):\r\n save_counter = self._checkpoint.save_counter\r\n if self._save_counter_assign is None:\r\n self._save_counter_assign = save_counter.assign_add(1, read_value=False)\r\n session.run(self._save_counter_assign)\r\n if checkpoint_number is None:\r\n checkpoint_number = save_counter\r\n if not isinstance(checkpoint_number, compat.integral_types):\r\n checkpoint_number = training_util.global_step(\r\n sess=session, global_step_tensor=checkpoint_number)\r\n prefix = \"%s-%d\" % (self._prefix, checkpoint_number)\r\n save_path = self._checkpoint.write(prefix)\r\n timestamp = time.time()\r\n # If this is an overwritten checkpoint we were previously tracking, delete\r\n # and reinsert it to make sure it goes to the end of the queue.\r\n if save_path in self._maybe_delete:\r\n del self._maybe_delete[save_path]\r\n self._maybe_delete[save_path] = timestamp\r\n self._latest_checkpoint = save_path\r\n self._sweep()\r\n self._record_state()\r\n return save_path\r\n" ]
[ [ "tensorflow.python.ops.array_ops.reverse", "tensorflow.python.ops.io_ops.TextLineReader", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.state_ops.is_variable_initialized", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.estimator.estimator_lib.inputs.numpy_input_fn", "tensorflow.python.ops.io_ops.TFRecordReader", "tensorflow.python.training.input.batch", "tensorflow.python.ops.parsing_ops.parse_example", "tensorflow.python.ops.math_ops.reduce_all", "tensorflow.python.ops.parsing_ops.decode_csv", "tensorflow.contrib.timeseries.python.timeseries.model_utils.canonicalize_times_or_steps_from_output", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.math_ops.reduce_any", "tensorflow.python.training.input.maybe_shuffle_batch", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.training.training.limit_epochs", "tensorflow.python.training.input.string_input_producer", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.ops.state_ops.assign", "numpy.squeeze", "tensorflow.python.ops.tensor_array_ops.TensorArray", "tensorflow.python.ops.array_ops.stack", "numpy.array" ], [ "tensorflow.python.util.deprecation.deprecated_endpoints", "tensorflow.python.ops.gen_random_ops.parameterized_truncated_normal", "tensorflow.python.ops.gen_random_ops.random_uniform_int", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.math_ops.add", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.ops.math_ops.reduce_all", "tensorflow.python.ops.gen_random_ops.truncated_normal", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.gen_random_ops.random_poisson_v2", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.gen_random_ops.random_standard_normal", "numpy.finfo", "tensorflow.python.ops.gen_random_ops.multinomial", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.framework.random_seed.get_seed", "tensorflow.python.framework.ops.NotDifferentiable", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.gen_random_ops.random_gamma", "tensorflow.python.ops.gen_random_ops.random_shuffle", "tensorflow.python.ops.gen_random_ops.random_uniform" ], [ "tensorflow.python.platform.tf_logging.info", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.lib.io.file_io.delete_file", "tensorflow.python.platform.tf_logging.error", "tensorflow.python.training.training_util.global_step", "tensorflow.python.framework.ops.get_default_session", "tensorflow.python.lib.io.file_io.stat", "tensorflow.python.ops.variable_scope.variable_creator_scope", "tensorflow.python.lib.io.file_io.get_matching_files", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.lib.io.file_io.file_exists", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.lib.io.file_io.read_file_to_string", "tensorflow.python.training.checkpoint_state_pb2.CheckpointState" ] ]
ebrunet28/MultiDecoder-DPRNN
[ "36fd6c35e730379e4f676a25eac451409a01f068" ]
[ "src/data.py" ]
[ "\"\"\"\nDataset classes for variable number of speakers\nAuthor: Junzhe Zhu\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nfrom librosa import load\nfrom time import time\nimport glob\nimport os\nimport random\nimport json\nfrom tqdm import tqdm\ndef load_json(filename):\n with open(filename) as f:\n data = json.load(f)\n return data\n\ndef pad_audio(audio, len_samples=4*8000):\n if len(audio) < len_samples:\n audio = np.concatenate([audio, np.zeros(len_samples - len(audio))])\n return audio\n\nclass MixtureDataset(data.Dataset):\n def __init__(self, root, json_folders, sr=8000, seglen=4.0, minlen=2.0, debug=False): # segment and cv_maxlen not implemented\n \"\"\"\n each line of textfile comes in the form of:\n filename1, dB1, filename2, dB2, ...\n args:\n root: folder where dataset/ is located\n json_folders: folders containing json files, **/dataset/#speakers/wav8k/min/tr/**\n sr: sample rate\n seglen: length of each segment in seconds\n minlen: minimum segment length\n \"\"\"\n str_tmp = '_debug' if debug else ''\n seglen = int(seglen * sr)\n minlen = int(minlen * sr)\n self.sr = sr\n self.mixes = []\n for json_folder in json_folders:\n mixfiles, wavlens = list(zip(*load_json(os.path.join(root + str_tmp, json_folder, 'mix.json')))) # list of 20000 filenames, and 20000 lengths\n mixfiles = [os.path.join(root, mixfile.split('dataset/')[1]) for mixfile in mixfiles]\n sig_json = [load_json(file) for file in sorted(glob.glob(os.path.join(root + str_tmp, json_folder, 's*.json')))] # list C, each have 20000 filenames\n for i, spkr_json in enumerate(sig_json):\n sig_json[i] = [os.path.join(root, line[0].split('dataset/')[1]) for line in spkr_json] # list C, each have 20000 filenames\n siglists = list(zip(*sig_json)) # list of 20000, each have C filenames\n self.mixes += list(zip(mixfiles, siglists, wavlens))\n\n self.examples = []\n for i, mix in enumerate(self.mixes):\n if mix[2] < minlen:\n continue\n start = 0\n while start + minlen <= mix[2]:\n end = min(start + seglen, mix[2])\n self.examples.append({'mixfile': mix[0], 'sourcefiles': mix[1], 'start': start, 'end':end})\n start += minlen\n random.seed(0)\n self.examples = random.sample(self.examples, len(self.examples))\n\n # Count.\n example_source_files_len = [len(tmp['sourcefiles'] )for tmp in self.examples]\n unique, counts = np.unique(np.array(example_source_files_len), return_counts=True)\n self.example_weights =[]\n for tmp in example_source_files_len:\n self.example_weights.append(1./counts[tmp-2])\n self.example_weights = torch.Tensor(self.example_weights)\n def __len__(self):\n return len(self.examples)\n def __getitem__(self, idx):\n \"\"\"\n Returns:\n mixture: [T]\n sources: list of C, each [T]\n \"\"\"\n example = self.examples[idx]\n mixfile, sourcefiles, start, end = example['mixfile'], example['sourcefiles'], example['start'], example['end']\n mixture, sr = load(mixfile, sr=self.sr)\n assert sr == self.sr, 'need to resample'\n mixture = mixture[start:end]\n sources = [load(sourcefile, sr=sr)[0][start:end] for sourcefile in sourcefiles]\n return mixture, sources\n\ndef _collate_fn(batch):\n \"\"\"\n Args:\n batch: list, len(batch) = batch_size, each entry is a tuple of (mixture, sources)\n Returns:\n mixtures_list: B x T, torch.Tensor, padded mixtures\n ilens : B, torch.Tensor, length of each mixture before padding\n sources_list: list of B Tensors, each C x T, where C is (variable) number of source audios\n \"\"\"\n ilens = [] # shape of mixtures\n mixtures = [] # mixtures, same length as 
longest source in whole batch\n sources_list = [] # padded sources, same length as mixtures\n for mixture, sources in batch: # compute length to pad to\n assert len(mixture) == len(sources[0])\n assert len(mixture) <= 32000\n ilens.append(len(mixture))\n mixtures.append(pad_audio(mixture))\n sources = torch.Tensor(np.stack([pad_audio(source) for source in sources], axis=0)).float()\n sources_list.append(sources)\n mixtures = torch.Tensor(np.stack(mixtures, axis=0)).float()\n ilens = torch.Tensor(np.stack(ilens)).int()\n return mixtures, ilens, sources_list\n\nclass TestDataset(data.Dataset):\n def __init__(self, root, json_folders, sr=8000): # segment and cv_maxlen not implemented\n \"\"\"\n each line of textfile comes in the form of:\n filename1, dB1, filename2, dB2, ...\n args:\n root: folder where dataset/ is located\n json_folders: folders containing json files, **/dataset/#speakers/wav8k/min/tr/**\n sr: sample rate\n seglen: length of each segment in seconds\n minlen: minimum segment length\n \"\"\"\n self.sr = sr\n self.mixes = []\n for json_folder in json_folders:\n mixfiles, wavlens = list(zip(*load_json(os.path.join(root, json_folder, 'mix.json')))) # list of 20000 filenames, and 20000 lengths\n mixfiles = [os.path.join(root, mixfile.split('dataset/')[1]) for mixfile in mixfiles]\n sig_json = [load_json(file) for file in sorted(glob.glob(os.path.join(root, json_folder, 's*.json')))] # list C, each have 20000 filenames\n for i, spkr_json in enumerate(sig_json):\n sig_json[i] = [os.path.join(root, line[0].split('dataset/')[1]) for line in spkr_json] # list C, each have 20000 filenames\n siglists = list(zip(*sig_json)) # list of 20000, each have C filenames\n self.mixes += list(zip(mixfiles, siglists, wavlens))\n #printlist(self.mixes)\n self.examples = []\n for i, mix in enumerate(self.mixes):\n self.examples.append({'mixfile': mix[0], 'sourcefiles': mix[1], 'start': 0, 'end': mix[2]})\n random.seed(0)\n self.examples = random.sample(self.examples, len(self.examples))\n def __len__(self):\n return len(self.examples)\n def __getitem__(self, idx):\n \"\"\"\n Returns:\n mixture: [T]\n sources: list of C, each [T]\n \"\"\"\n example = self.examples[idx]\n mixfile, sourcefiles, start, end = example['mixfile'], example['sourcefiles'], example['start'], example['end']\n mixture, sr = load(mixfile, sr=self.sr)\n assert sr == self.sr, 'need to resample'\n mixture = mixture[start:end]\n sources = [load(sourcefile, sr=sr)[0][start:end] for sourcefile in sourcefiles]\n return mixture, sources\n \nif __name__ == \"__main__\":\n root = \"/ws/ifp-10_3/hasegawa/junzhez2/Baseline_Model/dataset\"\n tr_json = [\"2spkr_json/tr/\",\n \"3spkr_json/tr/\",\n \"4spkr_json/tr/\",\n \"5spkr_json/tr/\"]\n val_json = [\"2spkr_json/cv/\",\n \"3spkr_json/cv/\",\n \"4spkr_json/cv/\",\n \"5spkr_json/cv/\"]\n test_json = [\"2spkr_json/tt\",\n \"3spkr_json/tt\",\n \"4spkr_json/tt\",\n \"5spkr_json/tt\"]\n dataset = MixtureDataset(root, tr_json)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=3, collate_fn=_collate_fn)\n print(len(dataset))\n for mixtures, ilens, sources_list in tqdm(dataloader):\n start = time()\n print(mixtures.shape, ilens.shape, [len(sources) for sources in sources_list])\n print(time() - start)\n" ]
[ [ "numpy.array", "torch.utils.data.DataLoader", "numpy.stack", "torch.Tensor" ] ]
cimat-ris/OF-PathPred
[ "85ca275707e5988491d0a510b9d31883824411db" ]
[ "path_prediction/utils/process_file_trajnetplusplus.py" ]
[ "import os, glob, sys, logging, math\nfrom tqdm import tqdm\nimport numpy as np\nfrom .interaction_optical_flow import OpticalFlowSimulator\nfrom .obstacles import load_world_obstacle_polygons\n# Since it is used as a submodule, the trajnetplusplustools directory should be there\nsys.path.append(\"../../trajnetplusplustools\")\nfrom trajnetplusplustools import Reader\n\ndef prepare_data_trajnetplusplus(datasets_path, datasets_names,parameters,keep_neighbors=True):\n \"\"\" Prepares the train/val scenes and corresponding goals\n Parameters\n ----------\n parameters: Experiment_Parameters\n Defines the prediction experiment parameters.\n path:\n Path to the dataset (set of json files)\n\n Returns\n -------\n data : Dictionary\n Contains the different processed data as numpy nd arrays\n \"\"\"\n all_ped_traj_abs = []\n all_neigbors_traj_abs = []\n all_flows = []\n all_visible_neighbors = []\n neighbors_n_max = 0\n primary_path = []\n # Optical flow\n of_sim = OpticalFlowSimulator()\n ## Iterate over file names\n for dataset_name in datasets_names:\n reader = Reader(datasets_path + dataset_name + '.ndjson', scene_type='paths')\n ## Necessary modification of train scene to add filename\n scene = [(dataset_name, s_id, s) for s_id, s in reader.scenes()]\n logging.info(\"File \"+dataset_name+\" with {} scenes.\".format(len(scene)))\n for scene_i, (filename, scene_id, paths) in enumerate(scene):\n # Get the trajectories\n raw_traj_abs = Reader.paths_to_xy(paths)\n ped_traj_abs = raw_traj_abs[:,0,:]\n if ped_traj_abs.shape[0]<1+parameters.obs_len+parameters.pred_len:\n continue\n # Keep the full trajectory of the pedestrian of interest (start at 0)\n all_ped_traj_abs.append(ped_traj_abs)\n # Save info path scene scene_id\n primary_path.append((scene_id, paths[0],reader.scenes_by_id[scene_id]))\n # Neighbors\n neigbors_traj_abs = raw_traj_abs[1:1+parameters.obs_len,1:,:]\n neigbors_traj_abs = np.concatenate([np.ones([neigbors_traj_abs.shape[0],neigbors_traj_abs.shape[1],1]),neigbors_traj_abs],axis=2)\n if keep_neighbors:\n neighbors_n = neigbors_traj_abs.shape[1]\n if neighbors_n>neighbors_n_max:\n neighbors_n_max = neighbors_n\n all_neigbors_traj_abs.append(neigbors_traj_abs)\n # Optical flow\n flow,vis_neigh,__ = of_sim.compute_opticalflow_seq(ped_traj_abs[1:1+parameters.obs_len,:],neigbors_traj_abs[0:parameters.obs_len,:,:], None)\n all_flows.append(flow)\n all_visible_neighbors.append(vis_neigh)\n\n all_ped_traj_abs = np.array(all_ped_traj_abs, dtype=\"float32\")\n all_flows = np.array(all_flows, dtype=\"float32\")\n all_visible_neighbors= np.array(all_visible_neighbors)\n\n # Data sanity check\n logging.debug(\"Checking data consistency\")\n logging.debug(\"Nan in all_ped_traj_abs {} \".format(np.isnan(all_ped_traj_abs).any()))\n logging.debug(\"Nan in all_flows {} \".format(np.isnan(all_flows).any()))\n logging.debug(\"Inf in all_flows {} \".format(np.isinf(all_flows).any()))\n logging.debug(\"Nan in all_visible_neighbors {} \".format(np.isnan(all_visible_neighbors).any()))\n logging.debug(\"Inf in all_visible_neighbors {} \".format(np.isinf(all_visible_neighbors).any()))\n\n if keep_neighbors:\n for i in range(len(all_neigbors_traj_abs)):\n # TODO: avoid using 3 dimensions?\n tmp=np.NaN*np.ones([all_neigbors_traj_abs[i].shape[0],neighbors_n_max,3])\n tmp[:,:all_neigbors_traj_abs[i].shape[1],:]=all_neigbors_traj_abs[i]\n all_neigbors_traj_abs[i]=tmp\n all_neigbors_traj_abs= np.array(all_neigbors_traj_abs)\n logging.info(\"Total trajectories: {}\".format(all_ped_traj_abs.shape[0]))\n\n\n # By 
broadcasting, center these data\n seq_pos_centered_all = all_ped_traj_abs - all_ped_traj_abs[:,parameters.obs_len:parameters.obs_len+1,0:2]\n # Displacements\n seq_rel_all = np.zeros_like(all_ped_traj_abs)\n seq_rel_all[:,1:,:] = all_ped_traj_abs[:,1:,:]-all_ped_traj_abs[:,:-1,:]\n # All directions\n seq_theta_all = np.zeros_like(all_ped_traj_abs[:,:,0:1])\n seq_theta_all[:,:,0] = np.arctan2(seq_rel_all[:,:,1],seq_rel_all[:,:,0])\n # Cosine and sine of the orientation angle at the last observed point\n costheta = np.cos(seq_theta_all[:,parameters.obs_len:parameters.obs_len+1,0:1])\n sintheta = np.sin(seq_theta_all[:,parameters.obs_len:parameters.obs_len+1,0:1])\n seq_pos_rot_all = np.zeros_like(all_ped_traj_abs)\n seq_pos_rot_all[:,:,0:1]= costheta*(seq_pos_centered_all[:,:,0:1])+sintheta*(seq_pos_centered_all[:,:,1:2])\n seq_pos_rot_all[:,:,1:2]=-sintheta*(seq_pos_centered_all[:,:,0:1])+costheta*(seq_pos_centered_all[:,:,1:2])\n # All the displacements are estimated here.\n seq_rel_rot_all = np.zeros_like(seq_pos_rot_all)\n seq_rel_rot_all[:,1:,:] = seq_pos_rot_all[:,1:,:]-seq_pos_rot_all[:,:-1,:]\n # Save all these data as a dictionary\n data = {\n \"obs_traj\": all_ped_traj_abs[:,1:1+parameters.obs_len,:],\n \"obs_traj_rel\": seq_rel_all[:,1:1+parameters.obs_len,:],\n \"obs_traj_theta\":seq_theta_all[:,1:1+parameters.obs_len,:],\n \"obs_optical_flow\": all_flows[:,1:1+parameters.obs_len,:],\n \"obs_visible_neighbors\": all_visible_neighbors[:,1:1+parameters.obs_len,:],\n \"pred_traj\": all_ped_traj_abs[:,1+parameters.obs_len:,:],\n \"pred_traj_rel\": seq_rel_all[:,1+parameters.obs_len:,:],\n \"index\": np.array(range(len(primary_path)))\n }\n if keep_neighbors:\n data[\"obs_neighbors\"] = all_neigbors_traj_abs[:,1:parameters.obs_len+1,:]\n return data, primary_path\n" ]
[ [ "numpy.zeros_like", "numpy.arctan2", "numpy.ones", "numpy.isinf", "numpy.cos", "numpy.isnan", "numpy.array", "numpy.sin" ] ]
Jimmy-INL/OpenPNM
[ "1546fa1ac2204443bde916f2037fac383c5069ae" ]
[ "openpnm/io/Pandas.py" ]
[ "import numpy as np\nimport scipy as sp\nfrom flatdict import FlatDict\nfrom collections import namedtuple\nfrom openpnm.io import Dict, GenericIO\nfrom openpnm.utils import sanitize_dict, logging\nlogger = logging.getLogger(__name__)\n\n\nclass Pandas(GenericIO):\n r\"\"\"\n Combines all data arrays into a Pandas DataFrame object\n\n The structure of a DataFrame is a very close match to OpenPNMs data\n storage. Each key becomes a column header in the Dataframe, and each\n pore or throat entry becomes a row.\n\n Limitations of the DataFrame are the inability to have multidimensional\n data in a single column. The methods on a DataFrame are also oriented\n towards time-series data.\n\n Nonetheless, Pandas offers many useful features such as performing\n statistical analysis on property. DataFrames also offer *many* options for\n exporting to other file formats, so if a format is not yet supported\n by OpenPNM, this could be an solution.\n\n \"\"\"\n @classmethod\n def to_dataframe(cls, network=None, phases=[], join=False, delim=' | '):\n r\"\"\"\n Convert the Network (and optionally Phase) data to Pandas DataFrames.\n\n Parameters\n ----------\n network: OpenPNM Network Object\n The network containing the data to be stored\n\n phases : list of OpenPNM Phase Objects\n The data on each supplied phase will be added to DataFrame\n\n join : boolean\n If ``False`` (default), two DataFrames are returned with *pore*\n data in one, and *throat* data in the other. If ``True`` the pore\n and throat data are combined into a single DataFrame. This can be\n problematic as it will put NaNs into all the *pore* columns which\n are shorter than the *throat* columns.\n\n Returns\n -------\n Pandas ``DataFrame`` object containing property and label data in each\n column. If ``join`` was False (default) the two DataFrames are\n returned i a named tuple, or else a single DataFrame with pore and\n throat data in the same file, despite the column length being\n different.\n\n \"\"\"\n from pandas import DataFrame\n\n project, network, phases = cls._parse_args(network=network,\n phases=phases)\n\n # Initialize pore and throat data dictionary using Dict class\n pdata = Dict.to_dict(network=network, phases=phases, element='pore',\n interleave=True, flatten=True,\n categorize_by=['object'])\n tdata = Dict.to_dict(network=network, phases=phases, element='throat',\n interleave=True, flatten=True,\n categorize_by=['object'])\n pdata = FlatDict(pdata, delimiter=delim)\n tdata = FlatDict(tdata, delimiter=delim)\n\n # Scan data and convert non-1d arrays to multiple columns\n for key in list(pdata.keys()):\n if np.shape(pdata[key]) != (network[0].Np,):\n arr = pdata.pop(key)\n tmp = np.split(arr, arr.shape[1], axis=1)\n cols = range(len(tmp))\n pdata.update({key+'['+str(i)+']': tmp[i].squeeze()\n for i in cols})\n for key in list(tdata.keys()):\n if np.shape(tdata[key]) != (network[0].Nt,):\n arr = tdata.pop(key)\n tmp = np.split(arr, arr.shape[1], axis=1)\n cols = range(len(tmp))\n tdata.update({key+'['+str(i)+']': tmp[i].squeeze()\n for i in cols})\n\n # Convert sanitized dictionaries to DataFrames\n pdata = DataFrame(sanitize_dict(pdata))\n tdata = DataFrame(sanitize_dict(tdata))\n\n # Prepare DataFrames to be returned\n if join:\n data = tdata.join(other=pdata, how='left')\n else:\n nt = namedtuple('dataframes', ('pore', 'throat'))\n data = nt(pore=pdata, throat=tdata)\n\n return data\n\n @classmethod\n def from_dataframe(cls):\n r\"\"\"\n \"\"\"\n raise NotImplementedError()\n" ]
[ [ "numpy.shape", "numpy.split" ] ]
krmurtha/fw-heudiconv
[ "cf41f7e6eb770317ab7c0aec051b4567ab634d01" ]
[ "fw_heudiconv/backend_funcs/convert.py" ]
[ "import logging\nimport re\nimport pdb\nimport operator\nimport pprint\nimport mimetypes\nimport flywheel\nimport json\nimport pandas as pd\nfrom os import path\nfrom pathvalidate import is_valid_filename\nfrom pathlib import Path\nfrom fw_heudiconv.cli.export import get_nested\n\nlogger = logging.getLogger('fw-heudiconv-curator')\n\n\ndef build_intention_path(f):\n \"\"\"Builds a string of the path to the file w.r.t. subject dir\n \"\"\"\n fname = f.info[\"BIDS\"][\"Filename\"]\n folder = f.info[\"BIDS\"][\"Folder\"]\n ses = fname.split(\"_\")[1]\n return(\"/\".join([ses, folder, fname]))\n\n\ndef none_replace(str_input):\n return str_input\n\n\ndef force_template_format(str_input):\n\n # if we get a reproin heuristic, the str format is:\n #\n # {bids_subject_session_dir}/anat/{bids_subject_session_prefix}_scout\n #\n # here we replace the {} with the sub-sess format fw-heudiconv uses\n\n str_input = re.sub(\"{bids_subject_session_dir}\", \"sub-{subject}/ses-{session}\", str_input)\n str_input = re.sub(\"{bids_subject_session_prefix}\", \"sub-{subject}_ses-{session}\", str_input)\n\n # next, we remove extra sub-sub or ses-ses\n str_input = re.sub(\"(?<!ses-){session}\", \"ses-{session}\", str_input)\n str_input = re.sub(\"(?<!sub-){subject}\", \"sub-{subject}\", str_input)\n\n return(str_input)\n\n\ndef force_label_format(str_input):\n\n str_input = re.sub(\"ses-\", \"\", str_input)\n str_input = re.sub(\"sub-\", \"\", str_input)\n\n return(str_input)\n\n\ndef apply_heuristic(client, heur, acquisition_id, dry_run=False, intended_for=[],\n metadata_extras={}, subj_replace=None, ses_replace=None, item_num=1):\n \"\"\" Apply heuristic to rename files\n\n This function applies the specified heuristic to the files given in the\n list of acquisitions.\n\n Args:\n client (Client): The flywheel sdk client\n heur (tuple): 3-tuple, the \"key\" of a seq_info dictionary, where\n the first item of the tuple is the naming convention as a string\n acquisition_ids (list): The \"value\" of a seq_info dictionary, the list\n of acquisitions to which the naming convention applies\n \"\"\"\n suffixes = {'nifti': \".nii.gz\", 'bval': \".bval\", 'bvec': \".bvec\"}\n ftypes = ['nifti', 'bval', 'bvec', 'tsv']\n template, outtype, annotation_classes = heur\n template = force_template_format(template)\n\n subj_replace = none_replace if subj_replace is None else subj_replace\n ses_replace = none_replace if ses_replace is None else ses_replace\n\n acquisition_object = client.get(acquisition_id)\n subj_label = subj_replace(force_label_format(client.get(acquisition_object.parents.subject).label))\n sess_label = ses_replace(force_label_format(client.get(acquisition_object.parents.session).label))\n\n files = [f for f in acquisition_object.files if f.type in ftypes]\n bids_keys = ['sub', 'ses', 'folder', 'name']\n\n files.sort(key=operator.itemgetter(\"name\"))\n for fnum, f in enumerate(files):\n bids_vals = template.format(subject=subj_label, session=sess_label, item=fnum+1, seqitem=item_num).split(\"/\")\n bids_dict = dict(zip(bids_keys, bids_vals))\n suffix = suffixes[f.type]\n\n if 'BIDS' not in f.info:\n f.info['BIDS'] = \"\"\n new_bids = f.info['BIDS']\n if new_bids in (\"NA\", \"\"):\n new_bids = add_empty_bids_fields(bids_dict['folder'], bids_dict['name'])\n new_bids['Filename'] = bids_dict['name']+suffix\n new_bids['Folder'] = bids_dict['folder']\n new_bids['Path'] = \"/\".join([bids_dict['sub'],\n bids_dict['ses'],\n bids_dict['folder']])\n new_bids['error_message'] = \"\"\n new_bids['valid'] = True\n\n 
infer_params_from_filename(new_bids)\n\n destination = \"\\n\" + f.name + \"\\n\\t\" + new_bids['Filename'] + \" -> \" \\\n + new_bids[\"Path\"] + \"/\" + new_bids['Filename']\n logger.debug(destination)\n\n if not dry_run:\n acquisition_object.update_file_info(f.name, {'BIDS': new_bids})\n acquisition_object = client.get(acquisition_id) # Refresh the acquisition object\n\n if intended_for and (f.name.endswith(\".nii.gz\") or f.name.endswith(\".nii\")):\n\n intendeds = [force_template_format(intend)\n for intend in intended_for]\n intendeds = [intend.format(subject=subj_label, session=sess_label)\n for intend in intendeds]\n\n logger.debug(\"%s IntendedFor: %s\", pprint.pformat(new_bids['Filename']),\n pprint.pformat(intendeds))\n if not dry_run:\n acquisition_object.update_file_info(f.name, {'IntendedFor': intendeds})\n acquisition_object = client.get(acquisition_id)\n # Check that it was applied\n file_info = acquisition_object.get_file(f.name)\n assert file_info['info']['IntendedFor'] == intendeds\n logger.debug(\"Applied!\")\n\n if metadata_extras:\n logger.debug(\"%s metadata: %s\", f.name, metadata_extras)\n if not dry_run:\n acquisition_object.update_file_info(f.name, metadata_extras)\n\n\ndef add_empty_bids_fields(folder, fname=None):\n\n if \"fmap\" in folder:\n if not fname:\n logger.debug(\"No filename given, can't set intentions for this fieldmap!\")\n IntendedFor = \"\"\n Modality = \"\"\n else:\n IntendedFor = \"[{'Folder': 'func'}]\"\n Modality = \"fieldmap\"\n new_bids = {\"Acq\": \"\",\n \"Ce\": \"\",\n \"Dir\": \"\",\n \"Echo\": \"\",\n \"error_message\": \"\",\n \"Filename\": \"\",\n \"Folder\": \"fmap\",\n \"ignore\": \"\",\n \"IntendedFor\": \"\",\n \"Mod\": \"\",\n \"Modality\": \"\",\n \"Path\": \"\",\n \"Rec\": \"\",\n \"Run\": \"\",\n \"Task\": \"\",\n \"template\": \"fieldmap_file\",\n \"valid\": False}\n\n elif \"dwi\" in folder:\n\n new_bids = {\"Acq\": \"\",\n \"Ce\": \"\",\n \"Dir\": \"\",\n \"Echo\": \"\",\n \"error_message\": \"\",\n \"Filename\": \"\",\n \"Folder\": \"\",\n \"ignore\": \"\",\n \"IntendedFor\": \"\",\n \"Mod\": \"\",\n \"Modality\": \"dwi\",\n \"Path\": \"\",\n \"Rec\": \"\",\n \"Run\": \"\",\n \"Task\": \"\",\n \"template\": \"diffusion_file\",\n \"valid\": False}\n\n elif \"func\" in folder:\n\n new_bids = {\"Acq\": \"\",\n \"Ce\": \"\",\n \"Dir\": \"\",\n \"Echo\": \"\",\n \"error_message\": \"\",\n \"Filename\": \"\",\n \"Folder\": \"\",\n \"ignore\": \"\",\n \"IntendedFor\": \"\",\n \"Mod\": \"\",\n \"Modality\": \"\",\n \"Path\": \"\",\n \"Rec\": \"\",\n \"Run\": \"\",\n \"Task\": \"\",\n \"template\": \"\",\n \"valid\": False}\n\n elif \"anat\" in folder:\n\n new_bids = {\"Acq\": \"\",\n \"Ce\": \"\",\n \"Dir\": \"\",\n \"Echo\": \"\",\n \"error_message\": \"\",\n \"Filename\": \"\",\n \"Folder\": \"anat\",\n \"ignore\": \"\",\n \"IntendedFor\": \"\",\n \"Mod\": \"\",\n \"Modality\": \"T1w\",\n \"Path\": \"\",\n \"Rec\": \"\",\n \"Run\": \"\",\n \"Task\": \"\",\n \"template\": \"anat_file\",\n \"valid\": False}\n\n else:\n\n new_bids = {\"Acq\": \"\",\n \"Ce\": \"\",\n \"Dir\": \"\",\n \"Echo\": \"\",\n \"error_message\": \"\",\n \"Filename\": \"\",\n \"Folder\": folder,\n \"ignore\": \"\",\n \"IntendedFor\": \"\",\n \"Mod\": \"\",\n \"Modality\": \"\",\n \"Path\": \"\",\n \"Rec\": \"\",\n \"Run\": \"\",\n \"Task\": \"\",\n \"template\": \"\",\n \"valid\": False}\n\n return(new_bids)\n\n\ndef infer_params_from_filename(bdict):\n\n fname = bdict['Filename']\n\n params = ['Acq', 'Ce', 'Dir', 'Echo', 'Mod', 'Rec', 'Run', 'Task']\n 
to_fill = {}\n for x in params:\n search = re.search(r'(?<={}-)[A-Za-z0-9]+(?=_)'.format(x.lower()), fname)\n to_fill[x] = search.group() if search is not None else \"\"\n\n bdict.update(to_fill)\n\n\ndef confirm_intentions(client, session, dry_run=False):\n \"\"\"Ensure that files in \"IntededFor\" will ultimately exist in the BIDS directory.\n \"\"\"\n try:\n acqs = [client.get(s.id) for s in session.acquisitions()]\n acq_files = [f for a in acqs for f in a.files if '.nii' in f.name]\n bids_filenames = [get_nested(f, 'info', 'BIDS', 'Filename') for f in acq_files]\n bids_paths = [get_nested(f, 'info', 'BIDS', 'Path') for f in acq_files]\n full_filenames = []\n for folder, filename in zip(bids_paths, bids_filenames):\n if None in (folder, filename) or '' in (filename, folder):\n continue\n full_filenames.append(folder + \"/\" + filename)\n\n bids_files = [re.sub(\"sub-[a-zA-z0-9]+/\", \"\", x) for x in full_filenames]\n\n # Go through all the acquisitions in the session\n for acq in acqs:\n for acq_file in acq.files:\n if not acq_file.type == 'nifti':\n continue\n intendeds = get_nested(acq_file.to_dict(), 'info', 'IntendedFor')\n if not intendeds:\n continue\n # If there are \"IntendedFor\" values, check that they will exist\n logger.debug(\n \"Ensuring all intentions apply for acquisition %s: %s\",\n acq.label, acq_file.name)\n\n ok_intentions = []\n bad_intentions = []\n for intendedfor in intendeds:\n if intendedfor in bids_files:\n ok_intentions.append(intendedfor)\n else:\n bad_intentions.append(intendedfor)\n\n if bad_intentions:\n logger.warning(\n \"IntendedFor values do not point to a BIDS file: %s\",\n bad_intentions)\n # pdb.set_trace()\n if not dry_run:\n acq.update_file_info(acq_file.name,\n {'IntendedFor': ok_intentions})\n\n except Exception as e:\n logger.warning(\"Trouble updating intentions for this session %s\", session.label)\n logger.warning(e)\n\n\ndef confirm_bids_namespace(project_obj, dry_run):\n\n bids_info = get_nested(project_obj, 'info', 'BIDS')\n if bids_info in (None, ''):\n\n logger.debug(\"{} has no BIDS namespace!\".format(project_obj.label))\n\n if not dry_run:\n\n logger.debug(\"Adding default BIDS namespace...\")\n\n bids = {\n 'BIDS': {'Acknowledgements': '',\n 'Authors': [],\n 'BIDSVersion': '1.0.2',\n 'DatasetDOI': '',\n 'Funding': [],\n 'HowToAcknowledge': '',\n 'License': '',\n 'Name': project_obj.label,\n 'ReferencesAndLinks': [],\n 'template': 'project'}\n }\n\n project_obj.update_info(bids)\n project_obj = project_obj.reload()\n\n return project_obj\n\n\ndef verify_attachment(name, data, dtype='text/tab-separated-values'):\n\n types = mimetypes.types_map\n\n # check for extension\n # if found, check its dtype matches\n ext = path.splitext(name)[1]\n valid_fname = is_valid_filename(name)\n\n if ext:\n\n output_dtype = types.get(ext, None)\n if dtype == output_dtype:\n valid_dtype = True\n else:\n valid_dtype = False\n else:\n # no extension, just check dtype\n valid_dtype = dtype in list(mimetypes.types_map.values())\n\n valid_data = isinstance(data, str)\n\n return valid_fname, valid_data, valid_dtype\n\n\ndef upload_attachment(\n client, target_object, level, attachment_dict,\n subject_rename=None, session_rename=None,\n folders=['anat', 'dwi', 'func', 'fmap', 'perf'],\n dry_run=True\n ):\n '''processes and uploads the attachment\n '''\n\n bids = {\n \"Filename\": None,\n \"Folder\": None,\n \"Path\": None\n }\n\n if level == 'project':\n bids.update({\n \"Filename\": attachment_dict['name'],\n \"Path\": '.'\n })\n else:\n\n # manipulate sub 
and ses labels\n subj_replace = none_replace if subject_rename is None else subject_rename\n subj_label = subj_replace(force_label_format(target_object.subject.label))\n\n ses_replace = none_replace if session_rename is None else session_rename\n sess_label = ses_replace(force_label_format(target_object.label))\n\n attachment_dict['name'] = force_template_format(attachment_dict['name'])\n attachment_dict['name'] = attachment_dict['name'].format(subject=subj_label, session=sess_label)\n\n # get the dir/folder/path\n dirs = Path(attachment_dict['name']).parts\n folder = [x for x in dirs if x in folders]\n if not folder:\n folder = None\n else:\n folder = folder[0]\n\n path = str(Path(attachment_dict['name']).parent)\n\n # get filename\n attachment_dict['name'] = str(Path(attachment_dict['name']).name)\n\n # get BIDS ready\n bids.update({\n \"Filename\": str(Path(attachment_dict['name']).name),\n \"Folder\": folder,\n \"Path\": path\n })\n logger.debug(\n \"Attachment details:\\n\\tFilename: {}\\n\\tData: {}\\n\\tMIMEType: {}\".format(\n attachment_dict['name'], attachment_dict['data'], attachment_dict['type']\n )\n )\n logger.debug(\n \"Updating BIDS: \\n\\t{}\".format(bids)\n )\n\n verify_name, verify_data, verify_type = verify_attachment(\n attachment_dict['name'], attachment_dict['data'], attachment_dict['type']\n )\n\n if not all([verify_name, verify_data, verify_type]):\n\n logger.warning(\"Attachments may not be valid for upload!\")\n logger.debug(\n \"\\tFilename valid: {}\\n\\tData valid: {}\\n\\tMIMEType valid: {}\".format(\n verify_name, verify_data, verify_type\n )\n )\n\n if not dry_run:\n file_spec = flywheel.FileSpec(\n attachment_dict['name'], attachment_dict['data'], attachment_dict['type']\n )\n target_object.upload_file(file_spec)\n target_object = target_object.reload()\n target_object.update_file_info(attachment_dict['name'], {'BIDS': bids})\n logger.info(\"Attachment uploaded!\")\n\ndef parse_validator(path):\n\n with open(path, 'r') as read_file:\n data = json.load(read_file)\n\n issues = data['issues']\n\n def parse_issue(issue_dict):\n\n return_dict = {}\n return_dict['files'] = [get_nested(x, 'file', 'relativePath') for x in issue_dict.get('files', '')]\n return_dict['type'] = issue_dict.get('key' '')\n return_dict['severity'] = issue_dict.get('severity', '')\n return_dict['description'] = issue_dict.get('reason', '')\n return_dict['code'] = issue_dict.get('code', '')\n return_dict['url'] = issue_dict.get('helpUrl', '')\n\n return(return_dict)\n\n df = pd.DataFrame()\n\n for warn in issues['warnings']:\n\n parsed = parse_issue(warn)\n parsed = pd.DataFrame(parsed)\n df = df.append(parsed, ignore_index=True)\n\n for err in issues['errors']:\n\n parsed = parse_issue(err)\n parsed = pd.DataFrame(parsed)\n df = df.append(parsed, ignore_index=True)\n\n return df\n" ]
[ [ "pandas.DataFrame" ] ]
StadlerMaximilian/Detectron.pytorch
[ "b7a7c053b15da21418f53d9e97f4652d0d139523" ]
[ "tools/train_net.py" ]
[ "\"\"\" Training Script \"\"\"\n\nimport argparse\nimport distutils.util\nimport os\nimport sys\nimport pickle\nimport resource\nimport traceback\nimport logging\nfrom collections import defaultdict\n\nimport numpy as np\nimport yaml\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport cv2\ncv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in dataloader\n\nimport _init_paths # pylint: disable=unused-import\nimport nn as mynn\nimport utils.net as net_utils\nimport utils.misc as misc_utils\nfrom core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg\nfrom datasets.roidb import combined_roidb_for_training\nfrom modeling.model_builder import Generalized_RCNN\nfrom roi_data.loader import RoiDataLoader, MinibatchSampler, collate_minibatch\nfrom utils.detectron_weight_helper import load_caffe2_detectron_weights\nfrom utils.logging import log_stats\nfrom utils.timer import Timer\nfrom utils.training_stats import TrainingStats\n\n# OpenCL may be enabled by default in OpenCV3; disable it because it's not\n# thread safe and causes unwanted GPU memory allocations.\ncv2.ocl.setUseOpenCL(False)\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# RuntimeError: received 0 items of ancdata. Issue: pytorch/pytorch#973\nrlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\nresource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))\n\n\ndef parse_args():\n \"\"\"Parse input arguments\"\"\"\n parser = argparse.ArgumentParser(description='Train a X-RCNN network')\n\n parser.add_argument(\n '--dataset', dest='dataset', required=True,\n help='Dataset to use')\n parser.add_argument(\n '--cfg', dest='cfg_file', required=True,\n help='Config file for training (and optionally testing)')\n parser.add_argument(\n '--set', dest='set_cfgs',\n help='Set config keys. Key value sequence seperate by whitespace.'\n 'e.g. [key] [value] [key] [value]',\n default=[], nargs='+')\n\n parser.add_argument(\n '--disp_interval',\n help='Display training info every N iterations',\n default=100, type=int)\n parser.add_argument(\n '--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')\n\n # Optimization\n # These options has the highest prioity and can overwrite the values in config file\n # or values set by set_cfgs. `None` means do not overwrite.\n parser.add_argument(\n '--bs', dest='batch_size',\n help='Explicitly specify to overwrite the value comed from cfg_file.',\n type=int)\n parser.add_argument(\n '--nw', dest='num_workers',\n help='Explicitly specify to overwrite number of workers to load data. Defaults to 4',\n type=int)\n\n parser.add_argument(\n '--o', dest='optimizer', help='Training optimizer.',\n default=None)\n parser.add_argument(\n '--lr', help='Base learning rate.',\n default=None, type=float)\n parser.add_argument(\n '--lr_decay_gamma',\n help='Learning rate decay rate.',\n default=None, type=float)\n parser.add_argument(\n '--lr_decay_epochs',\n help='Epochs to decay the learning rate on. '\n 'Decay happens on the beginning of a epoch. '\n 'Epoch is 0-indexed.',\n default=[4, 5], nargs='+', type=int)\n\n # Epoch\n parser.add_argument(\n '--start_iter',\n help='Starting iteration for first training epoch. 0-indexed.',\n default=0, type=int)\n parser.add_argument(\n '--start_epoch',\n help='Starting epoch count. 
Epoch is 0-indexed.',\n default=0, type=int)\n parser.add_argument(\n '--epochs', dest='num_epochs',\n help='Number of epochs to train',\n default=6, type=int)\n\n # Resume training: requires same iterations per epoch\n parser.add_argument(\n '--resume',\n help='resume to training on a checkpoint',\n action='store_true')\n\n parser.add_argument(\n '--no_save', help='do not save anything', action='store_true')\n\n parser.add_argument(\n '--ckpt_num_per_epoch',\n help='number of checkpoints to save in each epoch. '\n 'Not include the one at the end of an epoch.',\n default=3, type=int)\n\n parser.add_argument(\n '--load_ckpt', help='checkpoint path to load')\n parser.add_argument(\n '--load_detectron', help='path to the detectron weight pickle file')\n\n parser.add_argument(\n '--use_tfboard', help='Use tensorflow tensorboard to log training info',\n action='store_true')\n\n return parser.parse_args()\n\n\ndef main():\n \"\"\"Main function\"\"\"\n\n args = parse_args()\n print('Called with args:')\n print(args)\n\n if not torch.cuda.is_available():\n sys.exit(\"Need a CUDA device to run the code.\")\n\n if args.cuda or cfg.NUM_GPUS > 0:\n cfg.CUDA = True\n else:\n raise ValueError(\"Need Cuda device to run !\")\n\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n ### Adaptively adjust some configs ###\n original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH\n if args.batch_size is None:\n args.batch_size = original_batch_size\n cfg.NUM_GPUS = torch.cuda.device_count()\n assert (args.batch_size % cfg.NUM_GPUS) == 0, \\\n 'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)\n cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS\n print('Batch size change from {} (in config file) to {}'.format(\n original_batch_size, args.batch_size))\n print('NUM_GPUs: %d, TRAIN.IMS_PER_BATCH: %d' % (cfg.NUM_GPUS, cfg.TRAIN.IMS_PER_BATCH))\n\n if args.num_workers is not None:\n cfg.DATA_LOADER.NUM_THREADS = args.num_workers\n print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)\n\n ### Adjust learning based on batch size change linearly\n old_base_lr = cfg.SOLVER.BASE_LR\n cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size\n print('Adjust BASE_LR linearly according to batch size change: {} --> {}'.format(\n old_base_lr, cfg.SOLVER.BASE_LR))\n\n ### Overwrite some solver settings from command line arguments\n if args.optimizer is not None:\n cfg.SOLVER.TYPE = args.optimizer\n if args.lr is not None:\n cfg.SOLVER.BASE_LR = args.lr\n if args.lr_decay_gamma is not None:\n cfg.SOLVER.GAMMA = args.lr_decay_gamma\n\n timers = defaultdict(Timer)\n\n ### Dataset ###\n timers['roidb'].tic()\n roidb, ratio_list, ratio_index = combined_roidb_for_training(\n cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)\n timers['roidb'].toc()\n train_size = len(roidb)\n logger.info('{:d} roidb entries'.format(train_size))\n logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)\n\n sampler = MinibatchSampler(ratio_list, ratio_index)\n dataset = RoiDataLoader(\n roidb,\n cfg.MODEL.NUM_CLASSES,\n training=True)\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=args.batch_size,\n sampler=sampler,\n num_workers=cfg.DATA_LOADER.NUM_THREADS,\n collate_fn=collate_minibatch)\n\n assert_and_infer_cfg()\n\n ### Model ###\n maskRCNN = Generalized_RCNN()\n\n if cfg.CUDA:\n maskRCNN.cuda()\n\n ### Optimizer ###\n bias_params = []\n nonbias_params = []\n for key, value in 
dict(maskRCNN.named_parameters()).items():\n if value.requires_grad:\n if 'bias' in key:\n bias_params.append(value)\n else:\n nonbias_params.append(value)\n params = [\n {'params': nonbias_params,\n 'lr': cfg.SOLVER.BASE_LR,\n 'weight_decay': cfg.SOLVER.WEIGHT_DECAY},\n {'params': bias_params,\n 'lr': cfg.SOLVER.BASE_LR * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),\n 'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0}\n ]\n\n if cfg.SOLVER.TYPE == \"SGD\":\n optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)\n elif cfg.SOLVER.TYPE == \"Adam\":\n optimizer = torch.optim.Adam(params)\n\n ### Load checkpoint\n if args.load_ckpt:\n load_name = args.load_ckpt\n logging.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(maskRCNN, checkpoint['model'])\n if args.resume:\n assert checkpoint['iters_per_epoch'] == train_size // args.batch_size, \\\n \"iters_per_epoch should match for resume\"\n # There is a bug in optimizer.load_state_dict on Pytorch 0.3.1.\n # However it's fixed on master.\n # optimizer.load_state_dict(checkpoint['optimizer'])\n misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])\n if checkpoint['step'] == (checkpoint['iters_per_epoch'] - 1):\n # Resume from end of an epoch\n args.start_epoch = checkpoint['epoch'] + 1\n args.start_iter = 0\n else:\n # Resume from the middle of an epoch.\n # NOTE: dataloader is not synced with previous state\n args.start_epoch = checkpoint['epoch']\n args.start_iter = checkpoint['step'] + 1\n del checkpoint\n torch.cuda.empty_cache()\n\n if args.load_detectron: #TODO resume for detectron weights (load sgd momentum values)\n logging.info(\"loading Detectron weights %s\", args.load_detectron)\n load_caffe2_detectron_weights(maskRCNN, args.load_detectron)\n\n lr = optimizer.param_groups[0]['lr'] # lr of non-bias parameters, for commmand line outputs.\n\n maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],\n minibatch=True)\n\n ### Training Setups ###\n args.run_name = misc_utils.get_run_name()\n output_dir = misc_utils.get_output_dir(args, args.run_name)\n args.cfg_filename = os.path.basename(args.cfg_file)\n\n if not args.no_save:\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n blob = {'cfg': yaml.dump(cfg), 'args': args}\n with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:\n pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)\n\n if args.use_tfboard:\n from tensorboardX import SummaryWriter\n # Set the Tensorboard logger\n tblogger = SummaryWriter(output_dir)\n\n ### Training Loop ###\n maskRCNN.train()\n\n training_stats = TrainingStats(\n args,\n args.disp_interval,\n tblogger if args.use_tfboard and not args.no_save else None)\n\n iters_per_epoch = int(train_size / args.batch_size) # drop last\n args.iters_per_epoch = iters_per_epoch\n ckpt_interval_per_epoch = iters_per_epoch // args.ckpt_num_per_epoch\n try:\n logger.info('Training starts !')\n args.step = args.start_iter\n global_step = iters_per_epoch * args.start_epoch + args.step\n for args.epoch in range(args.start_epoch, args.start_epoch + args.num_epochs):\n # ---- Start of epoch ----\n\n # adjust learning rate\n if args.lr_decay_epochs and args.epoch == args.lr_decay_epochs[0] and args.start_iter == 0:\n args.lr_decay_epochs.pop(0)\n net_utils.decay_learning_rate(optimizer, lr, cfg.SOLVER.GAMMA)\n lr *= cfg.SOLVER.GAMMA\n\n for args.step, input_data in zip(range(args.start_iter, 
iters_per_epoch), dataloader):\n\n for key in input_data:\n if key != 'roidb': # roidb is a list of ndarrays with inconsistent length\n input_data[key] = list(map(Variable, input_data[key]))\n\n training_stats.IterTic()\n net_outputs = maskRCNN(**input_data)\n training_stats.UpdateIterStats(net_outputs)\n loss = net_outputs['total_loss']\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n training_stats.IterToc()\n\n if (args.step+1) % ckpt_interval_per_epoch == 0:\n net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)\n\n if args.step % args.disp_interval == 0:\n log_training_stats(training_stats, global_step, lr)\n\n global_step += 1\n\n # ---- End of epoch ----\n # save checkpoint\n net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)\n # reset starting iter number after first epoch\n args.start_iter = 0\n\n # ---- Training ends ----\n if iters_per_epoch % args.disp_interval != 0:\n # log last stats at the end\n log_training_stats(training_stats, global_step, lr)\n\n except (RuntimeError, KeyboardInterrupt):\n logger.info('Save ckpt on exception ...')\n net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)\n logger.info('Save ckpt done.')\n stack_trace = traceback.format_exc()\n print(stack_trace)\n\n finally:\n if args.use_tfboard and not args.no_save:\n tblogger.close()\n\n\ndef log_training_stats(training_stats, global_step, lr):\n stats = training_stats.GetStats(global_step, lr)\n log_stats(stats, training_stats.misc_args)\n if training_stats.tblogger:\n training_stats.tb_log_stats(stats, global_step)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.cuda.empty_cache", "torch.utils.data.DataLoader", "torch.optim.SGD", "torch.load", "torch.cuda.device_count", "torch.optim.Adam", "torch.cuda.is_available" ] ]
inspurer/ImageProcess
[ "f826c36f3ae17bee5694c3f1748f9e5319a46fd9" ]
[ "codes/3_1.py" ]
[ "# -*- coding: utf-8 -*-\n# pc_type lenovo\n# create_time: 2019/11/9 15:15\n# file_name: 3_1.py\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport random\n# 设置中文字体和负号正常显示\nplt.rcParams['font.sans-serif'] = ['SimHei'] # 指定默认字体\nplt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题\nsns.set_context(\"paper\") # 背景\nsns.set_style('whitegrid') # 主题\nsns.set(font='SimHei') # 解决Seaborn中文显示问题,这一句必须放在前两后面\n\ndef sp_noise(image,prob):\n '''\n 添加椒盐噪声\n prob:噪声比例\n '''\n output = np.zeros(image.shape,np.uint8)\n thres = 1 - prob\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rdn = random.random()\n if rdn < prob:\n output[i][j] = 0\n elif rdn > thres:\n output[i][j] = 255\n else:\n output[i][j] = image[i][j]\n return output\n\ndef gauss_noise(image, mean=0, var=0.001):\n '''\n 添加高斯噪声\n mean : 均值 mean = 0 是高斯白噪声\n var : 方差 方差越大,图像越模糊\n '''\n image = np.array(image / 255, dtype=float)\n noise = np.random.normal(mean, var ** 0.5, image.shape)\n out = image + noise\n if out.min() < 0:\n low_clip = -1.\n else:\n low_clip = 0.\n # 把 out 的元素限制在 low_clip 和 1 之间\n out = np.clip(out, low_clip, 1.0)\n out = out*255\n #cv.imshow(\"gasuss\", out)\n return out\nfrom PIL import Image\n\n# 解决 opencv 不能读取 gif\ngif = cv2.VideoCapture('img/test3.gif')\nret,frame = gif.read()\nimg = Image.fromarray(frame)\n# L : 灰度图 , RGB : RGB 彩色图\nimg = img.convert('L')\nimg = np.array(img)\n\nsp_img = sp_noise(img,0.015)\n\ngs_img = gauss_noise(img,var=0.02)\n\n# 邻域平均法\ndef fspeical_average(image,kernel):\n a = len(kernel)\n kernel = kernel/(a**2)\n step = a//2\n h,w = image.shape[0],image.shape[1]\n nh,nw = h+2*step,w+2*step\n lbimg = np.zeros((nh,nw), np.float32)\n tmpimg = np.zeros((nh,nw))\n newimg = np.array(image)\n tmpimg[step:nh - step, step:nw - step] = newimg[0:h, 0:w]\n for y in range(step, nh - step):\n for x in range(step, nw - step):\n lbimg[y, x] = np.sum(kernel * tmpimg[y - step:y + step + 1, x - step:x + step + 1])\n resultimg = np.array(lbimg[step:nh - step, step:nw - step], np.uint8)\n return resultimg\n# 中值滤波法\ndef fspeical_medium(image,a):\n step = a // 2\n h, w = image.shape[0], image.shape[1]\n nh, nw = h + 2 * step, w + 2 * step\n lbimg = np.zeros((nh, nw), np.float32)\n tmpimg = np.zeros((nh, nw))\n newimg = np.array(image)\n tmpimg[step:nh - step, step:nw - step] = newimg[0:h, 0:w]\n for y in range(step, nh - step):\n for x in range(step, nw - step):\n lbimg[y, x] = np.median(tmpimg[y - step:y + step + 1, x - step:x + step + 1])\n resultimg = np.array(lbimg[step:nh - step, step:nw - step], np.uint8)\n return 
resultimg\n\nplt.figure()\nplt.subplot(2,4,1)\nplt.imshow(img,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,4,5)\nplt.imshow(img,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,4,2)\nplt.imshow(sp_img,cmap='gray')\nplt.title(\"加椒盐噪声\")\nplt.subplot(2,4,3)\nplt.imshow(fspeical_average(sp_img,kernel=np.array([[1,1,1],[1,1,1],[1,1,1]])),cmap='gray')\nplt.title(\"邻域平均法去椒盐噪声(3x3)\")\nplt.subplot(2,4,4)\nplt.imshow(fspeical_average(sp_img,kernel=np.array([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])),cmap='gray')\nplt.title(\"邻域平均法去椒盐噪声(5x5)\")\nplt.subplot(2,4,6)\nplt.imshow(gs_img,cmap='gray')\nplt.title(\"加高斯噪声\")\nplt.subplot(2,4,7)\nplt.imshow(fspeical_average(gs_img,kernel=np.array([[1,1,1],[1,1,1],[1,1,1]])),cmap='gray')\nplt.title(\"邻域平均法去高斯噪声(3x3)\")\nplt.subplot(2,4,8)\nplt.imshow(fspeical_average(gs_img,kernel=np.array([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])),cmap='gray')\nplt.title(\"邻域平均法去高斯噪声(5x5)\")\n\n\nplt.figure()\nplt.subplot(2,4,1)\nplt.imshow(img,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,4,5)\nplt.imshow(img,cmap='gray')\nplt.title(\"原图\")\nplt.subplot(2,4,2)\nplt.imshow(sp_img,cmap='gray')\nplt.title(\"加椒盐噪声\")\nplt.subplot(2,4,3)\nplt.imshow(cv2.medianBlur(sp_img,3),cmap='gray')\nplt.title(\"中值滤波法去椒盐噪声(3x3)\")\nplt.subplot(2,4,4)\nplt.imshow(cv2.medianBlur(sp_img,5),cmap='gray')\nplt.title(\"中值滤波法去椒盐噪声(5x5)\")\nplt.subplot(2,4,6)\nplt.imshow(gs_img,cmap='gray')\nplt.title(\"加高斯噪声\")\n\nplt.subplot(2,4,7)\nplt.imshow(fspeical_medium(gs_img,3),cmap='gray')\nplt.title(\"中值滤波法去高斯噪声(3x3)\")\nplt.subplot(2,4,8)\nplt.imshow(fspeical_medium(gs_img,5),cmap='gray')\nplt.title(\"中值滤波法去高斯噪声(5x5)\")\n\n# for h in range(gs_img.shape[0]):\n# for w in range(gs_img.shape[1]):\n# if gs_img[h][w]<0:\n# gs_img[h][w] = -gs_img[h][w]\n\n# medianBlur 仅接收无符号整数类型元素\n# gs_img = np.uint8(gs_img)\n# print(gs_img)\n# plt.subplot(2,4,7)\n# print(sp_img,gs_img)\n# plt.imshow(cv2.medianBlur(gs_img,3),cmap='gray')\n# plt.title(\"中值滤波法去高斯噪声(3x3)\")\n# plt.subplot(2,4,8)\n# plt.imshow(cv2.medianBlur(gs_img,5),cmap='gray')\n# plt.title(\"中值滤波法去高斯噪声(5x5)\")\n\n\nplt.show()\n\n" ]
[ [ "numpy.sum", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.random.normal", "numpy.median", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "numpy.clip", "numpy.array" ] ]
koheikawata/objectdetectiontest
[ "a4cb01911fa3d0e10bd2c9aa3fd985113af10b1b" ]
[ "research/object_detection_inference_test1.py" ]
[ "import numpy as np\nimport os\nimport tensorflow as tf\nimport time\nimport json\n\nfrom PIL import Image\nfrom object_detection.utils import ops as utils_ops\nfrom object_detection.utils import label_map_util\n\nTHRESHOLD = 0.6\nLABEL_PATH = 'object_detection/test1/pascal_label_map.pbtxt'\nMODEL_PATH = 'object_detection/test1/output/frozen_inference_graph.pb'\nIMAGE_PATH = 'object_detection/test1/JPEGImages/IMG_00000.jpg'\n\nimage = Image.open(IMAGE_PATH)\n(im_width, im_height) = image.size\nimage_np = np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)\nimage_np_expanded = np.expand_dims(image_np, axis=0)\n\nwith tf.gfile.GFile(MODEL_PATH, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\ntf.import_graph_def(graph_def, name='')\nops = tf.get_default_graph().get_operations()\nall_tensor_names = {output.name for op in ops for output in op.outputs}\ntensor_dict = {}\nfor key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)\nimage_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\nstart_time = time.time()\nwith tf.Session() as sess:\n output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image_np_expanded})\nend_time = time.time()\nprint('Inference takes {:.4f} sec'.format(end_time - start_time))\n\noutput_dict['num_detections'] = int(output_dict['num_detections'][0])\noutput_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8).tolist()\noutput_dict['detection_boxes'] = output_dict['detection_boxes'][0].tolist()\noutput_dict['detection_scores'] = output_dict['detection_scores'][0].tolist()\n\ncategory_index = label_map_util.create_category_index_from_labelmap(LABEL_PATH, use_display_name=True)\n\nresult = []\nfor idx, score in enumerate(output_dict['detection_scores']):\n if score > THRESHOLD:\n result.append({\n 'class': output_dict['detection_classes'][idx],\n 'label': category_index[output_dict['detection_classes'][idx]]['name'],\n 'confidence': output_dict['detection_scores'][idx],\n 'bounding_box': output_dict['detection_boxes'][idx]\n })\n\njson.dumps(result)" ]
[ [ "tensorflow.gfile.GFile", "numpy.expand_dims", "tensorflow.Session", "tensorflow.import_graph_def", "tensorflow.get_default_graph", "tensorflow.GraphDef" ] ]
ManeeshaPerera/forecast-framework
[ "60a22af4a97aec10c8bbea7f3f833061283382cb" ]
[ "run_combinations.py" ]
[ "from combinations.equal_weight import EqualWeight\nfrom combinations.pso_model import PSO\nfrom combinations.recursive_method import RecursiveEnsemble\nimport constants as const\nimport pandas as pd\nimport numpy as np\n\n\ndef run_combinations(horizon, forecast, forecast_test, data_train, data_out_sample):\n weights = {'weight': [], 'method': [], 'comb_method': []}\n horizon_info = const.HORIZON_INFO[horizon]\n seasonality = horizon_info['arima_params'][\n 'seasonal_freq']\n methods = forecast.columns.tolist()\n\n pso_initial_options = {'c1': [0, 10],\n 'c2': [0, 10],\n 'w': [0, 10],\n 'k': [1, 20],\n 'p': 2}\n num_pso_particles = 100\n\n # Run equal weight\n equal_weight = EqualWeight(forecast)\n equal_weight.find_weights()\n\n add_weights(weights, equal_weight.weights, methods, 'average')\n\n eq_fc = equal_weight.get_forecast(forecast)\n eq_fc_test = equal_weight.get_forecast(forecast_test)\n\n # Run PSO\n dimension = len(forecast.columns)\n pso = PSO(forecast, data_train, data_out_sample, dimension, num_pso_particles,\n horizon_info['horizon_as_int'],\n seasonality, options=pso_initial_options)\n pso.hyper_parameter_search()\n pso.find_weights()\n add_weights(weights, pso.weights, methods, 'pso- unconstrained')\n pso_fc = pso.get_forecast(forecast)\n pso_fc_test = pso.get_forecast(forecast_test)\n\n # PSO with bounds\n pso_b = PSO(forecast, data_train, data_out_sample, dimension, num_pso_particles,\n horizon_info['horizon_as_int'],\n seasonality, options=pso_initial_options, bounds=(np.array([0, 0, 0, 0, 0]), np.array([1, 1, 1, 1, 1])))\n pso_b.hyper_parameter_search()\n pso_b.find_weights()\n add_weights(weights, pso_b.weights, methods, 'pso [0,1]')\n pso_b_fc = pso_b.get_forecast(forecast)\n pso_b_fc_test = pso_b.get_forecast(forecast_test)\n\n # Add to Unity\n pso_b.weights = pso_b.weights / pso_b.weights.sum()\n add_weights(weights, pso_b.weights, methods, 'pso- convex')\n pso_b_fc_scaled = pso_b.get_forecast(forecast)\n pso_b_fc_test_scaled = pso_b.get_forecast(forecast_test)\n\n # Run recursive ensemble\n print(\"start recursive ensemble\")\n matrix = np.identity(len(forecast.columns))\n re = RecursiveEnsemble(forecast, data_train, data_out_sample, horizon_info['horizon_as_int'], matrix, seasonality,\n 0.001)\n re.find_weights()\n add_weights(weights, re.weights, methods, 're')\n re_fc = re.get_forecast(forecast)\n re_fc_test = re.get_forecast(forecast_test)\n\n train = pd.concat([pso_fc, pso_b_fc, pso_b_fc_scaled, eq_fc, re_fc], axis=1)\n train.columns = ['pso- unconstrained', 'pso [0,1]', 'pso- convex', 'average', 're']\n\n test = pd.concat([pso_fc_test, pso_b_fc_test, pso_b_fc_test_scaled, eq_fc_test, re_fc_test], axis=1)\n test.columns = ['pso- unconstrained', 'pso [0,1]', 'pso- convex', 'average', 're']\n\n return train, test, pd.DataFrame(weights)\n\n\ndef add_weights(dic, weights, methods, comb_name):\n for w in range(0, len(weights)):\n dic['weight'].append(weights[w])\n dic['method'].append(methods[w])\n dic['comb_method'].append(comb_name)\n" ]
[ [ "numpy.array", "pandas.DataFrame", "pandas.concat" ] ]