repo_name (string, lengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
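Each row pairs a repository name with parallel sequences of commit hashes (`hexsha`), file paths, and full file contents (`code`), along with the APIs detected in each file and the library versions they are presumably compatible with (`possible_versions`). Below is a minimal sketch of how such a record might be consumed, assuming the dump is exported as JSON Lines with one object per repository and field names matching the header above; the file name `rows.jsonl` is hypothetical.

```python
# A minimal sketch, assuming a JSON Lines export of this table with one
# object per repository. "rows.jsonl" is a hypothetical file name.
import json

with open("rows.jsonl") as f:
    for line in f:
        row = json.loads(line)
        # hexsha, file_path, and code are parallel sequences: index i of
        # each describes the same file snapshot within the repository.
        for sha, path, source in zip(row["hexsha"], row["file_path"], row["code"]):
            print(f'{row["repo_name"]} {path} ({sha[:8]}): {len(source)} chars')
```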
dom-s/transformers | [
"83446a88d902661fab12bf8c37a1aa2845cdca5f",
"66ef8faf6ae805aeb4e71075d4da6eab7be3bc26"
] | [
"src/transformers/modeling_albert.py",
"src/transformers/modeling_tf_bert.py"
] | [
"# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch ALBERT model. \"\"\"\n\nimport logging\nimport math\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom transformers.configuration_albert import AlbertConfig\nfrom transformers.modeling_bert import ACT2FN, BertEmbeddings, BertSelfAttention, prune_linear_layer\nfrom transformers.modeling_utils import PreTrainedModel\n\nfrom .file_utils import add_start_docstrings, add_start_docstrings_to_callable\n\n\nlogger = logging.getLogger(__name__)\n\n\nALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {\n \"albert-base-v1\": \"https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-pytorch_model.bin\",\n \"albert-large-v1\": \"https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-pytorch_model.bin\",\n \"albert-xlarge-v1\": \"https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-pytorch_model.bin\",\n \"albert-xxlarge-v1\": \"https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-pytorch_model.bin\",\n \"albert-base-v2\": \"https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-pytorch_model.bin\",\n \"albert-large-v2\": \"https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-pytorch_model.bin\",\n \"albert-xlarge-v2\": \"https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-pytorch_model.bin\",\n \"albert-xxlarge-v2\": \"https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-pytorch_model.bin\",\n}\n\n\ndef load_tf_weights_in_albert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n print(name)\n\n for name, array in zip(names, arrays):\n original_name = name\n\n # If saved from the TF HUB module\n name = name.replace(\"module/\", \"\")\n\n # Renaming and simplifying\n name = name.replace(\"ffn_1\", \"ffn\")\n name = name.replace(\"bert/\", \"albert/\")\n name = name.replace(\"attention_1\", \"attention\")\n name = name.replace(\"transform/\", \"\")\n name = name.replace(\"LayerNorm_1\", \"full_layer_layer_norm\")\n name = name.replace(\"LayerNorm\", \"attention/LayerNorm\")\n name = name.replace(\"transformer/\", \"\")\n\n # The feed forward layer had an 'intermediate' step which has been abstracted away\n name = name.replace(\"intermediate/dense/\", \"\")\n name = name.replace(\"ffn/intermediate/output/dense/\", \"ffn_output/\")\n\n # ALBERT attention was split between self and output which have been abstracted away\n name = name.replace(\"/output/\", \"/\")\n name = name.replace(\"/self/\", \"/\")\n\n # The pooler is a linear layer\n name = name.replace(\"pooler/dense\", \"pooler\")\n\n # The classifier was simplified to predictions from cls/predictions\n name = name.replace(\"cls/predictions\", \"predictions\")\n name = name.replace(\"predictions/attention\", \"predictions\")\n\n # Naming was changed to be more explicit\n name = name.replace(\"embeddings/attention\", \"embeddings\")\n name = name.replace(\"inner_group_\", \"albert_layers/\")\n name = name.replace(\"group_\", \"albert_layer_groups/\")\n\n # Classifier\n if len(name.split(\"/\")) == 1 and (\"output_bias\" in name or \"output_weights\" in name):\n name = \"classifier/\" + name\n\n # No ALBERT model currently handles the next sentence prediction task\n if \"seq_relationship\" in name:\n continue\n\n name = name.split(\"/\")\n\n # Ignore the gradients applied by the LAMB/ADAM optimizers.\n if \"adam_m\" in name or \"adam_v\" in name or \"global_step\" in name:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, 
array.shape)\n raise\n print(\"Initialize PyTorch weight {} from {}\".format(name, original_name))\n pointer.data = torch.from_numpy(array)\n\n return model\n\n\nclass AlbertEmbeddings(BertEmbeddings):\n \"\"\"\n Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n\n self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)\n self.LayerNorm = torch.nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)\n\n\nclass AlbertAttention(BertSelfAttention):\n def __init__(self, config):\n super().__init__(config)\n\n self.output_attentions = config.output_attentions\n self.num_attention_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n self.attention_head_size = config.hidden_size // config.num_attention_heads\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n mask = torch.ones(self.num_attention_heads, self.attention_head_size)\n heads = set(heads) - self.pruned_heads # Convert to set and emove already pruned heads\n for head in heads:\n # Compute how many pruned heads are before the head and move the index accordingly\n head = head - sum(1 if h < head else 0 for h in self.pruned_heads)\n mask[head] = 0\n mask = mask.view(-1).contiguous().eq(1)\n index = torch.arange(len(mask))[mask].long()\n\n # Prune linear layers\n self.query = prune_linear_layer(self.query, index)\n self.key = prune_linear_layer(self.key, index)\n self.value = prune_linear_layer(self.value, index)\n self.dense = prune_linear_layer(self.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.num_attention_heads = self.num_attention_heads - len(heads)\n self.all_head_size = self.attention_head_size * self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(self, input_ids, attention_mask=None, head_mask=None):\n mixed_query_layer = self.query(input_ids)\n mixed_key_layer = self.key(input_ids)\n mixed_value_layer = self.value(input_ids)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = 
torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n\n # Should find a better way to do this\n w = (\n self.dense.weight.t()\n .view(self.num_attention_heads, self.attention_head_size, self.hidden_size)\n .to(context_layer.dtype)\n )\n b = self.dense.bias.to(context_layer.dtype)\n\n projected_context_layer = torch.einsum(\"bfnd,ndh->bfh\", context_layer, w) + b\n projected_context_layer_dropout = self.dropout(projected_context_layer)\n layernormed_context_layer = self.LayerNorm(input_ids + projected_context_layer_dropout)\n return (layernormed_context_layer, attention_probs) if self.output_attentions else (layernormed_context_layer,)\n\n\nclass AlbertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.config = config\n self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.attention = AlbertAttention(config)\n self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)\n self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)\n self.activation = ACT2FN[config.hidden_act]\n\n def forward(self, hidden_states, attention_mask=None, head_mask=None):\n attention_output = self.attention(hidden_states, attention_mask, head_mask)\n ffn_output = self.ffn(attention_output[0])\n ffn_output = self.activation(ffn_output)\n ffn_output = self.ffn_output(ffn_output)\n hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])\n\n return (hidden_states,) + attention_output[1:] # add attentions if we output them\n\n\nclass AlbertLayerGroup(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])\n\n def forward(self, hidden_states, attention_mask=None, head_mask=None):\n layer_hidden_states = ()\n layer_attentions = ()\n\n for layer_index, albert_layer in enumerate(self.albert_layers):\n layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index])\n hidden_states = layer_output[0]\n\n if self.output_attentions:\n layer_attentions = layer_attentions + (layer_output[1],)\n\n if self.output_hidden_states:\n layer_hidden_states = layer_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_hidden_states:\n outputs = outputs + (layer_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (layer_attentions,)\n return outputs # last-layer hidden state, (layer hidden states), (layer attentions)\n\n\nclass AlbertTransformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.config = config\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)\n self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])\n\n def forward(self, hidden_states, attention_mask=None, head_mask=None):\n hidden_states = self.embedding_hidden_mapping_in(hidden_states)\n\n all_attentions = ()\n\n if self.output_hidden_states:\n all_hidden_states = (hidden_states,)\n\n for i in range(self.config.num_hidden_layers):\n # Number of layers in a hidden group\n layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)\n\n # Index of the hidden group\n group_idx = int(i / 
(self.config.num_hidden_layers / self.config.num_hidden_groups))\n\n layer_group_output = self.albert_layer_groups[group_idx](\n hidden_states,\n attention_mask,\n head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],\n )\n hidden_states = layer_group_output[0]\n\n if self.output_attentions:\n all_attentions = all_attentions + layer_group_output[-1]\n\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs # last-layer hidden state, (all hidden states), (all attentions)\n\n\nclass AlbertPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = AlbertConfig\n pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP\n base_model_prefix = \"albert\"\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, (nn.Linear)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nALBERT_START_DOCSTRING = r\"\"\"\n\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Args:\n config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nALBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.AlbertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.encode_plus` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n\n `What are token type IDs? 
<../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertModel(AlbertPreTrainedModel):\n\n config_class = AlbertConfig\n pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP\n load_tf_weights = load_tf_weights_in_albert\n base_model_prefix = \"albert\"\n\n def __init__(self, config):\n super().__init__(config)\n\n self.config = config\n self.embeddings = AlbertEmbeddings(config)\n self.encoder = AlbertTransformer(config)\n self.pooler = nn.Linear(config.hidden_size, config.hidden_size)\n self.pooler_activation = nn.Tanh()\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.embeddings.word_embeddings\n new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)\n self.embeddings.word_embeddings = new_embeddings\n return self.embeddings.word_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups.\n If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there\n is a total of 4 different layers.\n\n These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,\n while [2,3] correspond to the two inner groups of the second hidden layer.\n\n Any layer with in index other than [0,1,2,3] will result in an error.\n See base class PreTrainedModel for more information about head pruning\n \"\"\"\n for layer, heads in heads_to_prune.items():\n group_idx = int(layer / self.config.inner_group_num)\n inner_group_idx = int(layer - group_idx * self.config.inner_group_num)\n self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n ):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising 
various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during pre-training.\n\n This output is usually *not* a good summary\n of the semantic content of the input, you're often better with averaging or pooling\n the sequence of hidden-states for the whole input sequence.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Example::\n\n from transformers import AlbertModel, AlbertTokenizer\n import torch\n\n tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')\n model = AlbertModel.from_pretrained('albert-base-v2')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = (\n head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n ) # We can specify head_mask for each layer\n head_mask = head_mask.to(\n dtype=next(self.parameters()).dtype\n ) # switch to fload if need + fp16 compatibility\n else:\n head_mask = [None] * self.config.num_hidden_layers\n\n embedding_output = self.embeddings(\n input_ids, 
position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask)\n\n sequence_output = encoder_outputs[0]\n\n pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0]))\n\n outputs = (sequence_output, pooled_output) + encoder_outputs[\n 1:\n ] # add hidden_states and attentions if they are here\n return outputs\n\n\nclass AlbertMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.LayerNorm = nn.LayerNorm(config.embedding_size)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n self.dense = nn.Linear(config.hidden_size, config.embedding_size)\n self.decoder = nn.Linear(config.embedding_size, config.vocab_size)\n self.activation = ACT2FN[config.hidden_act]\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.activation(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n hidden_states = self.decoder(hidden_states)\n\n prediction_scores = hidden_states + self.bias\n\n return prediction_scores\n\n\n@add_start_docstrings(\n \"Albert Model with a `language modeling` head on top.\", ALBERT_START_DOCSTRING,\n)\nclass AlbertForMaskedLM(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config)\n self.predictions = AlbertMLMHead(config)\n\n self.init_weights()\n self.tie_weights()\n\n def tie_weights(self):\n self._tie_or_clone_weights(self.predictions.decoder, self.albert.embeddings.word_embeddings)\n\n def get_output_embeddings(self):\n return self.predictions.decoder\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n masked_lm_labels=None,\n ):\n r\"\"\"\n masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with\n labels in ``[0, ..., config.vocab_size]``\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:\n loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Masked language modeling loss.\n prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for 
each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Example::\n\n from transformers import AlbertTokenizer, AlbertForMaskedLM\n import torch\n\n tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')\n model = AlbertForMaskedLM.from_pretrained('albert-base-v2')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, masked_lm_labels=input_ids)\n loss, prediction_scores = outputs[:2]\n\n \"\"\"\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_outputs = outputs[0]\n\n prediction_scores = self.predictions(sequence_outputs)\n\n outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here\n if masked_lm_labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n outputs = (masked_lm_loss,) + outputs\n\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForSequenceClassification(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:\n loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification (or regression if config.num_labels==1) loss.\n logits ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, 
num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import AlbertTokenizer, AlbertForSequenceClassification\n import torch\n\n tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')\n model = AlbertForSequenceClassification.from_pretrained('albert-base-v2')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, logits = outputs[:2]\n\n \"\"\"\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForQuestionAnswering(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:\n loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_scores ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``\n Span-start scores (before SoftMax).\n end_scores: ``torch.FloatTensor`` of shape 
``(batch_size, sequence_length,)``\n Span-end scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n # The checkpoint albert-base-v2 is not fine-tuned for question answering. Please see the\n # examples/run_squad.py example to see how to fine-tune a model to a question answering task.\n\n from transformers import AlbertTokenizer, AlbertForQuestionAnswering\n import torch\n\n tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')\n model = AlbertForQuestionAnswering.from_pretrained('albert-base-v2')\n question, text = \"Who was Jim Henson?\", \"Jim Henson was a nice puppet\"\n input_dict = tokenizer.encode_plus(question, text, return_tensors='pt')\n start_scores, end_scores = model(**input_dict)\n\n \"\"\"\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n outputs = (start_logits, end_logits,) + outputs[2:]\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n outputs = (total_loss,) + outputs\n\n return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)\n",
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 BERT model. \"\"\"\n\n\nimport logging\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom .configuration_bert import BertConfig\nfrom .file_utils import MULTIPLE_CHOICE_DUMMY_INPUTS, add_start_docstrings, add_start_docstrings_to_callable\nfrom .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list\n\n\nlogger = logging.getLogger(__name__)\n\n\nTF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {\n \"bert-base-uncased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tf_model.h5\",\n \"bert-large-uncased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-tf_model.h5\",\n \"bert-base-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-tf_model.h5\",\n \"bert-large-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-tf_model.h5\",\n \"bert-base-multilingual-uncased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-tf_model.h5\",\n \"bert-base-multilingual-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-tf_model.h5\",\n \"bert-base-chinese\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-tf_model.h5\",\n \"bert-base-german-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-tf_model.h5\",\n \"bert-large-uncased-whole-word-masking\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-tf_model.h5\",\n \"bert-large-cased-whole-word-masking\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-tf_model.h5\",\n \"bert-large-uncased-whole-word-masking-finetuned-squad\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-tf_model.h5\",\n \"bert-large-cased-whole-word-masking-finetuned-squad\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-tf_model.h5\",\n \"bert-base-cased-finetuned-mrpc\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-tf_model.h5\",\n \"bert-base-japanese\": \"https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-tf_model.h5\",\n \"bert-base-japanese-whole-word-masking\": \"https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-tf_model.h5\",\n \"bert-base-japanese-char\": \"https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-tf_model.h5\",\n \"bert-base-japanese-char-whole-word-masking\": \"https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-tf_model.h5\",\n \"bert-base-finnish-cased-v1\": 
\"https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/tf_model.h5\",\n \"bert-base-finnish-uncased-v1\": \"https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/tf_model.h5\",\n}\n\n\ndef gelu(x):\n \"\"\" Gaussian Error Linear Unit.\n Original Implementation of the gelu activation function in Google Bert repo when initially created.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))\n return x * cdf\n\n\ndef gelu_new(x):\n \"\"\"Gaussian Error Linear Unit.\n This is a smoother version of the RELU.\n Original paper: https://arxiv.org/abs/1606.08415\n Args:\n x: float Tensor to perform activation.\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf\n\n\ndef swish(x):\n return x * tf.sigmoid(x)\n\n\nACT2FN = {\n \"gelu\": tf.keras.layers.Activation(gelu),\n \"relu\": tf.keras.activations.relu,\n \"swish\": tf.keras.layers.Activation(swish),\n \"gelu_new\": tf.keras.layers.Activation(gelu_new),\n}\n\n\nclass TFBertEmbeddings(tf.keras.layers.Layer):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.vocab_size = config.vocab_size\n self.hidden_size = config.hidden_size\n self.initializer_range = config.initializer_range\n\n self.position_embeddings = tf.keras.layers.Embedding(\n config.max_position_embeddings,\n config.hidden_size,\n embeddings_initializer=get_initializer(self.initializer_range),\n name=\"position_embeddings\",\n )\n self.token_type_embeddings = tf.keras.layers.Embedding(\n config.type_vocab_size,\n config.hidden_size,\n embeddings_initializer=get_initializer(self.initializer_range),\n name=\"token_type_embeddings\",\n )\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def build(self, input_shape):\n \"\"\"Build shared word embedding layer \"\"\"\n with tf.name_scope(\"word_embeddings\"):\n # Create and initialize weights. 
The random normal initializer was chosen\n # arbitrarily, and works well.\n self.word_embeddings = self.add_weight(\n \"weight\",\n shape=[self.vocab_size, self.hidden_size],\n initializer=get_initializer(self.initializer_range),\n )\n super().build(input_shape)\n\n def call(self, inputs, mode=\"embedding\", training=False):\n \"\"\"Get token embeddings of inputs.\n Args:\n inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids)\n mode: string, a valid value is one of \"embedding\" and \"linear\".\n Returns:\n outputs: (1) If mode == \"embedding\", output embedding tensor, float32 with\n shape [batch_size, length, embedding_size]; (2) mode == \"linear\", output\n linear tensor, float32 with shape [batch_size, length, vocab_size].\n Raises:\n ValueError: if mode is not valid.\n\n Shared weights logic adapted from\n https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24\n \"\"\"\n if mode == \"embedding\":\n return self._embedding(inputs, training=training)\n elif mode == \"linear\":\n return self._linear(inputs)\n else:\n raise ValueError(\"mode {} is not valid.\".format(mode))\n\n def _embedding(self, inputs, training=False):\n \"\"\"Applies embedding based on inputs tensor.\"\"\"\n input_ids, position_ids, token_type_ids, inputs_embeds = inputs\n\n if input_ids is not None:\n input_shape = shape_list(input_ids)\n else:\n input_shape = shape_list(inputs_embeds)[:-1]\n\n seq_length = input_shape[1]\n if position_ids is None:\n position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :]\n if token_type_ids is None:\n token_type_ids = tf.fill(input_shape, 0)\n\n if inputs_embeds is None:\n inputs_embeds = tf.gather(self.word_embeddings, input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings, training=training)\n return embeddings\n\n def _linear(self, inputs):\n \"\"\"Computes logits by running inputs through a linear layer.\n Args:\n inputs: A float32 tensor with shape [batch_size, length, hidden_size]\n Returns:\n float32 tensor with shape [batch_size, length, vocab_size].\n \"\"\"\n batch_size = shape_list(inputs)[0]\n length = shape_list(inputs)[1]\n\n x = tf.reshape(inputs, [-1, self.hidden_size])\n logits = tf.matmul(x, self.word_embeddings, transpose_b=True)\n\n return tf.reshape(logits, [batch_size, length, self.vocab_size])\n\n\nclass TFBertSelfAttention(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n )\n self.output_attentions = config.output_attentions\n\n self.num_attention_heads = config.num_attention_heads\n assert config.hidden_size % config.num_attention_heads == 0\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = tf.keras.layers.Dense(\n self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name=\"query\"\n )\n self.key = tf.keras.layers.Dense(\n self.all_head_size, 
kernel_initializer=get_initializer(config.initializer_range), name=\"key\"\n )\n self.value = tf.keras.layers.Dense(\n self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name=\"value\"\n )\n\n self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, inputs, training=False):\n hidden_states, attention_mask, head_mask = inputs\n\n batch_size = shape_list(hidden_states)[0]\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)\n key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)\n value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = tf.matmul(\n query_layer, key_layer, transpose_b=True\n ) # (batch size, num_heads, seq_len_q, seq_len_k)\n dk = tf.cast(shape_list(key_layer)[-1], tf.float32) # scale attention_scores\n attention_scores = attention_scores / tf.math.sqrt(dk)\n\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in TFBertModel call() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = tf.nn.softmax(attention_scores, axis=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs, training=training)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = tf.matmul(attention_probs, value_layer)\n\n context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])\n context_layer = tf.reshape(\n context_layer, (batch_size, -1, self.all_head_size)\n ) # (batch_size, seq_len_q, all_head_size)\n\n outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)\n return outputs\n\n\nclass TFBertSelfOutput(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def call(self, inputs, training=False):\n hidden_states, input_tensor = inputs\n\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass TFBertAttention(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.self_attention = TFBertSelfAttention(config, name=\"self\")\n self.dense_output = TFBertSelfOutput(config, name=\"output\")\n\n def prune_heads(self, heads):\n raise NotImplementedError\n\n def call(self, inputs, training=False):\n input_tensor, attention_mask, head_mask = inputs\n\n self_outputs = self.self_attention([input_tensor, attention_mask, head_mask], 
training=training)\n attention_output = self.dense_output([self_outputs[0], input_tensor], training=training)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass TFBertIntermediate(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def call(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass TFBertOutput(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def call(self, inputs, training=False):\n hidden_states, input_tensor = inputs\n\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass TFBertLayer(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.attention = TFBertAttention(config, name=\"attention\")\n self.intermediate = TFBertIntermediate(config, name=\"intermediate\")\n self.bert_output = TFBertOutput(config, name=\"output\")\n\n def call(self, inputs, training=False):\n hidden_states, attention_mask, head_mask = inputs\n\n attention_outputs = self.attention([hidden_states, attention_mask, head_mask], training=training)\n attention_output = attention_outputs[0]\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.bert_output([intermediate_output, attention_output], training=training)\n outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass TFBertEncoder(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.layer = [TFBertLayer(config, name=\"layer_._{}\".format(i)) for i in range(config.num_hidden_layers)]\n\n def call(self, inputs, training=False):\n hidden_states, attention_mask, head_mask = inputs\n\n all_hidden_states = ()\n all_attentions = ()\n for i, layer_module in enumerate(self.layer):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module([hidden_states, attention_mask, head_mask[i]], training=training)\n hidden_states = layer_outputs[0]\n\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs # outputs, (hidden states), (attentions)\n\n\nclass TFBertPooler(tf.keras.layers.Layer):\n def 
__init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n activation=\"tanh\",\n name=\"dense\",\n )\n\n def call(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n return pooled_output\n\n\nclass TFBertPredictionHeadTransform(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name=\"dense\"\n )\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n\n def call(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass TFBertLMPredictionHead(tf.keras.layers.Layer):\n def __init__(self, config, input_embeddings, **kwargs):\n super().__init__(**kwargs)\n self.vocab_size = config.vocab_size\n self.transform = TFBertPredictionHeadTransform(config, name=\"transform\")\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.input_embeddings = input_embeddings\n\n def build(self, input_shape):\n self.bias = self.add_weight(shape=(self.vocab_size,), initializer=\"zeros\", trainable=True, name=\"bias\")\n super().build(input_shape)\n\n def call(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.input_embeddings(hidden_states, mode=\"linear\")\n hidden_states = hidden_states + self.bias\n return hidden_states\n\n\nclass TFBertMLMHead(tf.keras.layers.Layer):\n def __init__(self, config, input_embeddings, **kwargs):\n super().__init__(**kwargs)\n self.predictions = TFBertLMPredictionHead(config, input_embeddings, name=\"predictions\")\n\n def call(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass TFBertNSPHead(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.seq_relationship = tf.keras.layers.Dense(\n 2, kernel_initializer=get_initializer(config.initializer_range), name=\"seq_relationship\"\n )\n\n def call(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\nclass TFBertMainLayer(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.num_hidden_layers = config.num_hidden_layers\n\n self.embeddings = TFBertEmbeddings(config, name=\"embeddings\")\n self.encoder = TFBertEncoder(config, name=\"encoder\")\n self.pooler = TFBertPooler(config, name=\"pooler\")\n\n def get_input_embeddings(self):\n return self.embeddings\n\n def _resize_token_embeddings(self, new_num_tokens):\n raise NotImplementedError\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n raise NotImplementedError\n\n def call(\n self,\n inputs,\n attention_mask=None,\n 
token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n training=False,\n ):\n if isinstance(inputs, (tuple, list)):\n input_ids = inputs[0]\n attention_mask = inputs[1] if len(inputs) > 1 else attention_mask\n token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids\n position_ids = inputs[3] if len(inputs) > 3 else position_ids\n head_mask = inputs[4] if len(inputs) > 4 else head_mask\n inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds\n assert len(inputs) <= 6, \"Too many inputs.\"\n elif isinstance(inputs, dict):\n input_ids = inputs.get(\"input_ids\")\n attention_mask = inputs.get(\"attention_mask\", attention_mask)\n token_type_ids = inputs.get(\"token_type_ids\", token_type_ids)\n position_ids = inputs.get(\"position_ids\", position_ids)\n head_mask = inputs.get(\"head_mask\", head_mask)\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\n assert len(inputs) <= 6, \"Too many inputs.\"\n else:\n input_ids = inputs\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = shape_list(input_ids)\n elif inputs_embeds is not None:\n input_shape = shape_list(inputs_embeds)[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if attention_mask is None:\n attention_mask = tf.fill(input_shape, 1)\n if token_type_ids is None:\n token_type_ids = tf.fill(input_shape, 0)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n\n extended_attention_mask = tf.cast(extended_attention_mask, tf.float32)\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n if head_mask is not None:\n raise NotImplementedError\n else:\n head_mask = [None] * self.num_hidden_layers\n # head_mask = tf.constant([0] * self.num_hidden_layers)\n\n embedding_output = self.embeddings([input_ids, position_ids, token_type_ids, inputs_embeds], training=training)\n encoder_outputs = self.encoder([embedding_output, extended_attention_mask, head_mask], training=training)\n\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output)\n\n outputs = (sequence_output, pooled_output,) + encoder_outputs[\n 1:\n ] # add hidden_states and attentions if they are here\n return outputs # sequence_output, pooled_output, (hidden_states), (attentions)\n\n\nclass TFBertPreTrainedModel(TFPreTrainedModel):\n \"\"\" An abstract class to handle weights 
initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = BertConfig\n pretrained_model_archive_map = TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP\n base_model_prefix = \"bert\"\n\n\nBERT_START_DOCSTRING = r\"\"\"\n This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class.\n Use it as a regular TF 2.0 Keras Model and\n refer to the TF 2.0 documentation for all matter related to general usage and behavior.\n\n .. note::\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having\n all the tensors in the first argument of the model call function: :obj:`model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors\n in the first positional argument :\n\n - a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n :obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`\n\n Parameters:\n config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.BertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.encode_plus` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n\n `What are token type IDs? <../glossary.html#token-type-ids>`__\n position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? 
<../glossary.html#position-ids>`__\n head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, embedding_dim)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n training (:obj:`boolean`, `optional`, defaults to :obj:`False`):\n Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them\n (if set to :obj:`False`) for evaluation.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Bert Model transformer outputing raw hidden-states without any specific head on top.\",\n BERT_START_DOCSTRING,\n)\nclass TFBertModel(TFBertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.bert = TFBertMainLayer(config, name=\"bert\")\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)\n def call(self, inputs, **kwargs):\n r\"\"\"\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\n last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n pooler_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, hidden_size)`):\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during Bert pretraining. 
This output is usually *not* a good summary\n of the semantic content of the input, you're often better with averaging or pooling\n the sequence of hidden-states for the whole input sequence.\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):\n tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import BertTokenizer, TFBertModel\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = TFBertModel.from_pretrained('bert-base-uncased')\n input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True))[None, :] # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n \"\"\"\n outputs = self.bert(inputs, **kwargs)\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with two heads on top as done during the pre-training:\n a `masked language modeling` head and a `next sentence prediction (classification)` head. \"\"\",\n BERT_START_DOCSTRING,\n)\nclass TFBertForPreTraining(TFBertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.bert = TFBertMainLayer(config, name=\"bert\")\n self.nsp = TFBertNSPHead(config, name=\"nsp___cls\")\n self.mlm = TFBertMLMHead(config, self.bert.embeddings, name=\"mlm___cls\")\n\n def get_output_embeddings(self):\n return self.bert.embeddings\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)\n def call(self, inputs, **kwargs):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\n prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n seq_relationship_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):\n tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import 
BertTokenizer, TFBertForPreTraining\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = TFBertForPreTraining.from_pretrained('bert-base-uncased')\n input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True))[None, :] # Batch size 1\n outputs = model(input_ids)\n prediction_scores, seq_relationship_scores = outputs[:2]\n\n \"\"\"\n outputs = self.bert(inputs, **kwargs)\n\n sequence_output, pooled_output = outputs[:2]\n prediction_scores = self.mlm(sequence_output, training=kwargs.get(\"training\", False))\n seq_relationship_score = self.nsp(pooled_output)\n\n outputs = (prediction_scores, seq_relationship_score,) + outputs[\n 2:\n ] # add hidden states and attention if they are here\n\n return outputs # prediction_scores, seq_relationship_score, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a `language modeling` head on top. \"\"\", BERT_START_DOCSTRING)\nclass TFBertForMaskedLM(TFBertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.bert = TFBertMainLayer(config, name=\"bert\")\n self.mlm = TFBertMLMHead(config, self.bert.embeddings, name=\"mlm___cls\")\n\n def get_output_embeddings(self):\n return self.bert.embeddings\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)\n def call(self, inputs, **kwargs):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\n prediction_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):\n tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import BertTokenizer, TFBertForMaskedLM\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = TFBertForMaskedLM.from_pretrained('bert-base-uncased')\n input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True))[None, :] # Batch size 1\n outputs = model(input_ids)\n prediction_scores = outputs[0]\n\n \"\"\"\n outputs = self.bert(inputs, **kwargs)\n\n sequence_output = outputs[0]\n prediction_scores = self.mlm(sequence_output, training=kwargs.get(\"training\", False))\n\n outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here\n\n return outputs # prediction_scores, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with a `next sentence prediction (classification)` head on top. 
\"\"\", BERT_START_DOCSTRING,\n)\nclass TFBertForNextSentencePrediction(TFBertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.bert = TFBertMainLayer(config, name=\"bert\")\n self.nsp = TFBertNSPHead(config, name=\"nsp___cls\")\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)\n def call(self, inputs, **kwargs):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\n seq_relationship_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, 2)`)\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):\n tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import BertTokenizer, TFBertForNextSentencePrediction\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = TFBertForNextSentencePrediction.from_pretrained('bert-base-uncased')\n input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True))[None, :] # Batch size 1\n outputs = model(input_ids)\n seq_relationship_scores = outputs[0]\n\n \"\"\"\n outputs = self.bert(inputs, **kwargs)\n\n pooled_output = outputs[1]\n seq_relationship_score = self.nsp(pooled_output)\n\n outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here\n\n return outputs # seq_relationship_score, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n BERT_START_DOCSTRING,\n)\nclass TFBertForSequenceClassification(TFBertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n\n self.bert = TFBertMainLayer(config, name=\"bert\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n self.classifier = tf.keras.layers.Dense(\n config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\n )\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)\n def call(self, inputs, **kwargs):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\n logits (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):\n tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import BertTokenizer, TFBertForSequenceClassification\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = TFBertForSequenceClassification.from_pretrained('bert-base-uncased')\n input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True))[None, :] # Batch size 1\n outputs = model(input_ids)\n logits = outputs[0]\n\n \"\"\"\n outputs = self.bert(inputs, **kwargs)\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output, training=kwargs.get(\"training\", False))\n logits = self.classifier(pooled_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n\n return outputs # logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
\"\"\",\n BERT_START_DOCSTRING,\n)\nclass TFBertForMultipleChoice(TFBertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.bert = TFBertMainLayer(config, name=\"bert\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n self.classifier = tf.keras.layers.Dense(\n 1, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\n )\n\n @property\n def dummy_inputs(self):\n \"\"\" Dummy inputs to build the network.\n\n Returns:\n tf.Tensor with dummy inputs\n \"\"\"\n return {\"input_ids\": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)\n def call(\n self,\n inputs,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n training=False,\n ):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\n classification_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`:\n `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above).\n\n Classification scores (before SoftMax).\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):\n tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import BertTokenizer, TFBertForMultipleChoice\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = TFBertForMultipleChoice.from_pretrained('bert-base-uncased')\n choices = [\"Hello, my dog is cute\", \"Hello, my cat is amazing\"]\n input_ids = tf.constant([tokenizer.encode(s) for s in choices])[None, :] # Batch size 1, 2 choices\n outputs = model(input_ids)\n classification_scores = outputs[0]\n\n \"\"\"\n if isinstance(inputs, (tuple, list)):\n input_ids = inputs[0]\n attention_mask = inputs[1] if len(inputs) > 1 else attention_mask\n token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids\n position_ids = inputs[3] if len(inputs) > 3 else position_ids\n head_mask = inputs[4] if len(inputs) > 4 else head_mask\n inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds\n assert len(inputs) <= 6, \"Too many inputs.\"\n elif isinstance(inputs, dict):\n input_ids = inputs.get(\"input_ids\")\n attention_mask = inputs.get(\"attention_mask\", attention_mask)\n token_type_ids = inputs.get(\"token_type_ids\", token_type_ids)\n position_ids = inputs.get(\"position_ids\", position_ids)\n head_mask = inputs.get(\"head_mask\", head_mask)\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\n assert len(inputs) <= 6, \"Too many inputs.\"\n else:\n input_ids = inputs\n\n if input_ids is not None:\n num_choices = shape_list(input_ids)[1]\n seq_length = shape_list(input_ids)[2]\n else:\n num_choices = shape_list(inputs_embeds)[1]\n 
seq_length = shape_list(inputs_embeds)[2]\n\n flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None\n flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None\n flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None\n flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None\n\n flat_inputs = [\n flat_input_ids,\n flat_attention_mask,\n flat_token_type_ids,\n flat_position_ids,\n head_mask,\n inputs_embeds,\n ]\n\n outputs = self.bert(flat_inputs, training=training)\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output, training=training)\n logits = self.classifier(pooled_output)\n reshaped_logits = tf.reshape(logits, (-1, num_choices))\n\n outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here\n\n return outputs # reshaped_logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. \"\"\",\n BERT_START_DOCSTRING,\n)\nclass TFBertForTokenClassification(TFBertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n\n self.bert = TFBertMainLayer(config, name=\"bert\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n self.classifier = tf.keras.layers.Dense(\n config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"classifier\"\n )\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)\n def call(self, inputs, **kwargs):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\n scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):\n Classification scores (before SoftMax).\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):\n tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import BertTokenizer, TFBertForTokenClassification\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = TFBertForTokenClassification.from_pretrained('bert-base-uncased')\n input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True))[None, :] # Batch size 1\n outputs = model(input_ids)\n scores = outputs[0]\n\n \"\"\"\n outputs = self.bert(inputs, **kwargs)\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output, training=kwargs.get(\"training\", False))\n logits = self.classifier(sequence_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and 
attention if they are here\n\n return outputs # scores, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). \"\"\",\n BERT_START_DOCSTRING,\n)\nclass TFBertForQuestionAnswering(TFBertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n\n self.bert = TFBertMainLayer(config, name=\"bert\")\n self.qa_outputs = tf.keras.layers.Dense(\n config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"qa_outputs\"\n )\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)\n def call(self, inputs, **kwargs):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\n start_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-start scores (before SoftMax).\n end_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-end scores (before SoftMax).\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):\n tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import BertTokenizer, TFBertForQuestionAnswering\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = TFBertForQuestionAnswering.from_pretrained('bert-base-uncased')\n input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True))[None, :] # Batch size 1\n outputs = model(input_ids)\n start_scores, end_scores = outputs[:2]\n\n \"\"\"\n outputs = self.bert(inputs, **kwargs)\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = tf.split(logits, 2, axis=-1)\n start_logits = tf.squeeze(start_logits, axis=-1)\n end_logits = tf.squeeze(end_logits, axis=-1)\n\n outputs = (start_logits, end_logits,) + outputs[2:]\n\n return outputs # start_logits, end_logits, (hidden_states), (attentions)\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.zeros",
"torch.einsum",
"torch.from_numpy",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"torch.matmul",
"torch.nn.Tanh",
"numpy.transpose",
"tensorflow.train.list_variables",
"torch.nn.MSELoss"
],
[
"tensorflow.keras.layers.LayerNormalization",
"numpy.sqrt",
"tensorflow.cast",
"tensorflow.squeeze",
"tensorflow.gather",
"tensorflow.name_scope",
"tensorflow.matmul",
"tensorflow.fill",
"tensorflow.pow",
"tensorflow.split",
"tensorflow.nn.softmax",
"tensorflow.keras.layers.Activation",
"tensorflow.transpose",
"tensorflow.math.sqrt",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.keras.layers.Dropout"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
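A minimal usage sketch for the TF 2.0 BERT classes serialized in the row above, assuming transformers and tensorflow are installed. It only restates the Examples blocks and the dict input convention already documented inside that code string; nothing here is new API.

import tensorflow as tf
from transformers import BertTokenizer, TFBertForSequenceClassification

# Load the pretrained tokenizer and model named in the embedded docstring examples.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = TFBertForSequenceClassification.from_pretrained('bert-base-uncased')

# Batch of size 1, as in the embedded examples.
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]
attention_mask = tf.ones_like(input_ids)

# BERT_INPUTS_DOCSTRING above documents three call conventions; the dict form
# below is the one it recommends for tf.keras.Model.fit(), which requires all
# tensors in the first positional argument of the model call.
outputs = model({'input_ids': input_ids, 'attention_mask': attention_mask})
logits = outputs[0]  # classification scores before SoftMax, shape (batch_size, num_labels)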
guyang3532/text | [
"e2fc987ff6a002018040cffac5e0d61c3d0b06c6",
"e2fc987ff6a002018040cffac5e0d61c3d0b06c6"
] | [
"benchmark/experimental_vectors.py",
"torchtext/experimental/transforms.py"
] | [
"import time\n\nimport torch\nfrom torchtext.experimental.datasets import AG_NEWS\nfrom torchtext.experimental.vectors import FastText as FastTextExperimental\nfrom torchtext.vocab import FastText\n\n\ndef benchmark_experimental_vectors():\n def _run_benchmark_lookup(tokens, vector):\n t0 = time.monotonic()\n for token in tokens:\n vector[token]\n print(\"Lookup time:\", time.monotonic() - t0)\n\n train, = AG_NEWS(data_select='train')\n vocab = train.get_vocab()\n tokens = []\n for (label, text) in train:\n for id in text.tolist():\n tokens.append(vocab.itos[id])\n\n # existing FastText construction\n print(\"Existing FastText - Not Jit Mode\")\n t0 = time.monotonic()\n fast_text = FastText()\n print(\"Construction time:\", time.monotonic() - t0)\n _run_benchmark_lookup(tokens, fast_text)\n\n # experimental FastText construction\n print(\"FastText Experimental\")\n t0 = time.monotonic()\n fast_text_experimental = FastTextExperimental(validate_file=False)\n print(\"Construction time:\", time.monotonic() - t0)\n\n # not jit lookup\n print(\"FastText Experimental - Not Jit Mode\")\n _run_benchmark_lookup(tokens, fast_text_experimental)\n\n # jit lookup\n print(\"FastText Experimental - Jit Mode\")\n jit_fast_text_experimental = torch.jit.script(fast_text_experimental)\n _run_benchmark_lookup(tokens, jit_fast_text_experimental)\n\n\nif __name__ == \"__main__\":\n benchmark_experimental_vectors()\n",
"import torch\nimport torch.nn as nn\nfrom typing import List, Tuple\n\n\n__all__ = [\n 'BasicEnglishNormalize',\n 'RegexTokenizer'\n]\n\n\nclass BasicEnglishNormalize(nn.Module):\n r\"\"\"Basic normalization for a string sentence.\n\n Normalization includes\n - lowercasing\n - complete some basic text normalization for English words as follows:\n - add spaces before and after '\\''\n - remove '\\\"',\n - add spaces before and after '.'\n - replace '<br \\/>'with single space\n - add spaces before and after ','\n - add spaces before and after '('\n - add spaces before and after ')'\n - add spaces before and after '!'\n - add spaces before and after '?'\n - replace ';' with single space\n - replace ':' with single space\n - replace multiple spaces with single space\n\n Examples:\n >>> import torch\n >>> from torchtext.experimental.transforms import BasicEnglishNormalize\n >>> test_sample = 'Basic English Normalization for a Line of Text'\n >>> basic_english_normalize = BasicEnglishNormalize()\n >>> jit_basic_english_normalize = torch.jit.script(basic_english_normalize)\n >>> tokens = jit_basic_english_normalize(test_sample)\n \"\"\"\n\n regex_and_replacement_string_pairs: List[Tuple[torch.classes.torchtext.Regex, str]]\n\n def __init__(self):\n super(BasicEnglishNormalize, self).__init__()\n patterns_list = [\n (r'\\'', ' \\' '),\n (r'\\\"', ''),\n (r'\\.', ' . '),\n (r'<br \\/>', ' '),\n (r',', ' , '),\n (r'\\(', ' ( '),\n (r'\\)', ' ) '),\n (r'\\!', ' ! '),\n (r'\\?', ' ? '),\n (r'\\;', ' '),\n (r'\\:', ' '),\n (r'\\s+', ' ')]\n\n regex_objects = map(lambda pattern_tuple: torch.classes.torchtext.Regex(pattern_tuple[0]), patterns_list)\n replacement_strings = map(lambda pattern_tuple: pattern_tuple[1], patterns_list)\n self.regex_and_replacement_string_pairs = list(zip(regex_objects, replacement_strings))\n\n def forward(self, line: str) -> List[str]:\n r\"\"\"\n Args:\n line (str): a line of text to tokenize.\n Returns:\n List[str]: a list of tokens after normalizing and splitting on whitespace.\n \"\"\"\n\n line = line.lower()\n for regex, replacement_string in self.regex_and_replacement_string_pairs:\n line = regex.Sub(line, replacement_string)\n return line.split()\n\n\nclass RegexTokenizer(nn.Module):\n r\"\"\"Regex tokenizer for a string sentence that applies all regex replacements defined in patterns_list.\n\n Args:\n patterns_list (List[Tuple[str, str]]): a list of tuples (ordered pairs) which contain the regex pattern string\n as the first element and the replacement string as the second element.\n\n Examples:\n >>> import torch\n >>> from torchtext.experimental.transforms import RegexTokenizer\n >>> test_sample = 'Basic Regex Tokenization for a Line of Text'\n >>> patterns_list = [\n (r'\\'', ' \\' '),\n (r'\\\"', '')]\n >>> regex_tokenizer = RegexTokenizer(patterns_list)\n >>> jit_regex_tokenizer = torch.jit.script(regex_tokenizer)\n >>> tokens = jit_regex_tokenizer(test_sample)\n \"\"\"\n\n regex_and_replacement_string_pairs: List[Tuple[torch.classes.torchtext.Regex, str]]\n\n def __init__(self, patterns_list: List[Tuple[str, str]]):\n super(RegexTokenizer, self).__init__()\n\n regex_objects = map(lambda pattern_tuple: torch.classes.torchtext.Regex(pattern_tuple[0]), patterns_list)\n replacement_strings = map(lambda pattern_tuple: pattern_tuple[1], patterns_list)\n self.regex_and_replacement_string_pairs = list(zip(regex_objects, replacement_strings))\n\n def forward(self, line: str) -> List[str]:\n r\"\"\"\n Args:\n line (str): a line of text to tokenize.\n Returns:\n 
List[str]: a list of tokens after normalizing and splitting on whitespace.\n \"\"\"\n\n for regex, replacement_string in self.regex_and_replacement_string_pairs:\n line = regex.Sub(line, replacement_string)\n return line.split()\n"
] | [
[
"torch.jit.script"
],
[
"torch.classes.torchtext.Regex"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
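A minimal sketch of the experimental torchtext transforms in the row above, lifted almost verbatim from their docstring examples; it assumes a torchtext build that registers the C++ torch.classes.torchtext.Regex extension these modules depend on.

import torch
from torchtext.experimental.transforms import BasicEnglishNormalize, RegexTokenizer

test_sample = 'Basic English Normalization for a Line of Text'

# Both transforms are nn.Modules built on the scriptable Regex class, so they
# survive torch.jit.script -- the property the benchmark file above relies on.
basic_english_normalize = BasicEnglishNormalize()
jit_normalize = torch.jit.script(basic_english_normalize)
tokens = jit_normalize(test_sample)  # lowercased tokens, split on whitespace

# RegexTokenizer applies caller-supplied (pattern, replacement) pairs in order.
patterns_list = [(r'\'', ' \' '), (r'\"', '')]
regex_tokenizer = RegexTokenizer(patterns_list)
jit_tokens = torch.jit.script(regex_tokenizer)(test_sample)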
anonymous-user-commits/perturb-net | [
"66fc7c4a1234fa34b92bcc85751f0a6e23d80a23",
"66fc7c4a1234fa34b92bcc85751f0a6e23d80a23",
"66fc7c4a1234fa34b92bcc85751f0a6e23d80a23",
"66fc7c4a1234fa34b92bcc85751f0a6e23d80a23",
"66fc7c4a1234fa34b92bcc85751f0a6e23d80a23",
"66fc7c4a1234fa34b92bcc85751f0a6e23d80a23",
"66fc7c4a1234fa34b92bcc85751f0a6e23d80a23",
"66fc7c4a1234fa34b92bcc85751f0a6e23d80a23",
"66fc7c4a1234fa34b92bcc85751f0a6e23d80a23"
] | [
"cnns/nnlib/robustness/pni/code/models/nomarlization_layer.py",
"cnns/nnlib/robustness/pni/code/models/noise_layer_robust.py",
"cnns/nnlib/pytorch_architecture/net_eigen.py",
"cnns/nnlib/robustness/pni/code/utils.py",
"cnns/nnlib/solver.py",
"cnns/nnlib/pytorch_architecture/mobilenet.py",
"cnns/foolbox/foolbox_2_3_0/attacks/hop_skip_jump_attack.py",
"cnns/nnlib/pytorch_layers/fft_band_2D_complex_mask.py",
"cnns/foolbox/foolbox_2_3_0/tests/attacks/test_batch_attacks_pointwise.py"
] | [
"import torch\nimport torch.nn as nn\n\n\nclass Normalize_layer(nn.Module):\n\n def __init__(self, mean, std):\n super(Normalize_layer, self).__init__()\n self.mean = nn.Parameter(torch.Tensor(mean).unsqueeze(1).unsqueeze(1),\n requires_grad=False)\n self.std = nn.Parameter(torch.Tensor(std).unsqueeze(1).unsqueeze(1),\n requires_grad=False)\n\n def forward(self, input):\n return input.sub(self.mean).div(self.std)\n\n\nclass noise_Normalize_layer(nn.Module):\n\n def __init__(self, mean, std, input_noise=False):\n super(noise_Normalize_layer, self).__init__()\n self.mean = nn.Parameter(torch.Tensor(mean).unsqueeze(1).unsqueeze(1),\n requires_grad=False)\n self.std = nn.Parameter(torch.Tensor(std).unsqueeze(1).unsqueeze(1),\n requires_grad=False)\n self.input_noise = input_noise\n self.alpha_i = nn.Parameter(torch.Tensor([0.25]), requires_grad=True)\n\n def forward(self, input):\n output = input.sub(self.mean).div(self.std)\n\n input_std = output.std().item()\n input_noise = output.clone().normal_(0, input_std)\n\n return output + input_noise * self.alpha_i * self.input_noise\n\n\n",
"import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass noise_Conv2d(nn.Conv2d):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1,\n groups=1, bias=True, noise_std=0.1):\n super(noise_Conv2d, self).__init__(in_channels, out_channels,\n kernel_size, stride,\n padding, dilation, groups, bias)\n self.noise_std = noise_std\n\n def forward(self, input):\n noise_i = input.clone().normal_(0, self.noise_std)\n noise_input = input + noise_i\n\n output = F.conv2d(noise_input, self.weight, self.bias, self.stride,\n self.padding, self.dilation,\n self.groups)\n\n return output\n",
"import torch.nn as nn\nimport torch.nn.functional as F\nfrom cnns.nnlib.pytorch_layers.conv_picker import Conv\nfrom cnns.nnlib.utils.svd2d import show_svd_feature_map\nfrom cnns.nnlib.utils.svd2d import show_svd_spectra\n\ndef get_conv(args, in_channels, out_channels, kernel_size, stride=1,\n padding=0, bias=True):\n return Conv(kernel_sizes=[kernel_size], in_channels=in_channels,\n out_channels=[out_channels], strides=[stride],\n padding=[padding], args=args, is_bias=bias).get_conv()\n\n\nH = 28\nW = 28\nnum_classes = 10\n\nkernel_size1 = 5\nkernel_size2 = 5\nin_channels = 1\nout_channels1 = 20\nhidden_neurons = 500\npull1 = 2\nout_channels2 = 50\npull2 = 2\n\ndef get_HW_after_pull2(\n H=H,\n W=W,\n kernel_size1 = kernel_size1,\n kernel_size2 = kernel_size2,\n pull1= pull1,\n pull2=pull2):\n H_conv1 = H - (kernel_size1 - 1)\n W_conv1 = W - (kernel_size1 - 1)\n assert H_conv1 % 2 == 0\n assert W_conv1 % 2 == 0\n H_pull1 = H_conv1 // pull1 # 12\n W_pull1 = W_conv1 // pull1 # 12\n W_conv2 = W_pull1 - (kernel_size2 - 1) # 8\n H_conv2 = H_pull1 - (kernel_size2 - 1) # 8\n assert H_conv2 % pull2 == 0\n assert W_conv2 % pull2 == 0\n H_pull2 = H_conv2 // pull2 # 4\n W_pull2 = W_conv2 // pull2 # 4\n return H_pull2, W_pull2\n\nconv1_param_nr = in_channels * out_channels1 * kernel_size1\nconv2_param_nr = out_channels1 * out_channels2 * kernel_size2\nconv_param_nr = conv1_param_nr + conv2_param_nr\n# print('total conv params: ', conv_param_nr)\n\nH_pull2, W_pull2 = get_HW_after_pull2()\nfc1_param_nr = H_pull2 * W_pull2 * hidden_neurons\nfc2_param_nr = hidden_neurons * num_classes\nfc_param_nr = fc1_param_nr + fc2_param_nr\n# print('total fully connected params: ', fc_param_nr)\n\nparam_nr = conv1_param_nr + conv2_param_nr + fc1_param_nr + fc2_param_nr\n# print('total param nr: ', param_nr) # 18100\n\nclass NetEigen(nn.Module):\n def __init__(self, args):\n super(NetEigen, self).__init__()\n self.counter = 0\n self.args = args\n # self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv1 = get_conv(args,\n in_channels=in_channels,\n out_channels=out_channels1,\n kernel_size=kernel_size1,\n stride=1)\n # self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.conv2 = get_conv(args,\n in_channels=out_channels1,\n out_channels=out_channels2,\n kernel_size=kernel_size2,\n stride=1)\n\n self.fc1 = nn.Linear(\n H_pull2 * W_pull2 * out_channels2,\n hidden_neurons)\n self.fc2 = nn.Linear(\n hidden_neurons,\n args.num_classes)\n\n def forward(self, x):\n x = self.conv1(x)\n s1 = show_svd_feature_map(x=x, counter=self.counter, args=self.args, index=1)\n x = F.relu(x)\n x = F.max_pool2d(x, pull1, pull1)\n x = self.conv2(x)\n s2 = show_svd_feature_map(x=x, counter=self.counter, args=self.args, index=2)\n x = F.relu(x)\n x = F.max_pool2d(x, pull2, pull2)\n x = x.view(-1, H_pull2 * W_pull2 * out_channels2)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n if s1 is not None and s2 is not None:\n show_svd_spectra([s1, s2], counter=self.counter)\n self.counter += 1\n return F.log_softmax(x, dim=1)\n\n\nif __name__ == \"__main__\":\n print('Net for mnist dataset.')\n",
"import os, sys, time, random\nimport numpy as np\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom torch import nn\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass RecorderMeter(object):\n \"\"\"Computes and stores the minimum loss value and its epoch index\"\"\"\n def __init__(self, total_epoch):\n self.reset(total_epoch)\n\n def reset(self, total_epoch):\n assert total_epoch > 0\n self.total_epoch = total_epoch\n self.current_epoch = 0\n self.epoch_losses = np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]\n self.epoch_losses = self.epoch_losses - 1\n\n self.epoch_accuracy= np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]\n self.epoch_accuracy= self.epoch_accuracy\n\n def update(self, idx, train_loss, train_acc, val_loss, val_acc):\n assert idx >= 0 and idx < self.total_epoch, 'total_epoch : {} , but update with the {} index'.format(self.total_epoch, idx)\n self.epoch_losses [idx, 0] = train_loss\n self.epoch_losses [idx, 1] = val_loss\n self.epoch_accuracy[idx, 0] = train_acc\n self.epoch_accuracy[idx, 1] = val_acc\n self.current_epoch = idx + 1\n # return self.max_accuracy(False) == val_acc\n\n def max_accuracy(self, istrain):\n if self.current_epoch <= 0: return 0\n if istrain: return self.epoch_accuracy[:self.current_epoch, 0].max()\n else: return self.epoch_accuracy[:self.current_epoch, 1].max()\n \n def plot_curve(self, save_path):\n title = 'the accuracy/loss curve of train/val'\n dpi = 80 \n width, height = 1200, 800\n legend_fontsize = 10\n scale_distance = 48.8\n figsize = width / float(dpi), height / float(dpi)\n\n fig = plt.figure(figsize=figsize)\n x_axis = np.array([i for i in range(self.total_epoch)]) # epochs\n y_axis = np.zeros(self.total_epoch)\n\n plt.xlim(0, self.total_epoch)\n plt.ylim(0, 100)\n interval_y = 5\n interval_x = 5\n plt.xticks(np.arange(0, self.total_epoch + interval_x, interval_x))\n plt.yticks(np.arange(0, 100 + interval_y, interval_y))\n plt.grid()\n plt.title(title, fontsize=20)\n plt.xlabel('the training epoch', fontsize=16)\n plt.ylabel('accuracy', fontsize=16)\n \n y_axis[:] = self.epoch_accuracy[:, 0]\n plt.plot(x_axis, y_axis, color='g', linestyle='-', label='train-accuracy', lw=2)\n plt.legend(loc=4, fontsize=legend_fontsize)\n\n y_axis[:] = self.epoch_accuracy[:, 1]\n plt.plot(x_axis, y_axis, color='y', linestyle='-', label='valid-accuracy', lw=2)\n plt.legend(loc=4, fontsize=legend_fontsize)\n\n \n y_axis[:] = self.epoch_losses[:, 0]\n plt.plot(x_axis, y_axis*50, color='g', linestyle=':', label='train-loss-x50', lw=2)\n plt.legend(loc=4, fontsize=legend_fontsize)\n\n y_axis[:] = self.epoch_losses[:, 1]\n plt.plot(x_axis, y_axis*50, color='y', linestyle=':', label='valid-loss-x50', lw=2)\n plt.legend(loc=4, fontsize=legend_fontsize)\n\n if save_path is not None:\n fig.savefig(save_path, dpi=dpi, bbox_inches='tight')\n print ('---- save figure {} into {}'.format(title, save_path))\n plt.close(fig)\n \n\ndef time_string():\n ISOTIMEFORMAT='%Y-%m-%d %X'\n string = '[{}]'.format(time.strftime( ISOTIMEFORMAT, time.gmtime(time.time()) ))\n return string\n\ndef convert_secs2time(epoch_time):\n need_hour = int(epoch_time / 3600)\n need_mins = int((epoch_time - 3600*need_hour) / 
60)\n need_secs = int(epoch_time - 3600*need_hour - 60*need_mins)\n return need_hour, need_mins, need_secs\n\ndef time_file_str():\n ISOTIMEFORMAT='%Y-%m-%d'\n string = '{}'.format(time.strftime( ISOTIMEFORMAT, time.gmtime(time.time()) ))\n return string + '-{}'.format(random.randint(1, 10000))\n\n\n",
"from __future__ import print_function, division\n\nimport time\nfrom builtins import object\n# from future import standard_library\n# standard_library.install_aliases()\nfrom builtins import range\n\nimport numpy as np\nimport pickle as pickle\n\nfrom nnlib import optim\nfrom nnlib.utils.general_utils import get_log_time\n\nclass Solver(object):\n \"\"\"\n A Solver encapsulates all the logic necessary for training classification\n models. The Solver performs stochastic gradient descent using different\n update rules defined in optim.py.\n\n The solver accepts both training and validataion data and labels so it can\n periodically check classification accuracy on both training and validation\n data to watch out for overfitting.\n\n To train a model, you will first construct a Solver instance, passing the\n model, dataset, and various options (learning rate, batch size, etc) to the\n constructor. You will then call the train() method to run the optimization\n procedure and train the model.\n\n After the train() method returns, model.params will contain the parameters\n that performed best on the validation set over the course of training.\n In addition, the instance variable solver.loss_history will contain a list\n of all losses encountered during training and the instance variables\n solver.train_acc_history and solver.val_acc_history will be lists of the\n accuracies of the model on the training and validation set at each epoch.\n\n Example usage might look something like this:\n\n data = {\n 'X_train': # training data\n 'y_train': # training labels\n 'X_val': # validation data\n 'y_val': # validation labels\n }\n model = MyAwesomeModel(hidden_size=100, reg=10)\n solver = Solver(model, data,\n update_rule='sgd',\n optim_config={\n 'learning_rate': 1e-3,\n },\n lr_decay=0.95,\n num_epochs=10, batch_size=100,\n print_every=100)\n solver.train()\n\n\n A Solver works on a model object that must conform to the following API:\n\n - model.params must be a dictionary mapping string parameter names to numpy\n arrays containing parameter values.\n\n - model.loss(X, y) must be a function that computes training-time loss and\n gradients, and test-time classification scores, with the following inputs\n and outputs:\n\n Inputs:\n - X: Array giving a minibatch of input data of shape (N, d_1, ..., d_k)\n - y: Array of labels, of shape (N,) giving labels for X where y[i] is the\n label for X[i].\n\n Returns:\n If y is None, run a test-time forward pass and return:\n - scores: Array of shape (N, C) giving classification scores for X where\n scores[i, c] gives the score of class c for X[i].\n\n If y is not None, run a training time forward and backward pass and\n return a tuple of:\n - loss: Scalar giving the loss\n - grads: Dictionary with the same keys as self.params mapping parameter\n names to gradients of the loss with respect to those parameters.\n \"\"\"\n\n def __init__(self, model, data, **kwargs):\n \"\"\"\n Construct a new Solver instance.\n\n Required arguments:\n - model: A model object conforming to the API described above\n - data: A dictionary of training and validation data containing:\n 'X_train': Array, shape (N_train, d_1, ..., d_k) of training images\n 'X_val': Array, shape (N_val, d_1, ..., d_k) of validation images\n 'y_train': Array, shape (N_train,) of labels for training images\n 'y_val': Array, shape (N_val,) of labels for validation images\n\n Optional arguments:\n - update_rule: A string giving the name of an update rule in optim.py.\n Default is 'sgd'.\n - optim_config: A 
dictionary containing hyperparameters that will be\n passed to the chosen update rule. Each update rule requires different\n hyperparameters (see optim.py) but all update rules require a\n 'learning_rate' parameter so that should always be present.\n - lr_decay: A scalar for learning rate decay; after each epoch the\n learning rate is multiplied by this value.\n - batch_size: Size of minibatches used to compute loss and gradient\n during training.\n - num_epochs: The number of epochs to run for during training.\n - print_every: Integer; training losses will be printed every\n print_every iterations.\n - verbose: Boolean; if set to false then no output will be printed\n during training.\n - num_train_samples: Number of training samples used to check training\n accuracy; default is 1000; set to None to use entire training set.\n - num_val_samples: Number of validation samples to use to check val\n accuracy; default is None, which uses the entire validation set.\n - checkpoint_name: If not None, then save model checkpoints here every\n epoch.\n \"\"\"\n self.model = model\n self.X_train = data['X_train']\n self.y_train = data['y_train']\n self.X_val = data['X_val']\n self.y_val = data['y_val']\n\n # Unpack keyword arguments\n self.update_rule = kwargs.pop('update_rule', 'sgd')\n self.optim_config = kwargs.pop('optim_config', {})\n self.lr_decay = kwargs.pop('lr_decay', 1.0)\n self.batch_size = kwargs.pop('batch_size', 100)\n self.num_epochs = kwargs.pop('num_epochs', 10)\n self.num_train_samples = kwargs.pop('num_train_samples', 1000)\n self.num_val_samples = kwargs.pop('num_val_samples', None)\n\n self.checkpoint_name = kwargs.pop('checkpoint_name', None)\n self.print_every = kwargs.pop('print_every', 10)\n self.verbose = kwargs.pop('verbose', True)\n\n default_epoch_log = \"epoch_log_\" + get_log_time() + \".csv\"\n self.epoch_log = kwargs.pop('epoch_log', default_epoch_log)\n\n default_loss_log = \"loss_log_\" + get_log_time() + \".csv\"\n self.loss_log = kwargs.pop('loss_log', default_loss_log)\n\n # Throw an error if there are extra keyword arguments\n if len(kwargs) > 0:\n extra = ', '.join('\"%s\"' % k for k in list(kwargs.keys()))\n raise ValueError('Unrecognized arguments %s' % extra)\n\n # Make sure the update rule exists, then replace the string\n # name with the actual function\n if not hasattr(optim, self.update_rule):\n raise ValueError('Invalid update_rule \"%s\"' % self.update_rule)\n self.update_rule = getattr(optim, self.update_rule)\n\n self._reset()\n\n def _reset(self):\n \"\"\"\n Set up some book-keeping variables for optimization. Don't call this\n manually.\n \"\"\"\n # Set up some variables for book-keeping\n self.epoch = 0\n self.best_val_acc = 0\n self.best_params = {}\n self.loss_history = []\n self.train_acc_history = []\n self.val_acc_history = []\n\n # Make a deep copy of the optim_config for each parameter\n self.optim_configs = {}\n for p in self.model.params:\n d = {k: v for k, v in self.optim_config.items()}\n self.optim_configs[p] = d\n\n def _step(self):\n \"\"\"\n Make a single gradient update. 
This is called by train() and should not\n be called manually.\n \"\"\"\n # Make a minibatch of training data\n num_train = self.X_train.shape[0]\n batch_mask = np.random.choice(num_train, self.batch_size)\n X_batch = self.X_train[batch_mask]\n y_batch = self.y_train[batch_mask]\n\n # Compute loss and gradient\n loss, grads = self.model.loss(X_batch, y_batch)\n self.loss_history.append(loss)\n\n # Perform a parameter update\n for p, w in self.model.params.items():\n dw = grads[p]\n config = self.optim_configs[p]\n next_w, next_config = self.update_rule(w, dw, config)\n self.model.params[p] = next_w\n self.optim_configs[p] = next_config\n\n def _save_checkpoint(self):\n if self.checkpoint_name is None: return\n checkpoint = {\n 'model': self.model,\n 'update_rule': self.update_rule,\n 'lr_decay': self.lr_decay,\n 'optim_config': self.optim_config,\n 'batch_size': self.batch_size,\n 'num_train_samples': self.num_train_samples,\n 'num_val_samples': self.num_val_samples,\n 'epoch': self.epoch,\n 'loss_history': self.loss_history,\n 'train_acc_history': self.train_acc_history,\n 'val_acc_history': self.val_acc_history,\n }\n filename = '%s_epoch_%d.pkl' % (self.checkpoint_name, self.epoch)\n if self.verbose:\n print('Saving checkpoint to \"%s\"' % filename)\n with open(filename, 'wb') as f:\n pickle.dump(checkpoint, f)\n\n def check_accuracy(self, X, y, num_samples=None, batch_size=100):\n \"\"\"\n Check accuracy of the model on the provided data.\n\n Inputs:\n - X: Array of data, of shape (N, d_1, ..., d_k)\n - y: Array of labels, of shape (N,)\n - num_samples: If not None, subsample the data and only test the model\n on num_samples datapoints.\n - batch_size: Split X and y into batches of this size to avoid using\n too much memory.\n\n Returns:\n - acc: Scalar giving the fraction of instances that were correctly\n classified by the model.\n \"\"\"\n\n # Maybe subsample the data\n N = X.shape[0]\n if num_samples is not None and N > num_samples:\n mask = np.random.choice(N, num_samples)\n N = num_samples\n X = X[mask]\n y = y[mask]\n\n # Compute predictions in batches\n num_batches = N // batch_size\n if N % batch_size != 0:\n num_batches += 1\n y_pred = []\n for i in range(num_batches):\n start = i * batch_size\n end = (i + 1) * batch_size\n scores = self.model.loss(X[start:end])\n y_pred.append(np.argmax(scores, axis=1))\n y_pred = np.hstack(y_pred)\n acc = np.mean(y_pred == y)\n\n return acc\n\n def train(self):\n \"\"\"\n Run optimization to train the model.\n \"\"\"\n num_train = self.X_train.shape[0]\n iterations_per_epoch = max(num_train // self.batch_size, 1)\n num_iterations = self.num_epochs * iterations_per_epoch\n\n start = time.time()\n for t in range(num_iterations):\n self._step()\n\n # Maybe print training loss\n if self.verbose and t % self.print_every == 0:\n print('(Iteration %d / %d) loss: %f' % (\n t + 1, num_iterations, self.loss_history[-1]))\n\n with open(self.loss_log, \"a+\") as f:\n f.write(\"iteration,\" + str(t + 1) + \",loss,\" + str(self.loss_history[-1]) + \"\\n\")\n\n # At the end of every epoch, increment the epoch counter and decay\n # the learning rate.\n epoch_end = (t + 1) % iterations_per_epoch == 0\n if epoch_end:\n self.epoch += 1\n for k in self.optim_configs:\n self.optim_configs[k]['learning_rate'] *= self.lr_decay\n\n # Check train and val accuracy on the first iteration, the last\n # iteration, and at the end of each epoch.\n first_it = (t == 0)\n last_it = (t == num_iterations - 1)\n if first_it or last_it or epoch_end:\n train_acc = 
self.check_accuracy(self.X_train, self.y_train,\n num_samples=self.num_train_samples)\n val_acc = self.check_accuracy(self.X_val, self.y_val,\n num_samples=self.num_val_samples)\n self.train_acc_history.append(train_acc)\n self.val_acc_history.append(val_acc)\n self._save_checkpoint()\n\n if self.verbose:\n elapsed_time = time.time() - start\n print('(Epoch %d / %d) train_acc: %f; val_acc: %f, epoch time: %f' % (\n self.epoch, self.num_epochs, train_acc, val_acc, elapsed_time))\n with open(self.epoch_log, \"a+\") as f:\n f.write(\"Epoch,\" + str(self.epoch) + \",train_acc,\" + str(train_acc) + \",val_acc,\" + str(\n val_acc) + \",time,\" + str(time.time() - start) + \"\\n\")\n start = time.time() # reset the timer\n # print(\"filter 0, channel 0, weights: \", self.model.params[\"W1\"][0][0])\n\n # Keep track of the best model\n if val_acc > self.best_val_acc:\n self.best_val_acc = val_acc\n self.best_params = {}\n for k, v in self.model.params.items():\n self.best_params[k] = v.copy()\n\n # At the end of training swap the best params into the model\n self.model.params = self.best_params\n",
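The `Solver` above is driven through a small, uniform interface: any object exposing `.params` (a dict of numpy arrays) and `.loss(X, y) -> (loss, grads)` can be trained. A minimal runnable sketch of that contract, assuming the `Solver` class and the cs231n-style `optim` module referenced above are importable; `ToyLinearModel` and the random data are illustrative stand-ins, not part of the source:

```python
import numpy as np

class ToyLinearModel:
    """Hypothetical minimal model satisfying Solver's interface:
    .params is a dict of arrays, .loss(X) returns scores, and
    .loss(X, y) returns (loss, grads)."""
    def __init__(self, dim=3 * 32 * 32, num_classes=10):
        self.params = {'W': 0.001 * np.random.randn(dim, num_classes)}

    def loss(self, X, y=None):
        X = X.reshape(X.shape[0], -1)
        scores = X.dot(self.params['W'])
        if y is None:
            return scores  # test-time path used by Solver.check_accuracy
        # softmax cross-entropy loss and gradient
        shifted = scores - scores.max(axis=1, keepdims=True)
        probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
        loss = -np.log(probs[np.arange(len(y)), y]).mean()
        dscores = probs
        dscores[np.arange(len(y)), y] -= 1
        grads = {'W': X.T.dot(dscores / len(y))}
        return loss, grads

data = {
    'X_train': np.random.randn(500, 3, 32, 32),
    'y_train': np.random.randint(10, size=500),
    'X_val': np.random.randn(100, 3, 32, 32),
    'y_val': np.random.randint(10, size=100),
}
solver = Solver(ToyLinearModel(), data,
                update_rule='sgd',
                optim_config={'learning_rate': 1e-3},
                lr_decay=0.95, batch_size=100, num_epochs=2,
                print_every=1)
solver.train()
print('best validation accuracy:', solver.best_val_acc)
```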
"'''MobileNet in PyTorch.\n\nSee the paper \"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications\"\nfor more details.\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\n\n\nclass Block(nn.Module):\n '''Depthwise conv + Pointwise conv'''\n def __init__(self, in_planes, out_planes, stride=1):\n super(Block, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn2 = nn.BatchNorm2d(out_planes)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n return out\n\n\nclass MobileNet(nn.Module):\n # (128,2) means conv planes=128, conv stride=2, by default conv stride=1\n cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]\n\n def __init__(self, num_classes=10):\n super(MobileNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(32)\n self.layers = self._make_layers(in_planes=32)\n self.linear = nn.Linear(1024, num_classes)\n\n def _make_layers(self, in_planes):\n layers = []\n for x in self.cfg:\n out_planes = x if isinstance(x, int) else x[0]\n stride = 1 if isinstance(x, int) else x[1]\n layers.append(Block(in_planes, out_planes, stride))\n in_planes = out_planes\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layers(out)\n out = F.avg_pool2d(out, 2)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\ndef test():\n net = MobileNet()\n x = torch.randn(1,3,32,32)\n y = net(Variable(x))\n print(y.size())\n\n# test()\n",
"import warnings\nimport time\nimport sys\n\nfrom .base import Attack\nfrom .base import generator_decorator\nfrom ..distances import MSE, Linf\nfrom ..criteria import Misclassification\nimport numpy as np\nimport math\nfrom warnings import warn\nimport logging\n\n\nclass HopSkipJumpAttack(Attack):\n \"\"\"A powerful adversarial attack that requires neither gradients\n nor probabilities.\n\n Notes\n -----\n Features:\n * ability to switch between two types of distances: MSE and Linf.\n * ability to continue previous attacks by passing an instance of the\n Adversarial class\n * ability to pass an explicit starting point; especially to initialize\n a targeted attack\n * ability to pass an alternative attack used for initialization\n * ability to specify the batch size\n\n References\n ----------\n ..\n HopSkipJumpAttack was originally proposed by Chen, Jordan and\n Wainwright.\n It is a decision-based attack that requires access to output\n labels of a model alone.\n Paper link: https://arxiv.org/abs/1904.02144\n The implementation in Foolbox is based on Boundary Attack.\n\n \"\"\"\n\n @generator_decorator\n def as_generator(\n self,\n a,\n iterations=64,\n initial_num_evals=100,\n max_num_evals=10000,\n stepsize_search=\"geometric_progression\",\n gamma=1.0,\n starting_point=None,\n batch_size=256,\n internal_dtype=np.float64,\n log_every_n_steps=None,\n loggingLevel=logging.WARNING,\n ):\n \"\"\"Applies HopSkipJumpAttack.\n\n Parameters\n ----------\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n The original, correctly classified input. If it is a\n numpy array, label must be passed as well. If it is\n an :class:`Adversarial` instance, label must not be passed.\n label : int\n The reference label of the original input. Must be passed\n if input is a numpy array, must not be passed if input is\n an :class:`Adversarial` instance.\n unpack : bool\n If true, returns the adversarial input, otherwise returns\n the Adversarial object.\n iterations : int\n Number of iterations to run.\n initial_num_evals: int\n Initial number of evaluations for gradient estimation.\n Larger initial_num_evals increases time efficiency, but\n may decrease query efficiency.\n max_num_evals: int\n Maximum number of evaluations for gradient estimation.\n stepsize_search: str\n How to search for stepsize; choices are 'geometric_progression',\n 'grid_search'. 'geometric progression' initializes the stepsize\n by ||x_t - x||_p / sqrt(iteration), and keep decreasing by half\n until reaching the target side of the boundary. 'grid_search'\n chooses the optimal epsilon over a grid, in the scale of\n ||x_t - x||_p.\n gamma: float\n The binary search threshold theta is gamma / d^1.5 for\n l2 attack and gamma / d^2 for linf attack.\n\n starting_point : `numpy.ndarray`\n Adversarial input to use as a starting point, required\n for targeted attacks.\n batch_size : int\n Batch size for model prediction.\n internal_dtype : np.float32 or np.float64\n Higher precision might be slower but is numerically more stable.\n log_every_n_steps : int\n Determines verbositity of the logging.\n loggingLevel : int\n Controls the verbosity of the logging, e.g. 
logging.INFO\n or logging.WARNING.\n\n \"\"\"\n\n self.initial_num_evals = initial_num_evals\n self.max_num_evals = max_num_evals\n self.stepsize_search = stepsize_search\n self.gamma = gamma\n self.batch_size = batch_size\n self._starting_point = starting_point\n self.internal_dtype = internal_dtype\n self.log_every_n_steps = log_every_n_steps\n\n self.logger = logging.getLogger(\"BoundaryAttack\")\n self.logger.setLevel(loggingLevel)\n\n # Set constraint based on the distance.\n if self._default_distance == MSE:\n self.constraint = \"l2\"\n elif self._default_distance == Linf:\n self.constraint = \"linf\"\n\n # Set binary search threshold.\n self.shape = a.unperturbed.shape\n self.d = np.prod(self.shape)\n if self.constraint == \"l2\":\n self.theta = self.gamma / (np.sqrt(self.d) * self.d)\n else:\n self.theta = self.gamma / (self.d * self.d)\n logging.info(\n \"HopSkipJumpAttack optimized for {} distance\".format(self.constraint)\n )\n\n yield from self.attack(a, iterations=iterations)\n\n def attack(self, a, iterations):\n \"\"\"\n iterations : int\n Maximum number of iterations to run.\n \"\"\"\n self.t_initial = time.time()\n\n # ===========================================================\n # Increase floating point precision\n # ===========================================================\n\n self.external_dtype = a.unperturbed.dtype\n\n assert self.internal_dtype in [np.float32, np.float64]\n assert self.external_dtype in [np.float32, np.float64]\n\n assert not (\n self.external_dtype == np.float64 and self.internal_dtype == np.float32\n )\n\n a.set_distance_dtype(self.internal_dtype)\n\n # ===========================================================\n # Construct batch decision function with binary output.\n # ===========================================================\n # decision_function = lambda x: a.forward(\n # x.astype(self.external_dtype), strict=False)[1]\n def decision_function(x):\n outs = []\n num_batchs = int(math.ceil(len(x) * 1.0 / self.batch_size))\n for j in range(num_batchs):\n current_batch = x[self.batch_size * j : self.batch_size * (j + 1)]\n current_batch = current_batch.astype(self.external_dtype)\n _, out = yield from a.forward(current_batch, strict=False)\n outs.append(out)\n outs = np.concatenate(outs, axis=0)\n return outs\n\n # ===========================================================\n # intialize time measurements\n # ===========================================================\n self.time_gradient_estimation = 0\n\n self.time_search = 0\n\n self.time_initialization = 0\n\n # ===========================================================\n # Initialize variables, constants, hyperparameters, etc.\n # ===========================================================\n\n # make sure repeated warnings are shown\n warnings.simplefilter(\"always\", UserWarning)\n\n # get bounds\n bounds = a.bounds()\n self.clip_min, self.clip_max = bounds\n\n # ===========================================================\n # Find starting point\n # ===========================================================\n\n yield from self.initialize_starting_point(a)\n\n if a.perturbed is None:\n warnings.warn(\n \"Initialization failed.\"\n \" it might be necessary to pass an explicit starting\"\n \" point.\"\n )\n return\n\n self.time_initialization += time.time() - self.t_initial\n\n assert a.perturbed.dtype == self.external_dtype\n # get original and starting point in the right format\n original = a.unperturbed.astype(self.internal_dtype)\n perturbed = 
a.perturbed.astype(self.internal_dtype)\n\n # ===========================================================\n # Iteratively refine adversarial\n # ===========================================================\n t0 = time.time()\n\n # Project the initialization to the boundary.\n perturbed, dist_post_update = yield from self.binary_search_batch(\n original, np.expand_dims(perturbed, 0), decision_function\n )\n\n dist = self.compute_distance(perturbed, original)\n\n distance = a.distance.value\n self.time_search += time.time() - t0\n\n # log starting point\n self.log_step(0, distance)\n\n for step in range(1, iterations + 1):\n\n t0 = time.time()\n\n # ===========================================================\n # Gradient direction estimation.\n # ===========================================================\n # Choose delta.\n delta = self.select_delta(dist_post_update, step)\n\n # Choose number of evaluations.\n num_evals = int(\n min([self.initial_num_evals * np.sqrt(step), self.max_num_evals])\n )\n\n # approximate gradient.\n gradf = yield from self.approximate_gradient(\n decision_function, perturbed, num_evals, delta\n )\n\n if self.constraint == \"linf\":\n update = np.sign(gradf)\n else:\n update = gradf\n t1 = time.time()\n self.time_gradient_estimation += t1 - t0\n\n # ===========================================================\n # Update, and binary search back to the boundary.\n # ===========================================================\n if self.stepsize_search == \"geometric_progression\":\n # find step size.\n epsilon = yield from self.geometric_progression_for_stepsize(\n perturbed, update, dist, decision_function, step\n )\n\n # Update the sample.\n perturbed = np.clip(\n perturbed + epsilon * update, self.clip_min, self.clip_max\n )\n\n # Binary search to return to the boundary.\n perturbed, dist_post_update = yield from self.binary_search_batch(\n original, perturbed[None], decision_function\n )\n\n elif self.stepsize_search == \"grid_search\":\n # Grid search for stepsize.\n epsilons = np.logspace(-4, 0, num=20, endpoint=True) * dist\n epsilons_shape = [20] + len(self.shape) * [1]\n perturbeds = perturbed + epsilons.reshape(epsilons_shape) * update\n perturbeds = np.clip(perturbeds, self.clip_min, self.clip_max)\n idx_perturbed = yield from decision_function(perturbeds)\n\n if np.sum(idx_perturbed) > 0:\n # Select the perturbation that yields the minimum\n # distance after binary search.\n perturbed, dist_post_update = yield from self.binary_search_batch(\n original, perturbeds[idx_perturbed], decision_function\n )\n t2 = time.time()\n\n self.time_search += t2 - t1\n\n # compute new distance.\n dist = self.compute_distance(perturbed, original)\n\n # ===========================================================\n # Log the step\n # ===========================================================\n # Using foolbox definition of distance for logging.\n if self.constraint == \"l2\":\n distance = dist ** 2 / self.d / (self.clip_max - self.clip_min) ** 2\n elif self.constraint == \"linf\":\n distance = dist / (self.clip_max - self.clip_min)\n message = \" (took {:.5f} seconds)\".format(t2 - t0)\n self.log_step(step, distance, message)\n sys.stdout.flush()\n\n # ===========================================================\n # Log overall runtime\n # ===========================================================\n\n self.log_time()\n\n # ===============================================================\n #\n # Other methods\n #\n # 
===============================================================\n\n def initialize_starting_point(self, a):\n starting_point = self._starting_point\n\n if a.perturbed is not None:\n print(\n \"Attack is applied to a previously found adversarial.\"\n \" Continuing search for better adversarials.\"\n )\n if starting_point is not None: # pragma: no cover\n warnings.warn(\n \"Ignoring starting_point parameter because the attack\"\n \" is applied to a previously found adversarial.\"\n )\n return\n\n if starting_point is not None:\n yield from a.forward_one(starting_point)\n assert (\n a.perturbed is not None\n ), \"Invalid starting point provided. Please provide a starting point that is adversarial.\"\n return\n\n \"\"\"\n Apply BlendedUniformNoiseAttack if without\n initialization.\n Efficient Implementation of BlendedUniformNoiseAttack in Foolbox.\n \"\"\"\n success = 0\n num_evals = 0\n\n while True:\n random_noise = np.random.uniform(\n self.clip_min, self.clip_max, size=self.shape\n )\n _, success = yield from a.forward_one(\n random_noise.astype(self.external_dtype)\n )\n num_evals += 1\n if success:\n break\n if num_evals > 1e4:\n return\n\n # Binary search to minimize l2 distance to the original input.\n low = 0.0\n high = 1.0\n while high - low > 0.001:\n mid = (high + low) / 2.0\n blended = (1 - mid) * a.unperturbed + mid * random_noise\n _, success = yield from a.forward_one(blended.astype(self.external_dtype))\n if success:\n high = mid\n else:\n low = mid\n\n def compute_distance(self, x1, x2):\n if self.constraint == \"l2\":\n return np.linalg.norm(x1 - x2)\n elif self.constraint == \"linf\":\n return np.max(abs(x1 - x2))\n\n def project(self, unperturbed, perturbed_inputs, alphas):\n \"\"\" Projection onto given l2 / linf balls in a batch. \"\"\"\n alphas_shape = [len(alphas)] + [1] * len(self.shape)\n alphas = alphas.reshape(alphas_shape)\n if self.constraint == \"l2\":\n projected = (1 - alphas) * unperturbed + alphas * perturbed_inputs\n elif self.constraint == \"linf\":\n projected = np.clip(\n perturbed_inputs, unperturbed - alphas, unperturbed + alphas\n )\n return projected\n\n def binary_search_batch(self, unperturbed, perturbed_inputs, decision_function):\n \"\"\" Binary search to approach the boundary. 
\"\"\"\n\n # Compute distance between each of perturbed and unperturbed input.\n dists_post_update = np.array(\n [\n self.compute_distance(unperturbed, perturbed_x)\n for perturbed_x in perturbed_inputs\n ]\n )\n\n # Choose upper thresholds in binary searchs based on constraint.\n if self.constraint == \"linf\":\n highs = dists_post_update\n # Stopping criteria.\n thresholds = dists_post_update * self.theta\n else:\n highs = np.ones(len(perturbed_inputs))\n thresholds = self.theta\n\n lows = np.zeros(len(perturbed_inputs))\n\n # Call recursive function.\n while np.max((highs - lows) / thresholds) > 1:\n # projection to mids.\n mids = (highs + lows) / 2.0\n mid_inputs = self.project(unperturbed, perturbed_inputs, mids)\n\n # Update highs and lows based on model decisions.\n decisions = yield from decision_function(mid_inputs)\n lows = np.where(decisions == 0, mids, lows)\n highs = np.where(decisions == 1, mids, highs)\n\n out_inputs = self.project(unperturbed, perturbed_inputs, highs)\n\n # Compute distance of the output to select the best choice.\n # (only used when stepsize_search is grid_search.)\n dists = np.array(\n [self.compute_distance(unperturbed, out) for out in out_inputs]\n )\n idx = np.argmin(dists)\n\n dist = dists_post_update[idx]\n out = out_inputs[idx]\n return out, dist\n\n def select_delta(self, dist_post_update, current_iteration):\n \"\"\"\n Choose the delta at the scale of distance\n between x and perturbed sample.\n \"\"\"\n if current_iteration == 1:\n delta = 0.1 * (self.clip_max - self.clip_min)\n else:\n if self.constraint == \"l2\":\n delta = np.sqrt(self.d) * self.theta * dist_post_update\n elif self.constraint == \"linf\":\n delta = self.d * self.theta * dist_post_update\n\n return delta\n\n def approximate_gradient(self, decision_function, sample, num_evals, delta):\n \"\"\" Gradient direction estimation \"\"\"\n # Generate random vectors.\n noise_shape = [num_evals] + list(self.shape)\n if self.constraint == \"l2\":\n rv = np.random.randn(*noise_shape)\n elif self.constraint == \"linf\":\n rv = np.random.uniform(low=-1, high=1, size=noise_shape)\n\n axis = tuple(range(1, 1 + len(self.shape)))\n rv = rv / np.sqrt(np.sum(rv ** 2, axis=axis, keepdims=True))\n perturbed = sample + delta * rv\n perturbed = np.clip(perturbed, self.clip_min, self.clip_max)\n rv = (perturbed - sample) / delta\n\n # query the model.\n decisions = yield from decision_function(perturbed)\n decision_shape = [len(decisions)] + [1] * len(self.shape)\n fval = 2 * decisions.astype(self.internal_dtype).reshape(decision_shape) - 1.0\n\n # Baseline subtraction (when fval differs)\n vals = fval if abs(np.mean(fval)) == 1.0 else fval - np.mean(fval)\n gradf = np.mean(vals * rv, axis=0)\n\n # Get the gradient direction.\n gradf = gradf / np.linalg.norm(gradf)\n\n return gradf\n\n def geometric_progression_for_stepsize(\n self, x, update, dist, decision_function, current_iteration\n ):\n \"\"\" Geometric progression to search for stepsize.\n Keep decreasing stepsize by half until reaching\n the desired side of the boundary.\n \"\"\"\n epsilon = dist / np.sqrt(current_iteration)\n while True:\n updated = np.clip(x + epsilon * update, self.clip_min, self.clip_max)\n success = (yield from decision_function(updated[None]))[0]\n if success:\n break\n else:\n epsilon = epsilon / 2.0 # pragma: no cover\n\n return epsilon\n\n def log_step(self, step, distance, message=\"\", always=False):\n if self.log_every_n_steps is None or self.log_every_n_steps == np.inf:\n return\n if not always and step % 
self.log_every_n_steps != 0:\n return\n logging.info(\"Step {}: {:.5e} {}\".format(step, distance, message))\n\n def log_time(self):\n t_total = time.time() - self.t_initial\n rel_initialization = self.time_initialization / t_total\n rel_gradient_estimation = self.time_gradient_estimation / t_total\n rel_search = self.time_search / t_total\n\n self.printv(\"Time since beginning: {:.5f}\".format(t_total))\n self.printv(\n \" {:2.1f}% for initialization ({:.5f})\".format(\n rel_initialization * 100, self.time_initialization\n )\n )\n self.printv(\n \" {:2.1f}% for gradient estimation ({:.5f})\".format(\n rel_gradient_estimation * 100, self.time_gradient_estimation\n )\n )\n self.printv(\n \" {:2.1f}% for search ({:.5f})\".format(rel_search * 100, self.time_search)\n )\n\n def printv(self, *args, **kwargs):\n self.logger.info(*args, **kwargs)\n\n\ndef BoundaryAttackPlusPlus(\n model=None, criterion=Misclassification(), distance=MSE, threshold=None\n):\n warn(\"BoundaryAttackPlusPlus is deprecated; use HopSkipJumpAttack.\")\n return HopSkipJumpAttack(model, criterion, distance, threshold)\n",
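A sketch of how an attack with this interface is typically invoked; this follows the Foolbox 2.x calling convention as far as it matches the signature above, and `net`, `image`, and `label` are assumed to exist (a pretrained PyTorch classifier and one correctly classified input in [0, 1]):

```python
import numpy as np
import foolbox

# Assumption: `net` is a pretrained torch.nn.Module classifier;
# bounds and num_classes are illustrative.
fmodel = foolbox.models.PyTorchModel(net.eval(), bounds=(0, 1), num_classes=10)

attack = foolbox.attacks.HopSkipJumpAttack(fmodel)
# Decision-based: only the predicted label of `fmodel` is queried.
adversarial = attack(image, label, iterations=32, initial_num_evals=100)
if adversarial is not None:
    print('L2 perturbation:', np.linalg.norm(adversarial - image))
```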
"import torch\nfrom torch.nn import Module\nfrom cnns.nnlib.utils.general_utils import next_power2\nfrom torch.nn.functional import pad as torch_pad\nfrom cnns.nnlib.utils.complex_mask import get_disk_mask\nfrom cnns.nnlib.utils.complex_mask import get_hyper_mask\nfrom cnns.nnlib.utils.shift_DC_component import shift_DC\n\nclass FFTBandFunctionComplexMask2D(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward\n passes which operate on Tensors.\n \"\"\"\n\n signal_ndim = 2\n\n @staticmethod\n def forward(ctx, input, args, val=0, get_mask=get_hyper_mask,\n onesided=True):\n \"\"\"\n In the forward pass we receive a Tensor containing the input\n and return a Tensor containing the output. ctx is a context\n object that can be used to stash information for backward\n computation. You can cache arbitrary objects for use in the\n backward pass using the ctx.save_for_backward method.\n\n :param input: the input image\n :param args: arguments that define: compress_rate - the compression \n ratio, interpolate - the interpolation within mask: const, linear, exp,\n log, etc.\n :param val: the value (to change coefficients to) for the mask\n :onesided: should use the onesided FFT thanks to the conjugate symmetry\n or want to preserve all the coefficients\n \"\"\"\n # ctx.save_for_backward(input)\n # print(\"round forward\")\n FFTBandFunctionComplexMask2D.mark_dirty(input)\n\n N, C, H, W = input.size()\n\n if H != W:\n raise Exception(\"We support only squared input.\")\n\n if args.next_power2:\n H_fft = next_power2(H)\n W_fft = next_power2(W)\n pad_H = H_fft - H\n pad_W = W_fft - W\n input = torch_pad(input, (0, pad_W, 0, pad_H), 'constant', 0)\n else:\n H_fft = H\n W_fft = W\n xfft = torch.rfft(input,\n signal_ndim=FFTBandFunctionComplexMask2D.signal_ndim,\n onesided=onesided)\n del input\n\n _, _, H_xfft, W_xfft, _ = xfft.size()\n # assert H_fft == W_xfft, \"The input tensor has to be squared.\"\n\n mask, _ = get_mask(H=H_xfft, W=W_xfft,\n compress_rate=args.compress_fft_layer,\n val=val, interpolate=args.interpolate,\n onesided=onesided)\n mask = mask[:, 0:W_xfft, :]\n # print(mask)\n mask = mask.to(xfft.dtype).to(xfft.device)\n xfft = xfft * mask\n\n if ctx is not None:\n ctx.xfft = xfft\n if args.is_DC_shift:\n ctx.xfft = shift_DC(xfft, onesided=onesided)\n\n # xfft = shift_DC(xfft, onesided=onesided, shift_to=\"center\")\n # xfft = shift_DC(xfft, onesided=onesided, shift_to=\"corner\")\n out = torch.irfft(input=xfft,\n signal_ndim=FFTBandFunctionComplexMask2D.signal_ndim,\n signal_sizes=(H_fft, W_fft),\n onesided=onesided)\n out = out[..., :H, :W]\n return out\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the\n gradient of the loss with respect to the output, and we need\n to compute the gradient of the loss with respect to the input.\n\n See: https://arxiv.org/pdf/1706.04701.pdf appendix A\n\n We do not want to zero out the gradient.\n\n Defenses that mask a network’s gradients by quantizing the input values\n pose a challenge to gradient-based opt-mizationmethodsfor\n generating adversarial examples,such as the procedure we\n describe in Section 2.4. Astraightforward application of the\n approach would findzero gradients, because small changes to the input\n do notalter the output at all. 
In Section 3.1.1, we describe\n an approach where we run the optimizer on a substitute net-work without\n the color depth reduction step, which ap-proximates the real network.\n \"\"\"\n # print(\"round backward\")\n return grad_output.clone(), None, None, None, None, None\n\n\nclass FFTBand2DcomplexMask(Module):\n \"\"\"\n No PyTorch Autograd used - we compute backward pass on our own.\n \"\"\"\n \"\"\"\n FFT Band layer removes high frequency coefficients.\n \"\"\"\n\n def __init__(self, args):\n super(FFTBand2DcomplexMask, self).__init__()\n self.args = args\n\n def forward(self, input):\n \"\"\"\n This is the fully manual implementation of the forward and backward\n passes via the torch.autograd.Function.\n\n :param input: the input map (e.g., an image)\n :return: the result of 1D convolution\n \"\"\"\n return FFTBandFunctionComplexMask2D.apply(\n input, # input image\n self.args, # arguments for compression rate, is_nextPower2, etc.\n 0,\n # value set after compression (we usually zero out the coefficients)\n get_hyper_mask, # get_mask (the hyper mask is the most precise one)\n True, # onesided\n )\n",
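The forward pass above boils down to rFFT, multiply by a frequency mask, inverse rFFT, with the gradient passed through unchanged in backward. torch.rfft/torch.irfft are the pre-1.8 API; below is a stripped-down sketch of the same band-filtering idea with the modern torch.fft module, using a plain rectangular low-pass mask in place of the project-specific hyper/disk masks (the mask shape is my simplification, not the repo's):

```python
import torch

def fft_band_filter(x: torch.Tensor, keep: float = 0.5) -> torch.Tensor:
    """Zero out high-frequency rFFT coefficients of an NCHW batch.

    `keep` is the fraction of frequencies preserved per axis; this
    rectangular mask is a simplification of the masks used above.
    """
    n, c, h, w = x.shape
    X = torch.fft.rfft2(x)                 # complex, shape (N, C, H, W//2+1)
    kh, kw = int(h * keep / 2), int(X.shape[-1] * keep)
    mask = torch.zeros_like(X.real)
    mask[..., :kh, :kw] = 1                # low positive-frequency rows
    mask[..., h - kh:, :kw] = 1            # low negative-frequency rows
    return torch.fft.irfft2(X * mask, s=(h, w))

out = fft_band_filter(torch.randn(2, 3, 32, 32), keep=0.5)
print(out.shape)  # torch.Size([2, 3, 32, 32])
```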
"import numpy as np\n\nfrom foolbox.attacks import PointwiseAttack as Attack\n\n\ndef test_attack(bn_model, bn_criterion, bn_images, bn_labels):\n attack = Attack(bn_model, bn_criterion)\n advs = attack(bn_images, bn_labels, unpack=False)\n for adv in advs:\n assert adv.perturbed is not None\n assert adv.distance.value < np.inf\n\n\ndef test_attack_startingpoint(bn_model, bn_criterion, bn_images, bn_labels):\n attack = Attack(bn_model, bn_criterion)\n np.random.seed(2)\n starting_point = np.random.uniform(0, 1, size=bn_images[0].shape).astype(\n bn_images.dtype\n )\n advs = attack(bn_images, bn_labels, unpack=False, starting_point=starting_point)\n for adv in advs:\n assert adv.perturbed is not None\n assert adv.distance.value < np.inf\n\n\n# TODO: Add this test again\n\"\"\"\ndef test_attack_continue(bn_adversarial):\n adv = bn_adversarial\n attack = Attack()\n o = adv.unperturbed\n np.random.seed(2)\n starting_point = np.random.uniform(\n 0, 1, size=o.shape).astype(o.dtype)\n adv.forward_one(starting_point)\n assert adv.perturbed is not None\n attack(adv)\n assert adv.perturbed is not None\n assert adv.distance.value < np.inf\n\"\"\"\n\n\ndef test_attack_gl(gl_bn_model, bn_criterion, bn_images, bn_labels):\n attack = Attack(gl_bn_model, bn_criterion)\n advs = attack(bn_images, bn_labels, unpack=False)\n for adv in advs:\n assert adv.perturbed is not None\n assert adv.distance.value < np.inf\n\n\ndef test_attack_impossible(bn_model, bn_impossible_criterion, bn_images, bn_labels):\n attack = Attack(bn_model, bn_impossible_criterion)\n advs = attack(bn_images, bn_labels, unpack=False)\n for adv in advs:\n assert adv.perturbed is None\n assert adv.distance.value == np.inf\n"
] | [
[
"torch.Tensor"
],
[
"torch.nn.functional.conv2d"
],
[
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d",
"torch.nn.functional.log_softmax"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.use",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"numpy.hstack",
"numpy.argmax",
"numpy.mean",
"numpy.random.choice"
],
[
"torch.nn.Sequential",
"torch.randn",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.autograd.Variable"
],
[
"numpy.expand_dims",
"numpy.sqrt",
"numpy.clip",
"numpy.logspace",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.max",
"numpy.sign",
"numpy.argmin",
"numpy.mean",
"numpy.prod",
"numpy.random.randn",
"numpy.random.uniform",
"numpy.where",
"numpy.sum"
],
[
"torch.rfft",
"torch.nn.functional.pad",
"torch.irfft"
],
[
"numpy.random.uniform",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yihui-he2020/epipolar-transformers | [
"6824f4345b2998500fbacd0f4e30f67f8e3da7b8",
"6824f4345b2998500fbacd0f4e30f67f8e3da7b8"
] | [
"modeling/backbones/resnet.py",
"vision/triangulation.py"
] | [
"import logging\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nfrom modeling.layers.epipolar import Epipolar\nfrom modeling import registry\nfrom core import cfg\nfrom .basic_batch import find_tensor_peak_batch\nfrom utils.logger import setup_logger\nfrom utils.model_serialization import load_state_dict\n\n# logger = logging.getLogger(__name__)\nlogger = setup_logger(\"resnet\", cfg.FOLDER_NAME)\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, planes)\n self.bn1 = norm_layer(planes)\n self.conv2 = conv3x3(planes, planes, stride)\n self.bn2 = norm_layer(planes)\n self.conv3 = conv1x1(planes, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=None):\n super(ResNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self.inplanes = 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = norm_layer(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = 
nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.out_channels = 512 * block.expansion\n #self.fc = nn.Linear(self.out_channels, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None):\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n #x = self.fc(x)\n\n return x\n\n\[email protected]('R-18')\ndef resnet18(cfg, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if cfg.BACKBONE.PRETRAINED:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model\n\n\[email protected]('R-34')\ndef resnet34(cfg, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if cfg.BACKBONE.PRETRAINED:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)\n return model\n\n\[email protected]('R-50')\ndef resnet50(cfg, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if cfg.BACKBONE.PRETRAINED:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)\n return model\n\n\[email protected]('R-101')\ndef resnet101(cfg, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if cfg.BACKBONE.PRETRAINED:\n 
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']), strict=False)\n return model\n\n\[email protected]('R-152')\ndef resnet152(cfg, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if cfg.BACKBONE.PRETRAINED:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']), strict=False)\n return model\n\n\n# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# Written by Chunyu Wang ([email protected]), modified by Yihui He\n# ------------------------------------------------------------------------------\n\n\nclass PoseResNet(nn.Module):\n\n def __init__(self, block, layers, cfg, **kwargs):\n if cfg.BACKBONE.BN_MOMENTUM < 0:\n self.BN_MOMENTUM = None\n else:\n self.BN_MOMENTUM = cfg.BACKBONE.BN_MOMENTUM\n\n DECONV_WITH_BIAS = False\n NUM_DECONV_LAYERS = 3\n NUM_DECONV_FILTERS = [256, 256, 256]\n NUM_DECONV_KERNELS = [4, 4, 4]\n FINAL_CONV_KERNEL = 1 #cfg.POSE_RESNET.FINAL_CONV_KERNEL\n self.inplanes = 64\n self.deconv_with_bias = DECONV_WITH_BIAS\n\n super(PoseResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=self.BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n # used for deconv layers\n self.deconv_layers = self._make_deconv_layer(\n NUM_DECONV_LAYERS,\n NUM_DECONV_FILTERS,\n NUM_DECONV_KERNELS,\n )\n\n self.final_layer = nn.Conv2d(\n in_channels=NUM_DECONV_FILTERS[-1],\n out_channels=cfg.KEYPOINT.NUM_PTS,\n kernel_size=FINAL_CONV_KERNEL, \n stride=1,\n padding=1 if FINAL_CONV_KERNEL == 3 else 0\n )\n\n if 'epipolarpose' in cfg.BACKBONE.BODY:\n if cfg.EPIPOLAR.MERGE == 'both':\n self.epipolar_sampler1 = Epipolar()\n self.epipolar_sampler = Epipolar()\n else:\n self.epipolar_sampler = None\n self.epipolar_sampler1 = None\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=self.BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _get_deconv_cfg(self, deconv_kernel, index):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n for i in 
range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n layers.append(\n nn.ConvTranspose2d(\n in_channels=self.inplanes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias))\n layers.append(nn.BatchNorm2d(planes, momentum=self.BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x, other_inputs=[None, None, None, None, None, None, None]):\n batch_size = x.shape[0]\n other_features, other_KRT, other_heatmaps, KRT, camera, other_camera, other_img = other_inputs\n features, heatmaps, batch_locs, batch_scos, corr_poss, depths = [], [], [], [], [], []\n # 3 x 256 x 256\n x = self.conv1(x)\n # 128 x 128\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n # 256 x 64 x 64\n\n def getOtherFeat(feat, sampler=None):\n # skip feature aggregation for last layer\n corr_pos = None\n depth = None\n if other_features is None:\n # normal hourglass\n return feat, None, None, None\n if 'epipolarpose' in cfg.BACKBONE.BODY:\n ret, corr_pos, depth, sample_locs = \\\n sampler(feat, other_features, KRT, other_KRT, \\\n camera=camera, other_camera=other_camera)\n return ret + feat, corr_pos, depth, sample_locs\n \n if cfg.EPIPOLAR.MERGE == 'early':\n feature = x\n x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler)\n depths.append(depth)\n corr_poss.append(corr_pos)\n elif cfg.EPIPOLAR.MERGE == 'both':\n feature = x\n x, _, _, _ = getOtherFeat(feature, sampler=self.epipolar_sampler)\n\n x = self.layer2(x)\n # 512 x 32 × 32\n x = self.layer3(x)\n # 1024 x 16 × 16\n x = self.layer4(x)\n # 2048 x 8 x 8\n\n feature = self.deconv_layers(x)\n #256 x 64 x 64\n \n if cfg.EPIPOLAR.MERGE == 'late':\n x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler)\n depths.append(depth)\n corr_poss.append(corr_pos)\n elif cfg.EPIPOLAR.MERGE == 'both':\n x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler1) \n depths.append(depth)\n corr_poss.append(corr_pos) \n else:\n x = feature\n\n #20 x 64 x 64\n heatmaps.append(self.final_layer(x))\n \n # The location of the current batch\n for ibatch in range(batch_size):\n batch_location, batch_score = find_tensor_peak_batch(heatmaps[-1][ibatch], \n cfg.KEYPOINT.SIGMA, \n cfg.BACKBONE.DOWNSAMPLE)\n batch_locs.append(batch_location)\n batch_scos.append(batch_score)\n batch_locs, batch_scos = torch.stack(batch_locs), torch.stack(batch_scos)\n if other_features is None:\n corr_poss, depths = None, None\n else:\n corr_poss = corr_poss[-1]\n depths = depths[-1]\n\n return feature, heatmaps, batch_locs, batch_scos, corr_poss, depths, sample_locs, None \n\n def init_weights(self, pretrained=None):\n if pretrained is not None:\n if isinstance(pretrained, str) and os.path.isfile(pretrained):\n logger.info('=> loading pretrained model {}'.format(pretrained))\n pretrained_state_dict = torch.load(pretrained)\n else:\n logger.info('=> loading pretrained model from web')\n pretrained_state_dict = pretrained\n\n logger.info('=> init deconv weights from normal distribution')\n for name, m in self.deconv_layers.named_modules():\n if isinstance(m, nn.ConvTranspose2d):\n logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n if 
self.deconv_with_bias:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n logger.info('=> init {}.weight as 1'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n logger.info('=> init final conv weights from normal distribution')\n for m in self.final_layer.modules():\n if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n nn.init.constant_(m.bias, 0)\n #load_state_dict(self, pretrained_state_dict, prefix='resnet.')\n #load_state_dict(self, pretrained_state_dict, prefix='backbone.')\n load_state_dict(self, pretrained_state_dict, strict=False, ignored_layers=['final_layer.bias', 'final_layer.weight'], prefix=cfg.WEIGHTS_PREFIX, prefix_replace=cfg.WEIGHTS_PREFIX_REPLACE)\n #self.load_state_dict(pretrained_state_dict, strict=False)\n else:\n logger.info('=> init weights from normal distribution')\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n nn.init.normal_(m.weight, std=0.001)\n # nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.ConvTranspose2d):\n nn.init.normal_(m.weight, std=0.001)\n if self.deconv_with_bias:\n nn.init.constant_(m.bias, 0)\n\n\nresnet_spec = {'18': (BasicBlock, [2, 2, 2, 2]),\n '34': (BasicBlock, [3, 4, 6, 3]),\n '50': (Bottleneck, [3, 4, 6, 3]),\n '101': (Bottleneck, [3, 4, 23, 3]),\n '152': (Bottleneck, [3, 8, 36, 3])}\n\[email protected]('poseR-18')\[email protected]('poseR-34')\[email protected]('poseR-50')\[email protected]('poseR-101')\[email protected]('poseR-152')\[email protected]('epipolarposeR-18')\[email protected]('epipolarposeR-34')\[email protected]('epipolarposeR-50')\[email protected]('epipolarposeR-101')\[email protected]('epipolarposeR-152')\ndef get_pose_net(cfg, **kwargs):\n num_layers = cfg.BACKBONE.BODY.split('-')[-1]\n\n block_class, layers = resnet_spec[num_layers]\n\n model = PoseResNet(block_class, layers, cfg, **kwargs)\n\n if cfg.BACKBONE.PRETRAINED:\n # model.init_weights(cfg.NETWORK.PRETRAINED)\n if cfg.BACKBONE.PRETRAINED_WEIGHTS:\n model.init_weights(cfg.BACKBONE.PRETRAINED_WEIGHTS)\n else:\n model.init_weights(model_zoo.load_url(model_urls['resnet'+num_layers]))\n\n return model\n",
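The `_get_deconv_cfg` choices above are exactly the (padding, output_padding) pairs that make each stride-2 ConvTranspose2d double the spatial resolution, via out = (in - 1) * stride - 2 * padding + kernel + output_padding. A quick check for the three supported kernel sizes:

```python
import torch
import torch.nn as nn

x = torch.randn(1, 256, 8, 8)
for kernel, padding, output_padding in [(4, 1, 0), (3, 1, 1), (2, 0, 0)]:
    deconv = nn.ConvTranspose2d(256, 256, kernel_size=kernel, stride=2,
                                padding=padding,
                                output_padding=output_padding, bias=False)
    # (8 - 1)*2 - 2*padding + kernel + output_padding == 16 in all cases
    print(kernel, tuple(deconv(x).shape))  # -> (1, 256, 16, 16)
```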
"import numpy as np\nimport math\nimport cv2\nimport random\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nif __name__ != '__main__':\n from .multiview import camera_center, pix2coord, coord2pix\n from core import cfg\n\nfrom .camera_model import CameraModel\nfrom .multi_camera_system import MultiCameraSystem\nfrom pymvg.ros_compat import sensor_msgs as sensor_msgs_compat\n\ndef toT(A):\n if isinstance(A, torch.FloatTensor):\n return A\n return torch.from_numpy(A)\n\ndef toV(A):\n return Variable(toT(A))\n\ndef pytTriangulateDLT_getuv(M, i, p):\n d = torch.mm(p, M[2, :].unsqueeze(0))\n d = M[i, :].expand( (p.shape[0], M.shape[1]) ) - d\n return d.unsqueeze(1)\n\ndef pytTriangulateDLT_multi(Ms, ps):\n ds = list()\n for u in range( Ms.size(0) ):\n ds.append( pytTriangulateDLT_getuv(Ms[u,:,:], 0, ps[u, :, 0:1]) )\n ds.append( pytTriangulateDLT_getuv(Ms[u,:,:], 1, ps[u, :, 1:2]) )\n Ab = torch.cat( ds, dim=1 )\n A = Ab[:, :, :3]\n At = torch.transpose(A, 1, 2)\n AtA = torch.matmul(At, A)\n tpinv = [t.inverse() for t in torch.unbind(AtA)]\n invAtA = torch.stack(tpinv)\n P = torch.matmul( invAtA, torch.matmul(At, -Ab[:, :, 3].unsqueeze(2)) )\n return P\n\ndef pytTriangulateDLT(M1, M2, p1, p2):\n d1 = pytTriangulateDLT_getuv(M1, 0, p1[:, 0:1])\n d2 = pytTriangulateDLT_getuv(M1, 1, p1[:, 1:2])\n d3 = pytTriangulateDLT_getuv(M2, 0, p2[:, 0:1])\n d4 = pytTriangulateDLT_getuv(M2, 1, p2[:, 1:2])\n Ab = torch.cat( (d1, d2, d3, d4), dim=1 )\n A = Ab[:, :, :3]\n At = torch.transpose(A, 1, 2)\n AtA = torch.matmul(At, A)\n tpinv = [t.inverse() for t in torch.unbind(AtA)]\n invAtA = torch.stack(tpinv)\n P = torch.matmul( invAtA, torch.matmul(At, -Ab[:, :, 3].unsqueeze(2)) )\n return P\n\ndef pytTriangulateNLR_calcGrad(M, p, Pt):\n m1P = torch.matmul(M[0, :3].unsqueeze(0), Pt) + M[0, 3]\n m2P = torch.matmul(M[1, :3].unsqueeze(0), Pt) + M[1, 3]\n m3P = torch.matmul(M[2, :3].unsqueeze(0), Pt) + M[2, 3]\n m3P_sq = m3P * m3P\n x = torch.div(m1P, m3P)\n y = torch.div(m2P, m3P)\n e_u = p[:, 0:1].unsqueeze(2) - x\n e_v = p[:, 1:2].unsqueeze(2) - y\n grad1 = -2 * e_u * torch.div(M[0,:3] * m3P - M[2,:3] * m1P, m3P_sq)\n grad2 = -2 * e_v * torch.div(M[1,:3] * m3P - M[2,:3] * m2P, m3P_sq)\n return grad1+grad2, e_u*e_u + e_v*e_v, torch.stack((x, y)).squeeze(2).squeeze(2)\n\ndef pytTriangulateNLR(M1, M2, p1, p2, Pt):\n Pt = Pt.clone()\n lr = 0.001\n for it in range(10000):\n grad1, l1, dc = pytTriangulateNLR_calcGrad(M1, p1, Pt)\n grad2, l2, dc = pytTriangulateNLR_calcGrad(M2, p2, Pt)\n grad = torch.transpose(grad1+grad2, 1, 2) * lr\n if grad.abs().max().data[0] < 1e-4:\n print(\"Non-linear refinement finished in %d iterations\"%(it))\n break\n #print \"%d %g %g %g %g\"%(it, (l1+l2).data[0], Pt.data[0][0][0], Pt.data[0][1][0], Pt.data[0][2][0])\n Pt -= grad\n return Pt\n\ndef point2line(p3D, x1, x2):\n # http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html\n d1 = x1 - p3D\n d2 = x2 - p3D\n d3 = x1 - x2\n #dist2 = np.linalg.norm( np.cross( d1, d2 ) ) / np.linalg.norm(d3)\n #expanded so it is faster..\n cro = (d1[1]*d2[2]-d1[2]*d2[1], d1[2]*d2[0]-d1[0]*d2[2], d1[0]*d2[1]-d1[1]*d2[0])\n return math.sqrt(cro[0]*cro[0] + cro[1]*cro[1] + cro[2]*cro[2]) / math.sqrt(d3[0]*d3[0] + d3[1]*d3[1] + d3[2]*d3[2])\n\nRANSAC_ITER = 100\n\ndef triangulate(pts, KRTs, confs):\n \"\"\"\n Args:\n pts: view x 42 x 2\n KRTs: view x 3 x 4\n confs: view x 42\n Return:\n 42 x 3\n \"\"\"\n if torch.is_tensor(pts): pts = pts.cpu().numpy()\n if torch.is_tensor(KRTs): KRTs = 
KRTs.cpu().numpy()\n if torch.is_tensor(confs): confs = confs.cpu().numpy()\n\n cam_centers = []\n invAs = []\n for M in KRTs:\n cam_center, invA = camera_center(M)\n cam_centers.append(cam_center)\n invAs.append(invA)\n cam_centers = np.array(cam_centers)\n invAs = np.array(invAs)\n # view x 3\n ret = []\n for cands, conf in zip(pts.transpose((1,0,2)), confs.T):\n best = 0\n bestconf = 0\n best3D = [0, 0, 0]\n selected_idx = conf > cfg.KEYPOINT.CONF_THRES\n if selected_idx.sum() <= 1:\n ret.append(best3D)\n continue\n cands = cands[selected_idx]\n KRT = KRTs[selected_idx]\n\n for _ in range(RANSAC_ITER):\n a = random.randint(0, len(cands)-1)\n b = random.randint(0, len(cands)-1)\n if a == b:\n continue\n\n p3D = cv2.triangulatePoints( KRT[a], KRT[b], cands[a], cands[b])\n p3D /= p3D[3]\n p3D = p3D[:3].squeeze()\n\n acc = 0\n for cand, cam_center, invA in zip(cands, cam_centers[selected_idx], invAs[selected_idx]):\n x1 = np.dot(invA, np.append(cand, 1)) + cam_center\n dist = point2line(p3D, x1, cam_center)\n if dist < cfg.KEYPOINT.RANSAC_THRES:\n acc += 1\n\n if acc > best:\n best = acc\n best3D = p3D\n ret.append(best3D)\n return np.array(ret)\n\ndef cv2triangulate(KRT0, KRT1, pts0, pts1):\n p3D = cv2.triangulatePoints(KRT0, KRT1, pts0, pts1)\n p3D /= p3D[3]\n p3D = p3D[:3].squeeze()\n return p3D\n\ndef triangulate_refine(pts, KRTs, Ks, RTs, confs):\n \"\"\"\n Args:\n pts: view x 42 x 2\n KRTs: view x 3 x 4\n confs: view x 42\n Return:\n 42 x 3\n \"\"\"\n camera_system = build_multi_camera_system(Ks, RTs)\n\n if torch.is_tensor(pts): pts = pts.cpu().numpy()\n if torch.is_tensor(KRTs): KRTs = KRTs.cpu().numpy()\n if torch.is_tensor(Ks): Ks = Ks.cpu().numpy()\n if torch.is_tensor(RTs): RTs = RTs.cpu().numpy()\n if torch.is_tensor(confs): confs = confs.cpu().numpy()\n assert len(confs.T) == pts.shape[1]\n\n cam_centers = []\n invAs = []\n for M in KRTs:\n cam_center, invA = camera_center(M)\n cam_centers.append(cam_center)\n invAs.append(invA)\n cam_centers = np.array(cam_centers)\n invAs = np.array(invAs)\n # view x 3\n ret = []\n for all_cands, conf in zip(pts.transpose((1,0,2)), confs.T):\n best = 0\n bestconf = 0\n best3D = [0, 0, 0]\n bestinliers = []\n selected_idx = conf > cfg.KEYPOINT.CONF_THRES\n if selected_idx.sum() <= 1:\n ret.append(best3D)\n continue\n cands = all_cands[selected_idx]\n KRT = KRTs[selected_idx]\n\n for _ in range(RANSAC_ITER):\n a = random.randint(0, len(cands)-1)\n b = random.randint(0, len(cands)-1)\n if a == b:\n continue\n\n p3D = cv2triangulate( KRT[a], KRT[b], cands[a], cands[b])\n\n acc = 0\n inliers = []\n for pid, cand, cam_center, invA in zip(np.where(selected_idx)[0], cands, cam_centers[selected_idx], invAs[selected_idx]):\n x1 = np.dot(invA, np.append(cand, 1)) + cam_center\n dist = point2line(p3D, x1, cam_center)\n if dist < cfg.KEYPOINT.RANSAC_THRES:\n acc += 1\n inliers.append(pid)\n\n\n if acc > best:\n bestinliers = inliers\n best = acc\n best3D = p3D\n\n points_2d_set = []\n if len(bestinliers) > 1:\n for j in bestinliers:\n points_2d = all_cands[j]\n points_2d_set.append((str(j), points_2d))\n best3D = triangulate_one_point(camera_system, points_2d_set).squeeze()\n ret.append(best3D)\n return np.array(ret)\n\ndef triangulate_epipolar(pts, KRTs, Ks, RTs, confs, corr_pos, otherKRTs, dlt=False):\n \"\"\"\n Args:\n pts: view x 42 x 2\n KRTs: view x 3 x 4\n confs: view x 42\n Return:\n 42 x 3\n \"\"\"\n camera_system = build_multi_camera_system(Ks, RTs)\n\n if torch.is_tensor(pts): pts = pts.cpu().numpy()\n if torch.is_tensor(KRTs): KRTs = 
KRTs.cpu().numpy()\n if torch.is_tensor(otherKRTs): otherKRTs = otherKRTs.cpu().numpy()\n if torch.is_tensor(Ks): Ks = Ks.cpu().numpy()\n if torch.is_tensor(RTs): RTs = RTs.cpu().numpy()\n if torch.is_tensor(confs): confs = confs.cpu().numpy()\n if torch.is_tensor(corr_pos): corr_pos = corr_pos.cpu().numpy()\n assert len(confs.T) == pts.shape[1]\n assert 'epipolar' in cfg.BACKBONE.BODY\n\n cam_centers = []\n invAs = []\n for M in KRTs:\n cam_center, invA = camera_center(M)\n cam_centers.append(cam_center)\n invAs.append(invA)\n cam_centers = np.array(cam_centers)\n invAs = np.array(invAs)\n # view x 3\n ret = []\n for all_cands, conf in zip(pts.transpose((1,0,2)), confs.T):\n best = 0\n bestconf = 0\n best3D = [0, 0, 0]\n bestinliers = []\n selected_idx = conf > cfg.KEYPOINT.CONF_THRES\n if selected_idx.sum() == 0:\n print('correspondence by max point + epipolar')\n selected_idx = np.zeros_like(selected_idx)\n selected_idx[conf.argmax()] = True\n elif selected_idx.sum() == 1:\n print('correspondence by 1 point + epipolar')\n if selected_idx.sum() == 1:\n # 1 x 2\n cands = all_cands[selected_idx]\n pix_locs = coord2pix(cands / cfg.DATASETS.IMAGE_RESIZE / cfg.DATASETS.PREDICT_RESIZE,\n cfg.BACKBONE.DOWNSAMPLE).squeeze()\n other_locs = corr_pos[selected_idx].squeeze()[int(pix_locs[1]), int(pix_locs[0])]\n other_locs = pix2coord(other_locs, cfg.BACKBONE.DOWNSAMPLE) # 128 -> 512\n other_locs = other_locs * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE # ->1024->4096\n otherKRT = otherKRTs[selected_idx].squeeze()\n KRT = KRTs[selected_idx].squeeze()\n best3D = cv2triangulate(KRT, otherKRT, cands.squeeze(), other_locs)\n ret.append(best3D)\n continue\n \n cands = all_cands[selected_idx]\n KRT = KRTs[selected_idx]\n\n if dlt:\n points_2d_set = []\n for j in np.where(selected_idx)[0]:\n points_2d_set.append((str(j), all_cands[j]))\n best3D = triangulate_one_point(camera_system, points_2d_set).squeeze()\n ret.append(best3D)\n continue\n\n # too few points no need to ransac\n if pts.shape[1] < RANSAC_ITER**.5:\n for a in range(len(cands)):\n for b in range(len(cands)):\n if a == b:\n continue\n p3D = cv2triangulate(KRT[a], KRT[b], cands[a], cands[b])\n acc = 0\n inliers = []\n for pid, cand, cam_center, invA in zip(np.where(selected_idx)[0], cands, cam_centers[selected_idx], invAs[selected_idx]):\n x1 = np.dot(invA, np.append(cand, 1)) + cam_center\n dist = point2line(p3D, x1, cam_center)\n if dist < cfg.KEYPOINT.RANSAC_THRES:\n acc += 1\n inliers.append(pid)\n if acc > best:\n bestinliers = inliers\n best = acc\n best3D = p3D\n else:\n for _ in range(RANSAC_ITER):\n a, b = np.random.choice(len(cands), 2, replace=False)\n # a = random.randint(0, len(cands)-1)\n # b = random.randint(0, len(cands)-1)\n # if a == b:\n # continue\n p3D = cv2triangulate( KRT[a], KRT[b], cands[a], cands[b])\n acc = 0\n inliers = []\n for pid, cand, cam_center, invA in zip(np.where(selected_idx)[0], cands, cam_centers[selected_idx], invAs[selected_idx]):\n x1 = np.dot(invA, np.append(cand, 1)) + cam_center\n dist = point2line(p3D, x1, cam_center)\n if dist < cfg.KEYPOINT.RANSAC_THRES:\n acc += 1\n inliers.append(pid)\n if acc > best:\n bestinliers = inliers\n best = acc\n best3D = p3D\n\n if len(bestinliers) > 2:\n points_2d_set = []\n for j in bestinliers:\n points_2d_set.append((str(j), all_cands[j]))\n best3D = triangulate_one_point(camera_system, points_2d_set).squeeze()\n ret.append(best3D)\n return np.array(ret)\n\ndef build_multi_camera_system(Ks, RTs):\n \"\"\"\n Build a multi-camera system with pymvg 
package for triangulation\n\n Args:\n Ks, RTs: list of camera parameters\n Returns:\n cams_system: a multi-camera system\n \"\"\"\n pymvg_cameras = []\n for name, (K, RT) in enumerate(zip(Ks, RTs)):\n P = np.zeros( (3,4) )\n P[:3,:3]=K\n\n distortion_coefficients = np.zeros((5,))\n i = sensor_msgs_compat.msg.CameraInfo()\n i.width = None\n i.height = None\n i.D = [float(val) for val in distortion_coefficients]\n i.K = list(K.flatten())\n i.R = list(np.eye(3).flatten())\n i.P = list(P.flatten())\n\n camera = CameraModel._from_parts(\n translation=RT[:, -1], \n rotation=RT[:, :-1], \n intrinsics=i,\n name=str(name))\n # camera = CameraModel.load_camera_from_M(KRT, name=str(name))\n pymvg_cameras.append(camera)\n return MultiCameraSystem(pymvg_cameras)\n\n\ndef triangulate_one_point(camera_system, points_2d_set):\n \"\"\"\n Triangulate a 3d point in world coordinates from multi-view 2d points\n\n Args:\n camera_system: pymvg camera system\n points_2d_set: list of structure (camera_name, point2d)\n Returns:\n points_3d: 3x1 point in world coordinates\n \"\"\"\n # try:\n points_3d = camera_system.find3d(points_2d_set)\n # except:\n # print(points_2d_set)\n return points_3d\n\n\ndef triangulate_pymvg(pts, Ks, RTs, confs):\n# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# Written by Chunyu Wang ([email protected]), modified by Yihui He\n# ------------------------------------------------------------------------------\n \"\"\"\n Triangulate 3d points in world coordinates from multi-view 2d poses\n by iteratively calling $triangulate_one_point$\n\n Args:\n KRTs: a list of camera parameters, each corresponding to\n one prediction in pts\n pts: ndarray of shape nxkx2, len(cameras) == n\n Returns:\n poses3d: ndarray of shape n/nviews x k x 3\n \"\"\"\n if torch.is_tensor(pts): pts = pts.cpu().numpy()\n if torch.is_tensor(Ks): Ks = Ks.cpu().numpy()\n if torch.is_tensor(RTs): RTs = RTs.cpu().numpy()\n if torch.is_tensor(confs): confs = confs.cpu().numpy()\n\n njoints = pts.shape[1]\n camera_system = build_multi_camera_system(Ks, RTs)\n p3D = np.zeros((njoints, 3))\n for k, conf in enumerate(confs.T):\n confthresh = cfg.KEYPOINT.CONF_THRES\n while True:\n selected_idx = np.where(conf > confthresh)[0]\n if confthresh < -1:\n break\n if len(selected_idx) <= 1:\n confthresh -= 0.05\n print('conf too high, decrease to', confthresh)\n else:\n break\n points_2d_set = []\n for j in selected_idx:\n points_2d = pts[j, k, :]\n points_2d_set.append((str(j), points_2d))\n p3D[k, :] = triangulate_one_point(camera_system, points_2d_set).T\n return p3D\n\nif __name__ == '__main__':\n KRTs = 2*[np.eye(3, 4)]\n Ks = 2*[np.eye(3, 3)]\n RTs = 2*[np.eye(3, 4)]\n pts = np.zeros((2, 5, 2))\n confs = np.ones((2, 5))\n print(triangulate_pymvg(pts, Ks, RTs, confs))\n\n"
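The DLT routines above assemble, for each view, rows of the form u·m3 − m1 and v·m3 − m2 (up to sign) from the projection matrix into one least-squares system. A self-contained NumPy sketch of the same construction, solved here via SVD on the homogeneous system rather than the normal equations used above, with a synthetic two-camera check (all data below is illustrative):

```python
import numpy as np

def dlt_triangulate(Ms, uvs):
    """Ms: list of 3x4 projection matrices; uvs: matching (u, v) pixels."""
    rows = []
    for M, (u, v) in zip(Ms, uvs):
        rows.append(u * M[2] - M[0])   # u * m3 - m1
        rows.append(v * M[2] - M[1])   # v * m3 - m2
    A = np.stack(rows)                 # (2 * n_views, 4)
    _, _, Vt = np.linalg.svd(A)
    X = Vt[-1]                         # null vector of A
    return X[:3] / X[3]                # dehomogenize

# Synthetic check: one 3D point seen by two cameras on a baseline.
P = np.array([1.0, 2.0, 5.0, 1.0])
M1 = np.hstack([np.eye(3), np.zeros((3, 1))])
M2 = np.hstack([np.eye(3), np.array([[-1.0], [0.0], [0.0]])])
project = lambda M: (M @ P)[:2] / (M @ P)[2]
print(dlt_triangulate([M1, M2], [project(M1), project(M2)]))  # ~[1. 2. 5.]
```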
] | [
[
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.load",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"torch.stack",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url",
"torch.nn.init.kaiming_normal_"
],
[
"torch.div",
"torch.transpose",
"torch.cat",
"numpy.eye",
"torch.from_numpy",
"torch.is_tensor",
"numpy.ones",
"torch.matmul",
"numpy.append",
"numpy.zeros_like",
"torch.unbind",
"torch.stack",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
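The triangulation routine above leans on two helpers, cv2triangulate and point2line, defined outside this excerpt. A minimal sketch of what they plausibly compute, assuming OpenCV's cv2.triangulatePoints as the two-view DLT routine (the names, shapes, and conversions here are inferred for illustration, not the repository's actual code):

import cv2
import numpy as np

def cv2triangulate(P1, P2, pt1, pt2):
    # Two-view DLT: cv2.triangulatePoints takes two 3x4 projection matrices and
    # 2xN pixel coordinates, and returns 4xN homogeneous world points.
    X = cv2.triangulatePoints(P1, P2,
                              np.asarray(pt1, dtype=float).reshape(2, 1),
                              np.asarray(pt2, dtype=float).reshape(2, 1))
    return X[:3, 0] / X[3, 0]  # dehomogenize -> (3,)

def point2line(p, x1, cam_center):
    # Distance from 3D point p to the ray through cam_center and x1:
    # |(p - c) x (x1 - c)| / |x1 - c|
    d = x1 - cam_center
    return np.linalg.norm(np.cross(p - cam_center, d)) / np.linalg.norm(d)

In the RANSAC loop above, a triangulated hypothesis is kept when this point-to-ray distance stays below cfg.KEYPOINT.RANSAC_THRES in as many views as possible.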
AntoineDidisheim/didipack | [
"9c9266bf248cae79e6ffddd98b7e573108abaa57"
] | [
"didipack/latex_table.py"
] | [
"import pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nfrom enum import Enum\n\nclass ParValue(Enum):\n TSTAT = 1\n PVALUE = 2\n STD = 3\n\nclass OneReg:\n def __init__(self, reg, show_list=[], hide_list=[], blocks=[], bottom_blocks=[]):\n self.reg = reg\n if show_list == []:\n self.show_list = []\n for s in self.reg.params.keys():\n if s not in hide_list:\n self.show_list.append(s)\n else:\n self.show_list = show_list\n\n self.blocks = blocks\n self.bottom_block = bottom_blocks\n\n def create_columns(self):\n # first add the parameters of the reg\n d = pd.Series(dtype=object)\n for k in self.show_list:\n if k in self.reg.params.keys():\n v = self.reg.pvalues[k]\n p = f'{np.round(self.reg.params[k], TableReg.round):,}'\n for tr in TableReg.sign_tr:\n if v <= tr:\n p += '*'\n # self = table_2.reg_list[0]\n # update the v to be tstat or std depending on parameters\n if TableReg.par_value == ParValue.TSTAT:\n v = self.reg.tvalues[k]\n if TableReg.par_value == ParValue.STD:\n v = self.reg.bse[k]\n v = r'(' + f'{np.round(v, TableReg.round):,}' + r')'\n v_l = [len(x) for x in v.split('.')]\n p_l = [len(x) for x in p.split('.')]\n\n t = abs(v_l[0] - p_l[0])\n t = r'\\phantom{' + '*' * t + '}'\n if v_l[0] > p_l[0]:\n p = t + p\n if v_l[0] < p_l[0]:\n v = t + v\n t = abs(v_l[1] - p_l[1])\n t = r'\\phantom{' + '*' * t + '}'\n if v_l[1] > p_l[1]:\n p = p+t\n if v_l[1] < p_l[1]:\n v = v+t\n\n # else:\n # p = r'\\phantom{(}' + p + r'\\phantom{)}'\n d[k] = p\n\n t = pd.Series(dtype=object)\n t[''] =v\n\n d = d.append(t)\n else:\n t = pd.Series(dtype=object)\n t[k] = TableReg.missing_symbol\n t[''] = TableReg.missing_symbol\n d = d.append(t)\n # now we can add the \"blocks\", that is fix effects and others\n for block in self.blocks:\n t = pd.Series(dtype=object)\n t[TableReg.group_key] = ''\n\n for k in block.keys():\n t[k] = block[k]\n d = d.append(t)\n\n # finaly additional info (r² and n.obs per default, but you can add anything through bottom blocks\n if TableReg.show_obs | TableReg.show_r2 | (len(self.bottom_block)>0):\n t = pd.Series(dtype=object)\n t[TableReg.group_key] = ''\n t['Observations'] = f'{int(self.reg.nobs):,}'\n\n if hasattr(self.reg,'rsquared_adj'):\n\n t[r'$R^2$'] = np.round(self.reg.rsquared_adj,TableReg.round_r2)\n else:\n t[r'Pseudo $R^2$'] = np.round(self.reg.prsquared,TableReg.round_r2)\n\n first_block = True\n for block in self.bottom_block:\n if first_block:\n first_block = False\n else:\n t[TableReg.group_key] = ''\n t = pd.Series(dtype=object)\n for k in block.keys():\n t[k] = block[k]\n d = d.append(t)\n return d\n\n\nclass TableReg:\n missing_symbol = ' '\n par_value = ParValue.STD\n round = 4\n round_r2 = 4\n sign_tr = [0.1, 0.05, 0.01]\n show_obs = True\n show_r2 = True\n variable_skip = r'\\smallskip'\n group_key = 'asgeg'\n group_skip = r'\\medskip'\n equal_lines = False\n\n def __init__(self, **option):\n self.reg_list = []\n self.hide_list = []\n self.order = []\n self.df = None\n self.final_show_list = []\n self.show_only_list = []\n self.col_groups = []\n self.rename_dict = {}\n if 'hide_list' in option:\n assert type(option['hide_list']) == list, \"The overall hide list has to be a list\"\n self.hide_list = option['hide_list']\n\n if 'show_only_list' in option:\n assert type(option['show_only_list']) == list, \"The show only list has to be a list\"\n self.show_only_list = option['show_only_list']\n\n if 'order' in option:\n assert type(option['order']) == list, \"The order has to be a list\"\n self.order = option['order']\n\n if 'col_groups' in 
option:\n self.set_col_groups(option['col_groups'])\n\n if 'rename_dict' in option:\n self.set_rename_dict(option['rename_dict'])\n\n\n def set_rename_dict(self, rename_dict):\n assert type(rename_dict) == dict, \"The rename dict must be a dictionary\"\n self.rename_dict = rename_dict\n\n\n\n def set_col_groups(self, groups):\n assert type(groups) == list, \"The col order has to be a list of list\"\n for group in groups:\n assert type(group) == list, \"Each col group must be a list ['name of group', first columne in the group (int), last col in group (int)]\"\n self.col_groups = groups\n\n\n def add_reg(self, reg, show_list=[], hide_list=[], blocks=[],bottom_blocks=[]):\n hide_list = hide_list + self.hide_list\n self.reg_list.append(OneReg(reg, show_list, hide_list, blocks, bottom_blocks))\n\n def update_show_list(self):\n if len(self.show_only_list) == 0:\n show_list = []\n for oneReg in self.reg_list:\n show_list = list(set(show_list + oneReg.show_list))\n show_list = list(np.sort(show_list))\n show_list = self.order + [x for x in show_list if x not in self.order]\n else:\n show_list = self.show_only_list\n\n col = []\n for oneReg in self.reg_list:\n oneReg.show_list = show_list\n col.append(oneReg.create_columns())\n self.df = pd.concat(col,1)\n\n self.df.columns = [r'\\parboxc{c}{0.6cm}{('+str(int(i+1))+')}' for i in range(self.df.shape[1])]\n self.df = self.df.rename(index=self.rename_dict)\n\n self.final_show_list = show_list\n self.final_show_list = pd.Series(self.final_show_list).replace(self.rename_dict).values.tolist()\n self.tex=''\n\n def create_tex(self):\n self.update_show_list()\n\n # writing the tex modification to include name templatess\n tex = self.df.to_latex(escape=False)\n cols = tex.split('\\\\begin{tabular}{')[1].split('}')[0]\n rep = list(cols.replace('l','c'))\n rep[0] = 'l'\n tex = tex.replace(cols,''.join(rep))\n\n if len(self.col_groups)>0:\n # adding \"group col names\"\n s = '\\n '\n s_line = '\\n '\n for g in self.col_groups:\n s += '& \\multicolumn{'+str(1+g[2]-g[1])+'}{c}{\\parboxc{c}{0.6cm}{'+g[0]+'}}'\n # s += '& \\multicolumn{'+str(1+g[2]-g[1])+'}{c}{'+g[0]+'}'\n s_line += r'\\cmidrule(lr){'+str(g[1]+1)+'-'+str(g[2]+1)+'}'\n s += r' \\\\'+'\\n'\n s_line += '\\n'\n\n ts = tex.split(r'\\toprule')\n tex = ts[0]+r'\\toprule' + s +s_line+ ts[1]\n\n ts = tex.split(r'\\midrule')\n tex = ts[0]+r'\\midrule' + ts[1]\n\n # adding the skip between variable\n # first we extract the maxium length of a column on the first one\n L = 0\n for x in self.df.index:\n L = max(L,len(x))\n L+=1\n for i in range(1,len(self.final_show_list)):\n a = self.final_show_list[i]\n a += ' '*(L-len(a))+'&'\n ts = tex.split(a)\n temp = ts[0][:-4] + TableReg.variable_skip + ts[0][-4:]\n tex=temp+a+ts[1]\n\n\n # processing the group skip\n t = None\n for item in tex.split(\"\\n\"):\n if TableReg.group_key in item:\n t = item\n # replacing specific rule\n\n if t is not None:\n self.tex = tex.replace(t, TableReg.group_skip + r'\\\\')\n else:\n self.tex = tex\n\n def save_tex(self, save_dir):\n\n self.create_tex()\n tex = self.tex\n if TableReg.equal_lines:\n tex=tex.replace(r'\\toprule',r'\\hline')\n tex=tex.replace(r'\\midrule',r'\\hline')\n tex=tex.replace(r'\\bottomrule',r'\\hline')\n\n with open(save_dir,'w') as txt:\n txt.write(tex)\n\n @staticmethod\n def create_panel_of_tables(table_list, name_list, save_dir):\n numbers = 'A B C D E F G H I J K L M N O P Q R S T U V W X Y Z'.split()\n title_list = []\n for i in range(len(table_list)):\n table_list[i].create_tex()\n 
title_list.append('Panel '+numbers[i]+': '+name_list[i])\n\n\n tex = table_list[0].tex\n temp = r' \\multicolumn{6}{c}{\\parboxc{c}{0.7cm}{'+title_list[0]+r'}} \\\\'\n ts = tex.split(r'\\toprule')\n tex = ts[0]+r'\\toprule' +temp+r'\\hline'+ts[1]\n tex = tex.replace(r'\\bottomrule','')\n tex = tex.replace(r'\\end{tabular}',r'asf')\n tex = tex.replace('\\\\\\\\\\n\\nasf','\\\\bigskip \\\\\\\\ \\n')\n\n\n for i in range(1,len(table_list)):\n t_tex = table_list[i].tex\n temp = r' \\multicolumn{6}{c}{\\parboxc{c}{0.6cm}{' + title_list[i] + r'}} \\\\'\n ts = t_tex.split(r'\\toprule')\n t_tex = ts[0] + r'\\hline' + temp + r'\\hline' + ts[1]\n t = None\n for item in t_tex.split(\"\\n\"):\n if r'\\begin{tabular}' in item:\n t = item\n t_tex = t_tex.replace(t,'')\n if i+1 < len(table_list):\n t_tex = t_tex.replace(r'\\bottomrule','')\n t_tex = t_tex.replace(r'\\end{tabular}', r'asf')\n t_tex = t_tex.replace('\\\\\\\\\\n\\nasf', '\\\\bigskip \\\\\\\\ \\n')\n tex +=t_tex\n\n\n\n with open(save_dir,'w') as txt:\n txt.write(tex)\n\n\n\n"
] | [
[
"numpy.round",
"pandas.concat",
"pandas.Series",
"numpy.sort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
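A usage sketch for the TableReg class serialized above, with statsmodels OLS results as inputs; the data, column names, and output path are illustrative. Note that OneReg.create_columns calls pandas.Series.append, so a pandas release that still ships that method (pre-2.0) is assumed:

import numpy as np
import pandas as pd
import statsmodels.api as sm
from didipack.latex_table import TableReg

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(200, 2)), columns=["beta", "gamma"])
y = 0.5 * X["beta"] - 0.2 * X["gamma"] + rng.normal(size=200)

table = TableReg(hide_list=["const"], rename_dict={"beta": r"$\beta$"})
table.add_reg(sm.OLS(y, sm.add_constant(X)).fit())            # column (1)
table.add_reg(sm.OLS(y, sm.add_constant(X[["beta"]])).fit())  # column (2)
table.save_tex("regressions.tex")  # writes the LaTeX tabular to disk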
forgi86/pyMPC | [
"4b004ba707dab49cd36d96a3575b8593c870a904"
] | [
"test_scripts/main_cvxpy_simple.py"
] | [
"from cvxpy import Variable, Parameter, Minimize, Problem, OSQP, quad_form\nimport numpy as np\nimport scipy as sp\nimport scipy.sparse as sparse\nimport time\n\n\nif __name__ == \"__main__\":\n\n # Discrete time model of a quadcopter\n Ts = 0.2\n M = 2.0\n\n Ad = sparse.csc_matrix([\n [1.0, Ts],\n [0, 1.0]\n ])\n Bd = sparse.csc_matrix([\n [0.0],\n [Ts/M]])\n\n [nx, nu] = Bd.shape # number of states and number or inputs\n\n # Constraints\n uref = 0\n uinit = 0 # not used here\n umin = np.array([-1000.0]) - uref\n umax = np.array([1000.0]) - uref\n\n xmin = np.array([-100.0, -100.0])\n xmax = np.array([100.0, 100.0])\n\n # Objective function\n Q = sparse.diags([0.2, 0.3])\n QN = sparse.diags([0.4, 0.5]) # final cost\n R = 0.1*sparse.eye(1)\n\n # Initial and reference states\n x0 = np.array([0.1, 0.2]) # initial state\n # Reference input and states\n pref = 7.0\n vref = 0\n xref = np.array([pref, vref]) # reference state\n\n # Prediction horizon\n Np = 20\n\n # Define problem\n u = Variable((nu, Np))\n x = Variable((nx, Np + 1))\n x_init = Parameter(nx)\n objective = 0\n constraints = [x[:,0] == x_init]\n for k in range(Np):\n objective += quad_form(x[:, k] - xref, Q) + quad_form(u[:, k], R)\n constraints += [x[:, k+1] == Ad*x[:, k] + Bd*u[:, k]]\n constraints += [xmin <= x[:, k], x[:, k] <= xmax]\n constraints += [umin <= u[:, k], u[:, k] <= umax]\n objective += quad_form(x[:, Np] - xref, QN)\n prob = Problem(Minimize(objective), constraints)\n\n\n # Simulate in closed loop\n # Simulate in closed loop\n len_sim = 15 # simulation length (s)\n nsim = int(len_sim/Ts) # simulation length(timesteps)\n xsim = np.zeros((nsim,nx))\n usim = np.zeros((nsim,nu))\n tsim = np.arange(0,nsim)*Ts\n\n uminus1_val = uinit # initial previous measured input is the input at time instant -1.\n time_start = time.time()\n for i in range(nsim):\n x_init.value = x0\n #uminus1.value = uminus1_val\n prob.solve(solver=OSQP, warm_start=True)\n uMPC = u[:,0].value\n usim[i,:] = uMPC\n x0 = Ad.dot(x0) + Bd.dot(uMPC)\n xsim[i,:] = x0\n\n uminus1_val = uMPC # or a measurement if the input is affected by noise\n time_sim = time.time() - time_start\n\n # In [1]\n import matplotlib.pyplot as plt\n fig,axes = plt.subplots(3,1, figsize=(10,10))\n axes[0].plot(tsim, xsim[:,0], \"k\", label='p')\n axes[0].plot(tsim, xref[0]*np.ones(np.shape(tsim)), \"r--\", label=\"pref\")\n axes[0].set_title(\"Position (m)\")\n\n axes[1].plot(tsim, xsim[:,1], label=\"v\")\n axes[1].plot(tsim, xref[1]*np.ones(np.shape(tsim)), \"r--\", label=\"vref\")\n axes[1].set_title(\"Velocity (m/s)\")\n\n axes[2].plot(tsim, usim[:,0], label=\"u\")\n axes[2].plot(tsim, uref*np.ones(np.shape(tsim)), \"r--\", label=\"uref\")\n axes[2].set_title(\"Force (N)\")\n\n\n for ax in axes:\n ax.grid(True)\n ax.legend()\n"
] | [
[
"scipy.sparse.csc_matrix",
"scipy.sparse.eye",
"numpy.arange",
"scipy.sparse.diags",
"matplotlib.pyplot.subplots",
"numpy.shape",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
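The closed-loop section of the script above re-solves one fixed Problem while only mutating x_init.value; because the problem structure never changes, CVXPY can reuse its canonicalization and OSQP can warm-start from the previous solution. A toy sketch of that pattern (the problem and data are illustrative, not the MPC above):

import cvxpy as cp
import numpy as np

x = cp.Variable(2)
x_init = cp.Parameter(2)
# Project x_init onto the box [-1, 1]^2; only the parameter value changes per solve.
prob = cp.Problem(cp.Minimize(cp.sum_squares(x - x_init)), [x >= -1, x <= 1])
for x0 in (np.array([0.5, 2.0]), np.array([-3.0, 0.2])):
    x_init.value = x0
    prob.solve(solver=cp.OSQP, warm_start=True)
    print(x.value)  # the clipped copy of x0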
SuryaThiru/ppscore | [
"59df800e32d4ef5fda4be2bdf4b3235db2a39fee"
] | [
"src/ppscore/calculation.py"
] | [
"from sklearn import tree\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import mean_absolute_error, f1_score\n\nimport pandas as pd\nfrom pandas.api.types import (\n is_numeric_dtype,\n is_bool_dtype,\n is_categorical_dtype,\n is_string_dtype,\n is_datetime64_any_dtype,\n is_timedelta64_dtype,\n)\n\n# if the number is 4, then it is possible to detect patterns when there are at least 4 times the same observation. If the limit is increased, the minimum observations also increase. This is important, because this is the limit when sklearn will throw an error which will lead to a score of 0 if we catch it\nCV_ITERATIONS = 4\n\nRANDOM_SEED = 587136\n\n# if a numeric column has less than 15 unique values, it is inferred as categoric\n# thus, the ppscore will use a classification\n# this has important implications on the ppscore\n# eg if you have 4 equal categories encoded 0, 1, 2, 3 and treat it as a regression\n# then the baseline is 1 (median) which is okayish and a predictor will have a harder time\n# to beat the baseline, thus the ppscore will be considerably lower\n# if the column is encoded as category, then the baseline will be to always predict 0\n# this baseline will be way easier to beat and thus result in a higher ppscore\nNUMERIC_AS_CATEGORIC_BREAKPOINT = 15\n\n\ndef _calculate_model_cv_score_(df, target, feature, metric, model, **kwargs):\n \"Calculates the mean model score based on cross-validation\"\n # Sources about the used methods:\n # https://scikit-learn.org/stable/modules/tree.html\n # https://scikit-learn.org/stable/modules/cross_validation.html\n # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html\n\n # shuffle the rows - this is important for crossvalidation\n # because the crossvalidation just takes the first n lines\n # if there is a strong pattern in the rows eg 0,0,0,0,1,1,1,1\n # then this will lead to problems because the first cv sees mostly 0 and the later 1\n # this approach might be wrong for timeseries because it might leak information\n df = df.sample(frac=1, random_state=RANDOM_SEED, replace=False)\n\n # preprocess target\n if df[target].dtype == object:\n le = preprocessing.LabelEncoder()\n df[target] = le.fit_transform(df[target])\n target_series = df[target]\n else:\n target_series = df[target]\n\n # preprocess feature\n if df[feature].dtype == object:\n one_hot_encoder = preprocessing.OneHotEncoder()\n sparse_matrix = one_hot_encoder.fit_transform(df[feature].values.reshape(-1, 1))\n feature_df = sparse_matrix\n else:\n # reshaping needed because there is only 1 feature\n feature_df = df[feature].values.reshape(-1, 1)\n\n # Crossvalidation is stratifiedKFold for classification, KFold for regression\n # CV on one core (n_job=1; default) has shown to be fastest\n scores = cross_val_score(\n model, feature_df, target_series, cv=CV_ITERATIONS, scoring=metric\n )\n\n return scores.mean()\n\n\ndef _normalized_mae_score(model_mae, naive_mae):\n \"Normalizes the model MAE score, given the baseline score\"\n # # Value range of MAE is [0, infinity), 0 is best\n # 10, 5 >> 0 because worse than naive\n # 10, 20 >> 0.5\n # 5, 20 >> 0.75 = 1 - (mae/base_mae)\n if model_mae > naive_mae:\n return 0\n else:\n return 1 - (model_mae / naive_mae)\n\n\ndef _mae_normalizer(df, y, model_score):\n \"In case of MAE, calculates the baseline score for y and derives the PPS.\"\n df[\"naive\"] = df[y].median()\n baseline_score = mean_absolute_error(df[y], df[\"naive\"]) # true, 
pred\n\n ppscore = _normalized_mae_score(abs(model_score), baseline_score)\n return ppscore, baseline_score\n\n\ndef _normalized_f1_score(model_f1, baseline_f1):\n \"Normalizes the model F1 score, given the baseline score\"\n # # F1 ranges from 0 to 1\n # # 1 is best\n # 0.5, 0.7 = 0 because worse than naive\n # 0.75, 0.5 > 0.5\n #\n if model_f1 < baseline_f1:\n return 0\n else:\n scale_range = 1.0 - baseline_f1 # eg 0.3\n f1_diff = model_f1 - baseline_f1 # eg 0.1\n return f1_diff / scale_range # 0.1/0.3 = 0.33\n\n\ndef _f1_normalizer(df, y, model_score):\n \"In case of F1, calculates the baseline score for y and derives the PPS.\"\n df[\"naive\"] = df[y].value_counts().index[0]\n baseline_score = f1_score(df[y], df[\"naive\"], average=\"weighted\")\n\n ppscore = _normalized_f1_score(model_score, baseline_score)\n return ppscore, baseline_score\n\n\nTASKS = {\n \"regression\": {\n \"metric_name\": \"mean absolute error\",\n \"metric_key\": \"neg_mean_absolute_error\",\n \"model\": tree.DecisionTreeRegressor(),\n \"score_normalizer\": _mae_normalizer,\n },\n \"classification\": {\n \"metric_name\": \"weighted F1\",\n \"metric_key\": \"f1_weighted\",\n \"model\": tree.DecisionTreeClassifier(),\n \"score_normalizer\": _f1_normalizer,\n },\n \"predict_itself\": {\n \"metric_name\": None,\n \"metric_key\": None,\n \"model\": None,\n \"score_normalizer\": None,\n },\n \"predict_constant\": {\n \"metric_name\": None,\n \"metric_key\": None,\n \"model\": None,\n \"score_normalizer\": None,\n },\n \"predict_id\": {\n \"metric_name\": None,\n \"metric_key\": None,\n \"model\": None,\n \"score_normalizer\": None,\n },\n}\n\n\ndef _infer_task(df, x, y):\n \"Returns str with the name of the inferred task based on the columns x and y\"\n if x == y:\n return \"predict_itself\"\n\n category_count = df[y].value_counts().count()\n if category_count == 1:\n return \"predict_constant\"\n if category_count == 2:\n return \"classification\"\n if category_count == len(df[y]) and (\n is_string_dtype(df[y]) or is_categorical_dtype(df[y])\n ):\n return \"predict_id\"\n if category_count <= NUMERIC_AS_CATEGORIC_BREAKPOINT and is_numeric_dtype(df[y]):\n return \"classification\"\n\n if is_bool_dtype(df[y]) or is_string_dtype(df[y]) or is_categorical_dtype(df[y]):\n return \"classification\"\n\n if is_datetime64_any_dtype(df[y]) or is_timedelta64_dtype(df[y]):\n raise Exception(\n f\"The target column {y} has the dtype {df[y].dtype} which is not supported. A possible solution might be to convert {y} to a string column\"\n )\n\n # this check needs to be after is_bool_dtype because bool is considered numeric by pandas\n if is_numeric_dtype(df[y]):\n return \"regression\"\n\n raise Exception(\n f\"Could not infer a valid task based on the target {y}. 
The dtype {df[y].dtype} is not yet supported\"\n ) # pragma: no cover\n\n\ndef _feature_is_id(df, x):\n \"Returns Boolean if the feature column x is an ID\"\n if not (is_string_dtype(df[x]) or is_categorical_dtype(df[x])):\n return False\n\n category_count = df[x].value_counts().count()\n return category_count == len(df[x])\n\n\ndef _maybe_sample(df, sample):\n \"\"\"\n Maybe samples the rows of the given df to have at most ``sample`` rows\n If sample is ``None`` or falsy, there will be no sampling.\n If the df has fewer rows than the sample, there will be no sampling.\n\n Parameters\n ----------\n df : pandas.DataFrame\n Dataframe that might be sampled\n sample : int or ``None``\n Number of rows to be sampled\n\n Returns\n -------\n pandas.DataFrame\n DataFrame after potential sampling\n \"\"\"\n if sample and len(df) > sample:\n # this is a problem if x or y have more than sample=5000 categories\n # TODO: dont sample when the problem occurs and show warning\n df = df.sample(sample, random_state=RANDOM_SEED, replace=False)\n return df\n\n\ndef score(df, x, y, task=None, sample=5000):\n \"\"\"\n Calculate the Predictive Power Score (PPS) for \"x predicts y\"\n The score always ranges from 0 to 1 and is data-type agnostic.\n\n A score of 0 means that the column x cannot predict the column y better than a naive baseline model.\n A score of 1 means that the column x can perfectly predict the column y given the model.\n A score between 0 and 1 states the ratio of how much potential predictive power the model achieved compared to the baseline model.\n\n Parameters\n ----------\n df : pandas.DataFrame\n Dataframe that contains the columns x and y\n x : str\n Name of the column x which acts as the feature\n y : str\n Name of the column y which acts as the target\n task : str, default ``None``\n Name of the prediction task, e.g. ``classification`` or ``regression``\n If the task is not specified, it is infered based on the y column\n The task determines which model and evaluation score is used for the PPS\n sample : int or ``None``\n Number of rows for sampling. 
The sampling decreases the calculation time of the PPS.\n If ``None`` there will be no sampling.\n\n Returns\n -------\n Dict\n A dict that contains multiple fields about the resulting PPS.\n The dict enables introspection into the calculations that have been performed under the hood\n \"\"\"\n\n if x == y:\n task_name = \"predict_itself\"\n else:\n # TODO: log.warning when values have been dropped\n df = df[[x, y]].dropna()\n if len(df) == 0:\n raise Exception(\"After dropping missing values, there are no valid rows left\")\n df = _maybe_sample(df, sample)\n\n if task is None:\n task_name = _infer_task(df, x, y)\n else:\n task_name = task\n\n task = TASKS[task_name]\n\n if task_name in [\"predict_constant\", \"predict_itself\"]:\n model_score = 1\n ppscore = 1\n baseline_score = 1\n elif task_name == \"predict_id\": # target is id\n model_score = 0\n ppscore = 0\n baseline_score = 0\n elif _feature_is_id(df, x):\n model_score = 0\n ppscore = 0\n baseline_score = 0\n else:\n model_score = _calculate_model_cv_score_(\n df, target=y, feature=x, metric=task[\"metric_key\"], model=task[\"model\"]\n )\n ppscore, baseline_score = task[\"score_normalizer\"](df, y, model_score)\n\n return {\n \"x\": x,\n \"y\": y,\n \"task\": task_name,\n \"ppscore\": ppscore,\n \"metric\": task[\"metric_name\"],\n \"baseline_score\": baseline_score,\n \"model_score\": abs(model_score), # sklearn returns negative mae\n \"model\": task[\"model\"],\n }\n\n\n# def predictors(df, y, task=None, sorted=True):\n# pass\n\n\ndef matrix(df, output=\"df\", **kwargs):\n \"\"\"\n Calculate the Predictive Power Score (PPS) matrix for all columns in the dataframe\n\n Parameters\n ----------\n df : pandas.DataFrame\n The dataframe that contains the data\n output: str - potential values: \"df\", \"dict\"\n Control the type of the output. Either return a df or a dict with all the PPS dicts arranged by the target column\n kwargs:\n Other key-word arguments that shall be forwarded to the pps.score method\n\n Returns\n -------\n pandas.DataFrame or Dict\n Either returns a df or a dict with all the PPS dicts arranged by the target column. This can be influenced by the output argument\n \"\"\"\n data = {}\n columns = list(df.columns)\n\n for target in columns:\n scores = []\n for feature in columns:\n # single_score = score(df, x=feature, y=target)[\"ppscore\"]\n try:\n single_score = score(df, x=feature, y=target, **kwargs)[\"ppscore\"]\n except:\n # TODO: log error\n single_score = 0\n scores.append(single_score)\n data[target] = scores\n\n if output == \"df\":\n matrix = pd.DataFrame.from_dict(data, orient=\"index\")\n matrix.columns = columns\n return matrix\n else: # output == \"dict\"\n return data\n"
] | [
[
"pandas.api.types.is_categorical_dtype",
"sklearn.model_selection.cross_val_score",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.metrics.mean_absolute_error",
"sklearn.preprocessing.OneHotEncoder",
"pandas.api.types.is_datetime64_any_dtype",
"pandas.api.types.is_numeric_dtype",
"sklearn.tree.DecisionTreeClassifier",
"pandas.DataFrame.from_dict",
"pandas.api.types.is_timedelta64_dtype",
"sklearn.metrics.f1_score",
"sklearn.preprocessing.LabelEncoder",
"pandas.api.types.is_bool_dtype",
"pandas.api.types.is_string_dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
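The F1 normalization in _normalized_f1_score rescales the model score onto [0, 1] between the naive baseline and a perfect score: baseline 0.5 and model 0.75 give (0.75 - 0.5) / (1 - 0.5) = 0.5, and anything at or below the baseline maps to 0. A small sketch, with the import path inferred from the file location above (an installed release may expose ppscore.score directly):

import numpy as np
import pandas as pd
from ppscore.calculation import score, _normalized_f1_score

print(_normalized_f1_score(0.75, 0.5))  # (0.75 - 0.5) / (1 - 0.5) = 0.5
print(_normalized_f1_score(0.40, 0.5))  # worse than the baseline -> 0

df = pd.DataFrame({"x": np.random.uniform(size=2000)})
df["y"] = df["x"] * 4 + np.random.normal(size=2000)  # near-linear relation
print(score(df, "x", "y")["ppscore"])  # well above 0 for this synthetic data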
joakimlindblad/py_alpha_amd_release | [
"6a95286753c48e9f0c882d650158b15b58bcdd46"
] | [
"register.py"
] | [
"\n#\n# Py-Alpha-AMD Registration Framework\n# Author: Johan Ofverstedt\n# Reference: Fast and Robust Symmetric Image Registration Based on Distances Combining Intensity and Spatial Information\n#\n# Copyright 2019 Johan Ofverstedt\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\n#\n# Registration framework\n#\n\n# Import Numpy/Scipy\nimport numpy as np\nimport scipy as sp\nimport scipy.misc\n\n# Import transforms\nfrom transforms import CompositeTransform\nfrom transforms import AffineTransform\nfrom transforms import Rigid2DTransform\nfrom transforms import Rotate2DTransform\nfrom transforms import TranslationTransform\nfrom transforms import ScalingTransform\n\n# Import distances\nfrom distances import QuantizedImage\nfrom distances import alpha_amd\nfrom distances import symmetric_amd_distance\n\n# Import optimizers\nfrom optimizers import GradientDescentOptimizer\n\n# Import generators and filters\nimport generators\nimport filters\n\n# Import misc\nimport math\nimport sys\nimport time\nimport cProfile, pstats\n\nclass Register:\n def __init__(self, dim):\n self.dim = dim\n self.sampling_fraction = 1.0\n self.step_lengths = np.array([[0.1, 1.0]])\n self.iterations = 1500\n self.alpha_levels = 7\n self.gradient_magnitude_threshold = 0.00001\n \n self.ref_im = None\n self.flo_im = None\n self.ref_mask = None\n self.flo_mask = None\n self.ref_weights = None\n self.flo_weights = None\n\n # Transforms\n self.initial_transforms = []\n self.transforms_param_scaling = []\n self.output_transforms = []\n self.values = []\n self.value_history = []\n\n # Resolution pyramid levels\n self.pyramid_factors = []\n self.pyramid_sigmas = []\n\n self.distances = []\n \n # Reporting/Output\n self.report_func = None\n self.report_freq = 25\n\n def add_initial_transform(self, transform, param_scaling=None):\n if param_scaling is None:\n param_scaling = np.ones((transforms.get_param_count(),))\n self.initial_transforms.append(transform)\n self.transforms_param_scaling.append(param_scaling)\n \n def add_initial_transforms(self, transforms, param_scaling=None):\n for i, t in enumerate(transforms):\n if param_scaling is None:\n pscaling = np.ones((transforms.get_param_count(),))\n else:\n pscaling = param_scaling[i]\n self.add_initial_transform(t, pscaling)\n \n def clear_transforms(self):\n self.initial_transforms = []\n self.output_transforms = []\n self.transforms_param_scaling = []\n self.values = []\n self.value_history = []\n \n def get_output(self, index):\n return self.output_transforms[index], 
self.values[index]\n\n def get_value_history(self, index, level):\n return self.value_history[index][level]\n\n def add_pyramid_level(self, factor, sigma):\n self.pyramid_factors.append(factor)\n self.pyramid_sigmas.append(sigma)\n \n def add_pyramid_levels(self, factors, sigmas):\n for i in range(len(factors)):\n self.add_pyramid_level(factors[i], sigmas[i])\n\n def get_pyramid_level_count(self):\n return len(self.pyramid_factors)\n\n def set_sampling_fraction(self, sampling_fraction):\n self.sampling_fraction = sampling_fraction\n \n def set_iterations(self, iterations):\n self.iterations = iterations\n\n def set_alpha_levels(self, alpha_levels):\n self.alpha_levels = alpha_levels\n \n def set_step_lengths(self, step_lengths):\n self.step_lengths = np.array(step_lengths)#np.array([start_step_length, end_step_length])\n \n def set_reference_image(self, image, spacing = None):\n self.ref_im = image\n if spacing is None:\n self.ref_spacing = np.ones(image.ndim)\n else:\n self.ref_spacing = spacing\n \n def set_floating_image(self, image, spacing = None):\n self.flo_im = image\n if spacing is None:\n self.flo_spacing = np.ones(image.ndim)\n else:\n self.flo_spacing = spacing\n\n def set_reference_mask(self, mask):\n self.ref_mask = mask\n\n def set_floating_mask(self, mask):\n self.flo_mask = mask\n\n def set_reference_weights(self, weights):\n self.ref_weights = weights\n\n def set_floating_weights(self, weights):\n self.flo_weights = weights\n\n def set_gradient_magnitude_threshold(self, t):\n self.gradient_magnitude_threshold = t\n\n def set_report_freq(self, freq):\n self.report_freq = freq\n \n def set_report_func(self, func):\n self.report_func = func\n \n def initialize(self, pyramid_images_output_path=None):\n if len(self.pyramid_factors) == 0:\n self.add_pyramid_level(1, 0.0)\n if len(self.initial_transforms) == 0:\n self.add_initial_transform(AffineTransform(self.dim))\n \n ### Preprocessing\n\n pyramid_levels = len(self.pyramid_factors)\n\n for i in range(pyramid_levels):\n factor = self.pyramid_factors[i]\n\n ref_resampled = filters.downsample(filters.gaussian_filter(self.ref_im, self.pyramid_sigmas[i]), factor)\n flo_resampled = filters.downsample(filters.gaussian_filter(self.flo_im, self.pyramid_sigmas[i]), factor)\n \n ref_mask_resampled = filters.downsample(self.ref_mask, factor)\n flo_mask_resampled = filters.downsample(self.flo_mask, factor)\n\n ref_resampled = filters.normalize(ref_resampled, 0.0, ref_mask_resampled)\n flo_resampled = filters.normalize(flo_resampled, 0.0, flo_mask_resampled)\n\n if pyramid_images_output_path is not None and ref_resampled.ndim == 2:\n scipy.misc.imsave('%sref_resampled_%d.png' % (pyramid_images_output_path, i+1), ref_resampled)\n scipy.misc.imsave('%sflo_resampled_%d.png' % (pyramid_images_output_path, i+1), flo_resampled)\n \n if self.ref_weights is None:\n ref_weights = np.zeros(ref_resampled.shape)\n ref_weights[ref_mask_resampled] = 1.0\n else:\n ref_weights = filters.downsample(self.ref_weights, factor)\n if self.flo_weights is None:\n flo_weights = np.zeros(flo_resampled.shape)\n flo_weights[flo_mask_resampled] = 1.0\n else:\n flo_weights = filters.downsample(self.flo_weights, factor)\n\n ref_diag = np.sqrt(np.square(np.array(ref_resampled.shape)*self.ref_spacing).sum())\n flo_diag = np.sqrt(np.square(np.array(flo_resampled.shape)*self.flo_spacing).sum())\n\n q_ref = QuantizedImage(ref_resampled, self.alpha_levels, ref_weights, self.ref_spacing*factor, remove_zero_weight_pnts = True)\n q_flo = QuantizedImage(flo_resampled, 
self.alpha_levels, flo_weights, self.flo_spacing*factor, remove_zero_weight_pnts = True)\n\n tf_ref = alpha_amd.AlphaAMD(q_ref, self.alpha_levels, ref_diag, self.ref_spacing*factor, ref_mask_resampled, ref_mask_resampled, interpolator_mode='linear', dt_fun = None, mask_out_edges = True)\n tf_flo = alpha_amd.AlphaAMD(q_flo, self.alpha_levels, flo_diag, self.flo_spacing*factor, flo_mask_resampled, flo_mask_resampled, interpolator_mode='linear', dt_fun = None, mask_out_edges = True)\n\n symmetric_measure = True\n squared_measure = False\n\n sym_dist = symmetric_amd_distance.SymmetricAMDDistance(symmetric_measure=symmetric_measure, squared_measure=squared_measure)\n\n sym_dist.set_ref_image_source(q_ref)\n sym_dist.set_ref_image_target(tf_ref)\n\n sym_dist.set_flo_image_source(q_flo)\n sym_dist.set_flo_image_target(tf_flo)\n\n sym_dist.set_sampling_fraction(self.sampling_fraction)\n\n sym_dist.initialize()\n\n self.distances.append(sym_dist)\n\n def run(self):\n pyramid_level_count = len(self.pyramid_factors)\n transform_count = len(self.initial_transforms)\n\n for t_it in range(transform_count):\n init_transform = self.initial_transforms[t_it]\n param_scaling = self.transforms_param_scaling[t_it]\n\n self.value_history.append([])\n\n for lvl_it in range(pyramid_level_count):\n \n opt = GradientDescentOptimizer(self.distances[lvl_it], init_transform.copy())\n\n if self.step_lengths.ndim == 1:\n opt.set_step_length(self.step_lengths[0], self.step_lengths[1])\n else:\n opt.set_step_length(self.step_lengths[lvl_it, 0], self.step_lengths[lvl_it, 1])\n opt.set_scalings(param_scaling)\n opt.set_gradient_magnitude_threshold(self.gradient_magnitude_threshold)\n opt.set_report_freq(self.report_freq)\n if type(self.report_func) is list or type(self.report_func) is tuple:\n opt.set_report_callback(self.report_func[t_it])\n else:\n opt.set_report_callback(self.report_func)\n\n if isinstance(self.iterations, int):\n itercount = self.iterations\n else:\n assert(len(self.iterations) == pyramid_level_count)\n itercount = self.iterations[lvl_it]\n \n opt.optimize(itercount)\n\n if lvl_it + 1 == pyramid_level_count:\n self.output_transforms.append(opt.get_transform())\n self.values.append(opt.get_value())\n self.initial_transforms[t_it] = opt.get_transform()\n else:\n init_transform = opt.get_transform()\n\n self.value_history[-1].append(opt.get_value_history())\n\n \n \n "
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
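A hedged end-to-end sketch for the Register class above; the images, pyramid schedule, and masks are illustrative, and the module paths follow the file layout shown. param_scaling is passed explicitly because the default branch of add_initial_transform references an undefined name (transforms instead of transform):

import numpy as np
from register import Register
from transforms import AffineTransform

ref = np.random.rand(128, 128)  # stand-ins for real reference/floating images
flo = np.random.rand(128, 128)

reg = Register(2)
reg.set_reference_image(ref)
reg.set_floating_image(flo)
# initialize() downsamples and indexes with the masks, so provide boolean arrays.
reg.set_reference_mask(np.ones(ref.shape, dtype=bool))
reg.set_floating_mask(np.ones(flo.shape, dtype=bool))
reg.add_pyramid_levels(factors=[4, 2, 1], sigmas=[5.0, 3.0, 0.0])

t = AffineTransform(2)  # assumes the transform exposes get_param_count()
reg.add_initial_transform(t, param_scaling=np.ones(t.get_param_count()))
reg.initialize()
reg.run()
transform, value = reg.get_output(0)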
DeepPSP/torch_ecg | [
"6db5ffb063d0e8fb4ce97029a0d184a658f43a37",
"6db5ffb063d0e8fb4ce97029a0d184a658f43a37"
] | [
"torch_ecg/models/cnn/multi_scopic.py",
"torch_ecg/databases/physionet_databases/cinc2020.py"
] | [
"\"\"\"\nThe core part of the SOTA model of CPSC2019,\nbranched, and has different scope (in terms of dilation) in each branch\n\"\"\"\nfrom copy import deepcopy\nfrom itertools import repeat\nfrom collections import OrderedDict\nfrom typing import Union, Optional, Sequence, NoReturn\n\nimport numpy as np\nnp.set_printoptions(precision=5, suppress=True)\nimport torch\nfrom torch import nn\nfrom torch import Tensor\n\nfrom ...cfg import CFG, DEFAULTS\nfrom ...utils.utils_nn import compute_module_size, SizeMixin\nfrom ...utils.misc import dict_to_str\nfrom ...models._nets import (\n Conv_Bn_Activation,\n DownSample,\n NonLocalBlock, SEBlock, GlobalContextBlock,\n)\n\n\nif DEFAULTS.torch_dtype == torch.float64:\n torch.set_default_tensor_type(torch.DoubleTensor)\n\n\n__all__ = [\n \"MultiScopicCNN\",\n \"MultiScopicBasicBlock\",\n \"MultiScopicBranch\",\n]\n\n\nclass MultiScopicBasicBlock(SizeMixin, nn.Sequential):\n \"\"\" finished, checked,\n\n basic building block of the CNN part of the SOTA model\n from CPSC2019 challenge (entry 0416)\n\n (conv -> activation) * N --> bn --> down_sample\n \"\"\"\n __DEBUG__ = False\n __name__ = \"MultiScopicBasicBlock\"\n\n def __init__(self,\n in_channels:int,\n scopes:Sequence[int],\n num_filters:Union[int,Sequence[int]],\n filter_lengths:Union[int,Sequence[int]],\n subsample_length:int,\n groups:int=1,\n **config) -> NoReturn:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n in_channels: int,\n number of channels in the input\n scopes: sequence of int,\n scopes of the convolutional layers, via `dilation`\n num_filters: int or sequence of int,\n number of filters of the convolutional layer(s)\n filter_lengths: int or sequence of int,\n filter length(s) (kernel size(s)) of the convolutional layer(s)\n subsample_length: int,\n subsample length (ratio) at the last layer of the block\n \"\"\"\n super().__init__()\n self.__in_channels = in_channels\n self.__scopes = scopes\n self.__num_convs = len(self.__scopes)\n if isinstance(num_filters, int):\n self.__out_channels = list(repeat(num_filters, self.__num_convs))\n else:\n self.__out_channels = num_filters\n assert len(self.__out_channels) == self.__num_convs, \\\n f\"`scopes` indicates {self.__num_convs} convolutional layers, while `num_filters` indicates {len(self.__out_channels)}\"\n if isinstance(filter_lengths, int):\n self.__filter_lengths = list(repeat(filter_lengths, self.__num_convs))\n else:\n self.__filter_lengths = filter_lengths\n assert len(self.__filter_lengths) == self.__num_convs, \\\n f\"`scopes` indicates {self.__num_convs} convolutional layers, while `filter_lengths` indicates {len(self.__filter_lengths)}\"\n self.__subsample_length = subsample_length\n self.__groups = groups\n self.config = CFG(deepcopy(config))\n\n conv_in_channels = self.__in_channels\n for idx in range(self.__num_convs):\n self.add_module(\n f\"ca_{idx}\",\n Conv_Bn_Activation(\n in_channels=conv_in_channels,\n out_channels=self.__out_channels[idx],\n kernel_size=self.__filter_lengths[idx],\n stride=1,\n dilation=self.__scopes[idx],\n groups=self.__groups,\n batch_norm=self.config.batch_norm,\n # kw_bn=self.config.kw_bn,\n activation=self.config.activation,\n kw_activation=self.config.kw_activation,\n kernel_initializer=self.config.kernel_initializer,\n kw_initializer=self.config.kw_initializer,\n bias=self.config.bias,\n )\n )\n conv_in_channels = self.__out_channels[idx]\n self.add_module(\n \"bn\",\n nn.BatchNorm1d(self.__out_channels[-1])\n )\n self.add_module(\n \"down\",\n DownSample(\n 
down_scale=self.__subsample_length,\n in_channels=self.__out_channels[-1],\n groups=self.__groups,\n # padding=\n batch_norm=False,\n mode=self.config.subsample_mode,\n )\n )\n if self.config.dropout > 0:\n self.add_module(\n \"dropout\",\n nn.Dropout(self.config.dropout, inplace=False)\n )\n\n def forward(self, input:Tensor) -> Tensor:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n input: Tensor,\n of shape (batch_size, n_channels, seq_len)\n\n Returns\n -------\n output: Tensor,\n of shape (batch_size, n_channels, seq_len)\n \"\"\"\n output = super().forward(input)\n return output\n\n def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n seq_len: int,\n length of the 1d sequence\n batch_size: int, optional,\n the batch size, can be None\n\n Returns\n -------\n output_shape: sequence,\n the output shape of this block, given `seq_len` and `batch_size`\n \"\"\"\n _seq_len = seq_len\n for idx, module in enumerate(self):\n if idx == self.__num_convs: # bn layer\n continue\n elif self.config.dropout > 0 and idx == len(self)-1: # dropout layer\n continue\n output_shape = module.compute_output_shape(_seq_len, batch_size)\n _, _, _seq_len = output_shape\n return output_shape\n\n\nclass MultiScopicBranch(SizeMixin, nn.Sequential):\n \"\"\" finished, checked,\n \n branch path of the CNN part of the SOTA model\n from CPSC2019 challenge (entry 0416)\n \"\"\"\n __DEBUG__ = False\n __name__ = \"MultiScopicBranch\"\n\n def __init__(self,\n in_channels:int,\n scopes:Sequence[Sequence[int]],\n num_filters:Union[Sequence[int],Sequence[Sequence[int]]],\n filter_lengths:Union[Sequence[int],Sequence[Sequence[int]]],\n subsample_lengths:Union[int,Sequence[int]],\n groups:int=1,\n **config) -> NoReturn:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n in_channels: int,\n number of features (channels) of the input\n scopes: sequence of sequences of int,\n scopes (in terms of `dilation`) for the convolutional layers,\n each sequence of int is for one branch\n num_filters: sequence of int, or sequence of sequences of int,\n number of filters for the convolutional layers,\n if is sequence of int,\n then convolutionaly layers in one branch will have the same number of filters\n filter_lengths: sequence of int, or sequence of sequences of int,\n filter length (kernel size) of the convolutional layers,\n if is sequence of int,\n then convolutionaly layers in one branch will have the same filter length\n subsample_lengths: int, or sequence of int,\n subsample length (stride) of the convolutional layers,\n if is sequence of int,\n then convolutionaly layers in one branch will have the same subsample length\n groups: int, default 1,\n connection pattern (of channels) of the inputs and outputs\n config: dict,\n other hyper-parameters, including\n dropout, activation choices, weight initializer, etc.\n \"\"\"\n super().__init__()\n self.__in_channels = in_channels\n self.__scopes = scopes\n self.__num_blocks = len(self.__scopes)\n self.__num_filters = num_filters\n assert len(self.__num_filters) == self.__num_blocks, \\\n f\"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `num_filters` indicates {len(self.__num_filters)}\"\n self.__filter_lengths = filter_lengths\n assert len(self.__filter_lengths) == self.__num_blocks, \\\n f\"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `filter_lengths` indicates {len(self.__filter_lengths)}\"\n 
if isinstance(subsample_lengths, int):\n self.__subsample_lengths = list(repeat(subsample_lengths, self.__num_blocks))\n else:\n self.__subsample_lengths = filter_lengths\n assert len(self.__subsample_lengths) == self.__num_blocks, \\\n f\"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `subsample_lengths` indicates {len(self.__subsample_lengths)}\"\n self.__groups = groups\n self.config = CFG(deepcopy(config))\n\n block_in_channels = self.__in_channels\n for idx in range(self.__num_blocks):\n self.add_module(\n f\"block_{idx}\",\n MultiScopicBasicBlock(\n in_channels=block_in_channels,\n scopes=self.__scopes[idx],\n num_filters=self.__num_filters[idx],\n filter_lengths=self.__filter_lengths[idx],\n subsample_length=self.__subsample_lengths[idx],\n groups=self.__groups,\n dropout=self.config.dropouts[idx],\n **(self.config.block)\n )\n )\n block_in_channels = self.__num_filters[idx]\n\n def forward(self, input:Tensor) -> Tensor:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n input: Tensor,\n of shape (batch_size, n_channels, seq_len)\n\n Returns\n -------\n output: Tensor,\n of shape (batch_size, n_channels, seq_len)\n \"\"\"\n output = super().forward(input)\n return output\n\n def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n seq_len: int,\n length of the 1d sequence\n batch_size: int, optional,\n the batch size, can be None\n\n Returns\n -------\n output_shape: sequence,\n the output shape of this block, given `seq_len` and `batch_size`\n \"\"\"\n _seq_len = seq_len\n for idx, module in enumerate(self):\n output_shape = module.compute_output_shape(_seq_len, batch_size)\n _, _, _seq_len = output_shape\n return output_shape\n\n\nclass MultiScopicCNN(SizeMixin, nn.Module):\n \"\"\" finished, checked,\n\n CNN part of the SOTA model from CPSC2019 challenge (entry 0416)\n \"\"\"\n __DEBUG__ = False\n __name__ = \"MultiScopicCNN\"\n\n def __init__(self, in_channels:int, **config) -> NoReturn:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n in_channels: int,\n number of channels in the input\n config: dict,\n other hyper-parameters of the Module, ref. corresponding config file\n key word arguments that have to be set:\n scopes: sequence of sequences of sequences of int,\n scopes (in terms of dilation) of each convolution\n num_filters: sequence of sequences (of int or of sequences of int),\n number of filters of the convolutional layers,\n with granularity to each block of each branch,\n or to each convolution of each block of each branch\n filter_lengths: sequence of sequences (of int or of sequences of int),\n filter length(s) (kernel size(s)) of the convolutions,\n with granularity to each block of each branch,\n or to each convolution of each block of each branch\n subsample_lengths: sequence of int or sequence of sequences of int,\n subsampling length(s) (ratio(s)) of all blocks,\n with granularity to each branch or to each block of each branch,\n each subsamples after the last convolution of each block\n dropouts: sequence of int or sequence of sequences of int,\n dropout rates of all blocks,\n with granularity to each branch or to each block of each branch,\n each dropouts at the last of each block\n groups: int,\n connection pattern (of channels) of the inputs and outputs\n block: dict,\n other parameters that can be set for the building blocks\n for a full list of configurable parameters, ref. corr. 
config file\n \"\"\"\n super().__init__()\n self.__in_channels = in_channels\n self.config = CFG(deepcopy(config))\n self.__scopes = self.config.scopes\n self.__num_branches = len(self.__scopes)\n\n if self.__DEBUG__:\n print(f\"configuration of {self.__name__} is as follows\\n{dict_to_str(self.config)}\")\n\n self.branches = nn.ModuleDict()\n for idx in range(self.__num_branches):\n self.branches[f\"branch_{idx}\"] = \\\n MultiScopicBranch(\n in_channels=self.__in_channels,\n scopes=self.__scopes[idx],\n num_filters=self.config.num_filters[idx],\n filter_lengths=self.config.filter_lengths[idx],\n subsample_lengths=self.config.subsample_lengths[idx],\n groups=self.config.groups,\n dropouts=self.config.dropouts[idx],\n block=self.config.block, # a dict\n )\n\n def forward(self, input:Tensor) -> Tensor:\n \"\"\" finished, checked,\n \n Parameters\n ----------\n input: Tensor,\n of shape (batch_size, n_channels, seq_len)\n\n Returns\n -------\n output: Tensor,\n of shape (batch_size, n_channels, seq_len)\n \"\"\"\n branch_out = OrderedDict()\n for idx in range(self.__num_branches):\n key = f\"branch_{idx}\"\n branch_out[key] = self.branches[key].forward(input)\n output = torch.cat(\n [branch_out[f\"branch_{idx}\"] for idx in range(self.__num_branches)],\n dim=1, # along channels\n )\n return output\n \n def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n seq_len: int,\n length of the 1d sequence\n batch_size: int, optional,\n the batch size, can be None\n\n Returns\n -------\n output_shape: sequence,\n the output shape of this block, given `seq_len` and `batch_size`\n \"\"\"\n out_channels = 0\n for idx in range(self.__num_branches):\n key = f\"branch_{idx}\"\n _, _branch_oc, _seq_len = \\\n self.branches[key].compute_output_shape(seq_len, batch_size)\n out_channels += _branch_oc\n output_shape = (batch_size, out_channels, _seq_len)\n return output_shape\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nimport os, io, sys\nimport re\nimport json\nimport time\n# import pprint\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom typing import Union, Optional, Any, List, Dict, Tuple, Set, Sequence, NoReturn\nfrom numbers import Real, Number\n\nimport numpy as np\nnp.set_printoptions(precision=5, suppress=True)\nimport pandas as pd\nimport wfdb\nfrom scipy.io import loadmat\nfrom scipy.signal import resample, resample_poly\n\nfrom ...cfg import CFG\nfrom ...utils.misc import (\n get_record_list_recursive,\n get_record_list_recursive3,\n ms2samples, dict_to_str,\n ensure_siglen,\n)\nfrom ...utils import ecg_arrhythmia_knowledge as EAK\nfrom ..aux_data.cinc2020_aux_data import (\n dx_mapping_all, dx_mapping_scored, dx_mapping_unscored,\n normalize_class, abbr_to_snomed_ct_code,\n df_weights_abbr,\n equiv_class_dict,\n)\nfrom ..base import PhysioNetDataBase, DEFAULT_FIG_SIZE_PER_SEC\n\n\n__all__ = [\n \"CINC2020\",\n \"compute_metrics\",\n \"compute_all_metrics\",\n]\n\n\n# configurations for visualization\nPlotCfg = CFG()\n# default const for the plot function in dataset.py\n# used only when corr. values are absent\n# all values are time bias w.r.t. corr. peaks, with units in ms\nPlotCfg.p_onset = -40\nPlotCfg.p_offset = 40\nPlotCfg.q_onset = -20\nPlotCfg.s_offset = 40\nPlotCfg.qrs_radius = 60\nPlotCfg.t_onset = -100\nPlotCfg.t_offset = 60\n\n\nclass CINC2020(PhysioNetDataBase):\n \"\"\" finished, under improving,\n\n Classification of 12-lead ECGs: the PhysioNet/Computing in Cardiology Challenge 2020\n\n ABOUT CINC2020\n --------------\n 0. There are 6 difference tranches of training data, listed as follows:\n A. 6,877\n recordings from China Physiological Signal Challenge in 2018 (CPSC2018): PhysioNetChallenge2020_Training_CPSC.tar.gz in ref. [6]\n B. 3,453 recordings\n from China 12-Lead ECG Challenge Database (unused data from CPSC2018 and NOT the CPSC2018 test data): PhysioNetChallenge2020_Training_2.tar.gz in ref. [6]\n C. 74 recordings\n from the St Petersburg INCART 12-lead Arrhythmia Database: PhysioNetChallenge2020_Training_StPetersburg.tar.gz in ref. [6]\n D. 516 recordings\n from the PTB Diagnostic ECG Database: PhysioNetChallenge2020_Training_PTB.tar.gz in ref. [6]\n E. 21,837 recordings\n from the PTB-XL electrocardiography Database: PhysioNetChallenge2020_PTB-XL.tar.gz in ref. [6]\n F. 10,344 recordings\n from a Georgia 12-Lead ECG Challenge Database: PhysioNetChallenge2020_Training_E.tar.gz in ref. [6]\n In total, 43,101 labeled recordings of 12-lead ECGs from four countries (China, Germany, Russia, and the USA) across 3 continents have been posted publicly for this Challenge, with approximately the same number hidden for testing, representing the largest public collection of 12-lead ECGs\n\n 1. the A tranche training data comes from CPSC2018, whose folder name is `Training_WFDB`. The B tranche training data are unused training data of CPSC2018, having folder name `Training_2`. For these 2 tranches, ref. the docstring of `database_reader.cpsc_databases.cpsc2018.CPSC2018`\n 2. C. D. E. tranches of training data all come from corresponding PhysioNet dataset, whose details can be found in corresponding files:\n C: database_reader.physionet_databases.incartdb.INCARTDB\n D: database_reader.physionet_databases.ptbdb.PTBDB\n E: database_reader.physionet_databases.ptb_xl.PTB_XL\n the C tranche has folder name `Training_StPetersburg`, the D tranche has folder name `Training_PTB`, the F tranche has folder name `WFDB`\n 3. 
the F tranche is entirely new, posted for this Challenge, and represents a unique demographic of the Southeastern United States. It has folder name `Training_E/WFDB`.\n 4. only a part of diagnosis_abbr (diseases that appear in the labels of the 6 tranches of training data) are used in the scoring function (ref. `dx_mapping_scored_cinc2020`), while others are ignored (ref. `dx_mapping_unscored_cinc2020`). The scored diagnoses were chosen based on prevalence of the diagnoses in the training data, the severity of the diagnoses, and the ability to determine the diagnoses from ECG recordings. The ignored diagnosis_abbr can be put in a a \"non-class\" group.\n 5. the (updated) scoring function has a scoring matrix with nonzero off-diagonal elements. This scoring function reflects the clinical reality that some misdiagnoses are more harmful than others and should be scored accordingly. Moreover, it reflects the fact that confusing some classes is much less harmful than confusing other classes.\n\n 6. sampling frequencies:\n A. (CPSC2018): 500 Hz\n B. (CPSC2018-2): 500 Hz\n C. (INCART): 257 Hz\n D. (PTB): 1000 Hz\n E. (PTB-XL): 500 Hz\n F. (Georgia): 500 Hz\n 7. all data are recorded in the leads ordering of\n [\"I\", \"II\", \"III\", \"aVR\", \"aVL\", \"aVF\", \"V1\", \"V2\", \"V3\", \"V4\", \"V5\", \"V6\"]\n using for example the following code:\n >>> db_dir = \"/media/cfs/wenhao71/data/cinc2020_data/\"\n >>> working_dir = \"./working_dir\"\n >>> dr = CINC2020Reader(db_dir=db_dir,working_dir=working_dir)\n >>> set_leads = []\n >>> for tranche, l_rec in dr.all_records.items():\n ... for rec in l_rec:\n ... ann = dr.load_ann(rec)\n ... leads = ann[\"df_leads\"][\"lead_name\"].values.tolist()\n ... if leads not in set_leads:\n ... set_leads.append(leads)\n\n NOTE\n ----\n 1. The datasets have been roughly processed to have a uniform format, hence differ from their original resource (e.g. differe in sampling frequency, sample duration, etc.)\n 2. The original datasets might have richer metadata (especially those from PhysioNet), which can be fetched from corresponding reader's docstring or website of the original source\n 3. Each sub-dataset might have its own organizing scheme of data, which should be carefully dealt with\n 4. There are few \"absolute\" diagnoses in 12 lead ECGs, where large discrepancies in the interpretation of the ECG can be found even inspected by experts. There is inevitably something lost in translation, especially when you do not have the context. This doesn\"t mean making an algorithm isn't important\n 5. The labels are noisy, which one has to deal with in all real world data\n 6. each line of the following classes are considered the same (in the scoring matrix):\n - RBBB, CRBBB (NOT including IRBBB)\n - PAC, SVPB\n - PVC, VPB\n 7. unfortunately, the newly added tranches (C - F) have baseline drift and are much noisier. In contrast, CPSC data have had baseline removed and have higher SNR\n 8. on Aug. 1, 2020, adc gain (including \"resolution\", \"ADC\"? in .hea files) of datasets INCART, PTB, and PTB-xl (tranches C, D, E) are corrected. After correction, (the .tar files of) the 3 datasets are all put in a \"WFDB\" subfolder. In order to keep the structures consistant, they are moved into \"Training_StPetersburg\", \"Training_PTB\", \"WFDB\" as previously. 
Using the following code, one can check the adc_gain and baselines of each tranche:\n >>> db_dir = \"/media/cfs/wenhao71/data/cinc2020_data/\"\n >>> working_dir = \"./working_dir\"\n >>> dr = CINC2020(db_dir=db_dir,working_dir=working_dir)\n >>> resolution = {tranche: set() for tranche in \"ABCDEF\"}\n >>> baseline = {tranche: set() for tranche in \"ABCDEF\"}\n >>> for tranche, l_rec in dr.all_records.items():\n ... for rec in l_rec:\n ... ann = dr.load_ann(rec)\n ... resolution[tranche] = resolution[tranche].union(set(ann[\"df_leads\"][\"adc_gain\"]))\n ... baseline[tranche] = baseline[tranche].union(set(ann[\"df_leads\"][\"baseline\"]))\n >>> print(resolution, baseline)\n {\"A\": {1000.0}, \"B\": {1000.0}, \"C\": {1000.0}, \"D\": {1000.0}, \"E\": {1000.0}, \"F\": {1000.0}} {\"A\": {0}, \"B\": {0}, \"C\": {0}, \"D\": {0}, \"E\": {0}, \"F\": {0}}\n 9. the .mat files all contain digital signals, which has to be converted to physical values using adc gain, basesline, etc. in corresponding .hea files. `wfdb.rdrecord` has already done this conversion, hence greatly simplifies the data loading process.\n NOTE that there\"s a difference when using `wfdb.rdrecord`: data from `loadmat` are in \"channel_first\" format, while `wfdb.rdrecord.p_signal` produces data in the \"channel_last\" format\n 10. there\"re 3 equivalent (2 classes are equivalent if the corr. value in the scoring matrix is 1):\n (RBBB, CRBBB), (PAC, SVPB), (PVC, VPB)\n 11. in the newly (Feb., 2021) created dataset (ref. [7]), header files of each subset were gathered into one separate compressed file. This is due to the fact that updates on the dataset are almost always done in the header files. The correct usage of ref. [7], after uncompressing, is replacing the header files in the folder `All_training_WFDB` by header files from the 6 folders containing all header files from the 6 subsets.\n\n ISSUES\n ------\n 1. reading the .hea files, baselines of all records are 0, however it is not the case if one plot the signal\n 2. about half of the LAD records satisfy the \"2-lead\" criteria, but fail for the \"3-lead\" criteria, which means that their axis is (-30°, 0°) which is not truely LAD\n 3. (Aug. 15, 2020; resolved, and changed to 1000) tranche F, the Georgia subset, has ADC gain 4880 which might be too high. Thus obtained voltages are too low. 1000 might be a suitable (correct) value of ADC gain for this tranche just as the other tranches.\n 4. \"E04603\" (all leads), \"E06072\" (chest leads, epecially V1-V3), \"E06909\" (lead V2), \"E07675\" (lead V3), \"E07941\" (lead V6), \"E08321\" (lead V6) has exceptionally large values at rpeaks, reading (`load_data`) these two records using `wfdb` would bring in `nan` values. One can check using the following code\n >>> rec = \"E04603\"\n >>> dr.plot(rec, dr.load_data(rec, backend=\"scipy\", units=\"uv\")) # currently raising error\n\n Usage\n -----\n 1. 
ECG arrhythmia detection\n\n References\n ----------\n [1] https://physionetchallenges.github.io/2020/\n [2] http://2018.icbeb.org/#\n [3] https://physionet.org/content/incartdb/1.0.0/\n [4] https://physionet.org/content/ptbdb/1.0.0/\n [5] https://physionet.org/content/ptb-xl/1.0.1/\n [6] (deprecated) https://storage.cloud.google.com/physionet-challenge-2020-12-lead-ecg-public/\n [7] (recommended) https://storage.cloud.google.com/physionetchallenge2021-public-datasets/\n \"\"\"\n\n def __init__(self,\n db_dir:str,\n working_dir:Optional[str]=None,\n verbose:int=2,\n **kwargs:Any) -> NoReturn:\n \"\"\"\n\n Parameters\n ----------\n db_dir: str,\n storage path of the database\n working_dir: str, optional,\n working directory, to store intermediate files and log file\n verbose: int, default 2,\n log verbosity\n kwargs: auxilliary key word arguments\n \"\"\"\n super().__init__(db_name=\"CINC2020\", db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)\n \n self.rec_ext = \"mat\"\n self.ann_ext = \"hea\"\n\n self.db_tranches = list(\"ABCDEF\")\n self.tranche_names = CFG({\n \"A\": \"CPSC\",\n \"B\": \"CPSC-Extra\",\n \"C\": \"StPetersburg\",\n \"D\": \"PTB\",\n \"E\": \"PTB-XL\",\n \"F\": \"Georgia\",\n })\n self.rec_prefix = CFG({\n \"A\": \"A\", \"B\": \"Q\", \"C\": \"I\", \"D\": \"S\", \"E\": \"HR\", \"F\": \"E\",\n })\n\n self.db_dir_base = db_dir\n self.db_dirs = CFG({tranche:\"\" for tranche in self.db_tranches})\n self._all_records = None\n self._ls_rec() # loads file system structures into self.db_dirs and self._all_records\n\n self._diagnoses_records_list = None\n self._ls_diagnoses_records()\n\n self.fs = {\n \"A\": 500, \"B\": 500, \"C\": 257, \"D\": 1000, \"E\": 500, \"F\": 500,\n }\n self.spacing = {t: 1000 / f for t,f in self.fs.items()}\n\n self.all_leads = deepcopy(EAK.Standard12Leads)\n self._all_leads_set = set(self.all_leads)\n\n self.df_ecg_arrhythmia = dx_mapping_all[[\"Dx\",\"SNOMED CT Code\",\"Abbreviation\"]]\n self.ann_items = [\n \"rec_name\", \"nb_leads\",\"fs\",\"nb_samples\",\"datetime\",\"age\",\"sex\",\n \"diagnosis\",\"df_leads\",\n \"medical_prescription\",\"history\",\"symptom_or_surgery\",\n ]\n self.label_trans_dict = equiv_class_dict.copy()\n\n # self.value_correction_factor = CFG({tranche:1 for tranche in self.db_tranches})\n # self.value_correction_factor.F = 4.88 # ref. ISSUES 3\n\n self.exceptional_records = [\"E04603\", \"E06072\", \"E06909\", \"E07675\", \"E07941\", \"E08321\"] # ref. ISSUES 4\n\n def get_subject_id(self, rec:str) -> int:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n rec: str,\n name of the record\n\n Returns\n -------\n sid: int,\n the `subject_id` corr. 
to `rec`\n \"\"\"\n s2d = {\"A\":\"11\", \"B\":\"12\", \"C\":\"21\", \"D\":\"31\", \"E\":\"32\", \"F\":\"41\"}\n s2d = {self.rec_prefix[k]:v for k,v in s2d.items()}\n prefix = \"\".join(re.findall(r\"[A-Z]\", rec))\n n = rec.replace(prefix,\"\")\n sid = int(f\"{s2d[prefix]}{'0'*(8-len(n))}{n}\")\n return sid\n\n def _ls_rec(self) -> NoReturn:\n \"\"\" finished, checked,\n\n list all the records and load into `self._all_records`,\n facilitating further uses\n \"\"\"\n fn = \"record_list.json\"\n record_list_fp = os.path.join(self.db_dir_base, fn)\n if os.path.isfile(record_list_fp):\n with open(record_list_fp, \"r\") as f:\n self._all_records = {k:v for k,v in json.load(f).items() if k in self.tranche_names}\n for tranche in self.db_tranches:\n self.db_dirs[tranche] = os.path.join(self.db_dir_base, os.path.dirname(self._all_records[tranche][0]))\n self._all_records[tranche] = [os.path.basename(f) for f in self._all_records[tranche]]\n else:\n print(\"Please wait patiently to let the reader find all records of all the tranches...\")\n start = time.time()\n rec_patterns_with_ext = {\n tranche: f\"^{self.rec_prefix[tranche]}(?:\\d+).{self.rec_ext}$\" \\\n for tranche in self.db_tranches\n }\n self._all_records = \\\n get_record_list_recursive3(self.db_dir_base, rec_patterns_with_ext)\n to_save = deepcopy(self._all_records)\n for tranche in self.db_tranches:\n tmp_dirname = [ os.path.dirname(f) for f in self._all_records[tranche] ]\n if len(set(tmp_dirname)) != 1:\n if len(set(tmp_dirname)) > 1:\n raise ValueError(f\"records of tranche {tranche} are stored in several folders!\")\n else:\n raise ValueError(f\"no record found for tranche {tranche}!\")\n self.db_dirs[tranche] = os.path.join(self.db_dir_base, tmp_dirname[0])\n self._all_records[tranche] = [os.path.basename(f) for f in self._all_records[tranche]]\n print(f\"Done in {time.time() - start:.5f} seconds!\")\n with open(os.path.join(self.db_dir_base, fn), \"w\") as f:\n json.dump(to_save, f)\n\n def _ls_diagnoses_records(self) -> NoReturn:\n \"\"\" finished, checked,\n\n list all the records for all diagnoses\n \"\"\"\n fn = \"diagnoses_records_list.json\"\n dr_fp = os.path.join(self.db_dir_base, fn)\n if os.path.isfile(dr_fp):\n with open(dr_fp, \"r\") as f:\n self._diagnoses_records_list = json.load(f)\n else:\n print(\"Please wait several minutes patiently to let the reader list records for each diagnosis...\")\n start = time.time()\n self._diagnoses_records_list = {d: [] for d in df_weights_abbr.columns.values.tolist()}\n for tranche, l_rec in self._all_records.items():\n for rec in l_rec:\n ann = self.load_ann(rec)\n ld = ann[\"diagnosis_scored\"][\"diagnosis_abbr\"]\n for d in ld:\n self._diagnoses_records_list[d].append(rec)\n print(f\"Done in {time.time() - start:.5f} seconds!\")\n with open(dr_fp, \"w\") as f:\n json.dump(self._diagnoses_records_list, f)\n self._all_records = CFG(self._all_records)\n\n @property\n def diagnoses_records_list(self):\n \"\"\" finished, checked,\n \"\"\"\n if self._diagnoses_records_list is None:\n self._ls_diagnoses_records()\n return self._diagnoses_records_list\n\n def _get_tranche(self, rec:str) -> str:\n \"\"\" finished, checked,\n\n get the tranche\"s symbol (one of \"A\",\"B\",\"C\",\"D\",\"E\",\"F\") of a record via its name\n\n Parameters\n ----------\n rec: str,\n name of the record\n\n Returns\n -------\n tranche, str,\n symbol of the tranche, ref. 
`self.rec_prefix`\n \"\"\"\n prefix = \"\".join(re.findall(r\"[A-Z]\", rec))\n tranche = {v:k for k,v in self.rec_prefix.items()}[prefix]\n return tranche\n\n def get_data_filepath(self, rec:str, with_ext:bool=True) -> str:\n \"\"\" finished, checked,\n\n get the absolute file path of the data file of `rec`\n\n Parameters\n ----------\n rec: str,\n name of the record\n with_ext: bool, default True,\n if True, the returned file path comes with file extension,\n otherwise without file extension,\n which is useful for `wfdb` functions\n\n Returns\n -------\n fp: str,\n absolute file path of the data file of the record\n \"\"\"\n tranche = self._get_tranche(rec)\n fp = os.path.join(self.db_dirs[tranche], f\"{rec}.{self.rec_ext}\")\n if not with_ext:\n fp = os.path.splitext(fp)[0]\n return fp\n\n def get_header_filepath(self, rec:str, with_ext:bool=True) -> str:\n \"\"\" finished, checked,\n\n get the absolute file path of the header file of `rec`\n\n Parameters\n ----------\n rec: str,\n name of the record\n with_ext: bool, default True,\n if True, the returned file path comes with file extension,\n otherwise without file extension,\n which is useful for `wfdb` functions\n\n Returns\n -------\n fp: str,\n absolute file path of the header file of the record\n \"\"\"\n tranche = self._get_tranche(rec)\n fp = os.path.join(self.db_dirs[tranche], f\"{rec}.{self.ann_ext}\")\n if not with_ext:\n fp = os.path.splitext(fp)[0]\n return fp\n\n def get_ann_filepath(self, rec:str, with_ext:bool=True) -> str:\n \"\"\" finished, checked,\n alias for `get_header_filepath`\n \"\"\"\n fp = self.get_header_filepath(rec, with_ext=with_ext)\n return fp\n\n def load_data(self, rec:str, leads:Optional[Union[str, List[str]]]=None, data_format:str=\"channel_first\", backend:str=\"wfdb\", units:str=\"mV\", fs:Optional[Real]=None) -> np.ndarray:\n \"\"\" finished, checked,\n\n load physical (converted from digital) ecg data,\n which is more understandable for humans\n\n Parameters\n ----------\n rec: str,\n name of the record\n leads: str or list of str, optional,\n the leads to load\n data_format: str, default \"channel_first\",\n format of the ecg data,\n \"channel_last\" (alias \"lead_last\"), or\n \"channel_first\" (alias \"lead_first\")\n backend: str, default \"wfdb\",\n the backend data reader, can also be \"scipy\"\n units: str, default \"mV\",\n units of the output signal, can also be \"μV\", with an alias of \"uV\"\n fs: real number, optional,\n if not None, the loaded data will be resampled to this frequency\n \n Returns\n -------\n data: ndarray,\n the ecg data\n \"\"\"\n assert data_format.lower() in [\"channel_first\", \"lead_first\", \"channel_last\", \"lead_last\"]\n tranche = self._get_tranche(rec)\n if not leads:\n _leads = self.all_leads\n elif isinstance(leads, str):\n _leads = [leads]\n else:\n _leads = leads\n # if tranche in \"CD\" and fs == 500: # resample will be done at the end of the function\n # data = self.load_resampled_data(rec)\n if backend.lower() == \"wfdb\":\n rec_fp = self.get_data_filepath(rec, with_ext=False)\n # p_signal of \"lead_last\" format\n wfdb_rec = wfdb.rdrecord(rec_fp, physical=True, channel_names=_leads)\n data = np.asarray(wfdb_rec.p_signal.T)\n # lead_units = np.vectorize(lambda s: s.lower())(wfdb_rec.units)\n elif backend.lower() == \"scipy\":\n # loadmat of \"lead_first\" format\n rec_fp = self.get_data_filepath(rec, with_ext=True)\n data = loadmat(rec_fp)[\"val\"]\n header_info = self.load_ann(rec, raw=False)[\"df_leads\"]\n baselines = 
header_info[\"baseline\"].values.reshape(data.shape[0], -1)\n adc_gain = header_info[\"adc_gain\"].values.reshape(data.shape[0], -1)\n data = np.asarray(data-baselines) / adc_gain\n leads_ind = [self.all_leads.index(item) for item in _leads]\n data = data[leads_ind,:]\n # lead_units = np.vectorize(lambda s: s.lower())(header_info[\"df_leads\"][\"adc_units\"].values)\n else:\n raise ValueError(f\"backend `{backend.lower()}` not supported for loading data\")\n \n # ref. ISSUES 3, for multiplying `value_correction_factor`\n # data = data * self.value_correction_factor[tranche]\n\n if units.lower() in [\"uv\", \"μv\"]:\n data = data * 1000\n\n if fs is not None and fs != self.fs[tranche]:\n data = resample_poly(data, fs, self.fs[tranche], axis=1)\n\n if data_format.lower() in [\"channel_last\", \"lead_last\"]:\n data = data.T\n\n return data\n\n def load_ann(self, rec:str, raw:bool=False, backend:str=\"wfdb\") -> Union[dict,str]:\n \"\"\" finished, checked,\n\n load annotations (header) stored in the .hea files\n \n Parameters\n ----------\n rec: str,\n name of the record\n raw: bool, default False,\n if True, the raw annotations without parsing will be returned\n backend: str, default \"wfdb\", case insensitive,\n if is \"wfdb\", `wfdb.rdheader` will be used to load the annotations;\n if is \"naive\", annotations will be parsed from the lines read from the header files\n \n Returns\n -------\n ann_dict, dict or str,\n the annotations with items: ref. `self.ann_items`\n \"\"\"\n tranche = self._get_tranche(rec)\n ann_fp = self.get_ann_filepath(rec, with_ext=True)\n with open(ann_fp, \"r\") as f:\n header_data = f.read().splitlines()\n \n if raw:\n ann_dict = \"\\n\".join(header_data)\n return ann_dict\n\n if backend.lower() == \"wfdb\":\n ann_dict = self._load_ann_wfdb(rec, header_data)\n elif backend.lower() == \"naive\":\n ann_dict = self._load_ann_naive(header_data)\n else:\n raise ValueError(f\"backend `{backend.lower()}` not supported for loading annotations\")\n return ann_dict\n\n def _load_ann_wfdb(self, rec:str, header_data:List[str]) -> dict:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n rec: str,\n name of the record\n header_data: list of str,\n list of lines read directly from a header file,\n complementary to data read using `wfdb.rdheader` if applicable,\n this data will be used, since `datetime` is not well parsed by `wfdb.rdheader`\n\n Returns\n -------\n ann_dict, dict,\n the annotations with items: ref. `self.ann_items`\n \"\"\"\n header_fp = self.get_header_filepath(rec, with_ext=False)\n header_reader = wfdb.rdheader(header_fp)\n ann_dict = {}\n ann_dict[\"rec_name\"], ann_dict[\"nb_leads\"], ann_dict[\"fs\"], ann_dict[\"nb_samples\"], ann_dict[\"datetime\"], daytime = header_data[0].split(\" \")\n\n ann_dict[\"nb_leads\"] = int(ann_dict[\"nb_leads\"])\n ann_dict[\"fs\"] = int(ann_dict[\"fs\"])\n ann_dict[\"nb_samples\"] = int(ann_dict[\"nb_samples\"])\n ann_dict[\"datetime\"] = datetime.strptime(\" \".join([ann_dict[\"datetime\"], daytime]), \"%d-%b-%Y %H:%M:%S\")\n try: # see NOTE. 
1.\n ann_dict[\"age\"] = int([l for l in header_reader.comments if \"Age\" in l][0].split(\": \")[-1])\n except:\n ann_dict[\"age\"] = np.nan\n try:\n ann_dict[\"sex\"] = [l for l in header_reader.comments if \"Sex\" in l][0].split(\": \")[-1]\n except:\n ann_dict[\"sex\"] = \"Unknown\"\n try:\n ann_dict[\"medical_prescription\"] = [l for l in header_reader.comments if \"Rx\" in l][0].split(\": \")[-1]\n except:\n ann_dict[\"medical_prescription\"] = \"Unknown\"\n try:\n ann_dict[\"history\"] = [l for l in header_reader.comments if \"Hx\" in l][0].split(\": \")[-1]\n except:\n ann_dict[\"history\"] = \"Unknown\"\n try:\n ann_dict[\"symptom_or_surgery\"] = [l for l in header_reader.comments if \"Sx\" in l][0].split(\": \")[-1]\n except:\n ann_dict[\"symptom_or_surgery\"] = \"Unknown\"\n\n l_Dx = [l for l in header_reader.comments if \"Dx\" in l][0].split(\": \")[-1].split(\",\")\n ann_dict[\"diagnosis\"], ann_dict[\"diagnosis_scored\"] = self._parse_diagnosis(l_Dx)\n\n df_leads = pd.DataFrame()\n for k in [\"file_name\", \"fmt\", \"byte_offset\", \"adc_gain\", \"units\", \"adc_res\", \"adc_zero\", \"baseline\", \"init_value\", \"checksum\", \"block_size\", \"sig_name\"]:\n df_leads[k] = header_reader.__dict__[k]\n df_leads = df_leads.rename(\n columns={\n \"sig_name\": \"lead_name\",\n \"units\":\"adc_units\",\n \"file_name\":\"filename\",\n }\n )\n df_leads.index = df_leads[\"lead_name\"]\n df_leads.index.name = None\n ann_dict[\"df_leads\"] = df_leads\n\n return ann_dict\n\n def _load_ann_naive(self, header_data:List[str]) -> dict:\n \"\"\" finished, checked,\n\n load annotations (header) using raw data read directly from a header file\n \n Parameters\n ----------\n header_data: list of str,\n list of lines read directly from a header file\n \n Returns\n -------\n ann_dict, dict,\n the annotations with items: ref. `self.ann_items`\n \"\"\"\n ann_dict = {}\n ann_dict[\"rec_name\"], ann_dict[\"nb_leads\"], ann_dict[\"fs\"], ann_dict[\"nb_samples\"], ann_dict[\"datetime\"], daytime = header_data[0].split(\" \")\n\n ann_dict[\"nb_leads\"] = int(ann_dict[\"nb_leads\"])\n ann_dict[\"fs\"] = int(ann_dict[\"fs\"])\n ann_dict[\"nb_samples\"] = int(ann_dict[\"nb_samples\"])\n ann_dict[\"datetime\"] = datetime.strptime(\" \".join([ann_dict[\"datetime\"], daytime]), \"%d-%b-%Y %H:%M:%S\")\n try: # see NOTE. 
1.\n ann_dict[\"age\"] = int([l for l in header_data if l.startswith(\"#Age\")][0].split(\": \")[-1])\n except:\n ann_dict[\"age\"] = np.nan\n try:\n ann_dict[\"sex\"] = [l for l in header_data if l.startswith(\"#Sex\")][0].split(\": \")[-1]\n except:\n ann_dict[\"sex\"] = \"Unknown\"\n try:\n ann_dict[\"medical_prescription\"] = [l for l in header_data if l.startswith(\"#Rx\")][0].split(\": \")[-1]\n except:\n ann_dict[\"medical_prescription\"] = \"Unknown\"\n try:\n ann_dict[\"history\"] = [l for l in header_data if l.startswith(\"#Hx\")][0].split(\": \")[-1]\n except:\n ann_dict[\"history\"] = \"Unknown\"\n try:\n ann_dict[\"symptom_or_surgery\"] = [l for l in header_data if l.startswith(\"#Sx\")][0].split(\": \")[-1]\n except:\n ann_dict[\"symptom_or_surgery\"] = \"Unknown\"\n\n l_Dx = [l for l in header_data if l.startswith(\"#Dx\")][0].split(\": \")[-1].split(\",\")\n ann_dict[\"diagnosis\"], ann_dict[\"diagnosis_scored\"] = self._parse_diagnosis(l_Dx)\n\n ann_dict[\"df_leads\"] = self._parse_leads(header_data[1:13])\n\n return ann_dict\n\n def _parse_diagnosis(self, l_Dx:List[str]) -> Tuple[dict, dict]:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n l_Dx: list of str,\n raw information of diagnosis, read from a header file\n\n Returns\n -------\n diag_dict:, dict,\n diagnosis, including SNOMED CT Codes, fullnames and abbreviations of each diagnosis\n diag_scored_dict: dict,\n the scored items in `diag_dict`\n \"\"\"\n diag_dict, diag_scored_dict = {}, {}\n try:\n diag_dict[\"diagnosis_code\"] = [item for item in l_Dx]\n # selection = dx_mapping_all[\"SNOMED CT Code\"].isin(diag_dict[\"diagnosis_code\"])\n # diag_dict[\"diagnosis_abbr\"] = dx_mapping_all[selection][\"Abbreviation\"].tolist()\n # diag_dict[\"diagnosis_fullname\"] = dx_mapping_all[selection][\"Dx\"].tolist()\n diag_dict[\"diagnosis_abbr\"] = \\\n [ dx_mapping_all[dx_mapping_all[\"SNOMED CT Code\"]==dc][\"Abbreviation\"].values[0] \\\n for dc in diag_dict[\"diagnosis_code\"] ]\n diag_dict[\"diagnosis_fullname\"] = \\\n [ dx_mapping_all[dx_mapping_all[\"SNOMED CT Code\"]==dc][\"Dx\"].values[0] \\\n for dc in diag_dict[\"diagnosis_code\"] ]\n scored_indices = np.isin(diag_dict[\"diagnosis_code\"], dx_mapping_scored[\"SNOMED CT Code\"].values)\n diag_scored_dict[\"diagnosis_code\"] = \\\n [ item for idx, item in enumerate(diag_dict[\"diagnosis_code\"]) \\\n if scored_indices[idx] ]\n diag_scored_dict[\"diagnosis_abbr\"] = \\\n [ item for idx, item in enumerate(diag_dict[\"diagnosis_abbr\"]) \\\n if scored_indices[idx] ]\n diag_scored_dict[\"diagnosis_fullname\"] = \\\n [ item for idx, item in enumerate(diag_dict[\"diagnosis_fullname\"]) \\\n if scored_indices[idx] ]\n except: # the old version, the Dx\"s are abbreviations\n diag_dict[\"diagnosis_abbr\"] = diag_dict[\"diagnosis_code\"]\n selection = dx_mapping_all[\"Abbreviation\"].isin(diag_dict[\"diagnosis_abbr\"])\n diag_dict[\"diagnosis_fullname\"] = dx_mapping_all[selection][\"Dx\"].tolist()\n # if not keep_original:\n # for idx, d in enumerate(ann_dict[\"diagnosis_abbr\"]):\n # if d in [\"Normal\", \"NSR\"]:\n # ann_dict[\"diagnosis_abbr\"] = [\"N\"]\n return diag_dict, diag_scored_dict\n\n def _parse_leads(self, l_leads_data:List[str]) -> pd.DataFrame:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n l_leads_data: list of str,\n raw information of each lead, read from a header file\n\n Returns\n -------\n df_leads: DataFrame,\n infomation of each leads in the format of DataFrame\n \"\"\"\n df_leads = 
pd.read_csv(io.StringIO(\"\\n\".join(l_leads_data)), delim_whitespace=True, header=None)\n df_leads.columns = [\"filename\", \"fmt+byte_offset\", \"adc_gain+units\", \"adc_res\", \"adc_zero\", \"init_value\", \"checksum\", \"block_size\", \"lead_name\",]\n df_leads[\"fmt\"] = df_leads[\"fmt+byte_offset\"].apply(lambda s: s.split(\"+\")[0])\n df_leads[\"byte_offset\"] = df_leads[\"fmt+byte_offset\"].apply(lambda s: s.split(\"+\")[1])\n df_leads[\"adc_gain\"] = df_leads[\"adc_gain+units\"].apply(lambda s: s.split(\"/\")[0])\n df_leads[\"adc_units\"] = df_leads[\"adc_gain+units\"].apply(lambda s: s.split(\"/\")[1])\n for k in [\"byte_offset\", \"adc_gain\", \"adc_res\", \"adc_zero\", \"init_value\", \"checksum\",]:\n df_leads[k] = df_leads[k].apply(lambda s: int(s))\n df_leads[\"baseline\"] = df_leads[\"adc_zero\"]\n df_leads = df_leads[[\"filename\", \"fmt\", \"byte_offset\", \"adc_gain\", \"adc_units\", \"adc_res\", \"adc_zero\", \"baseline\", \"init_value\", \"checksum\", \"block_size\", \"lead_name\"]]\n df_leads.index = df_leads[\"lead_name\"]\n df_leads.index.name = None\n return df_leads\n\n def load_header(self, rec:str, raw:bool=False) -> Union[dict,str]:\n \"\"\"\n alias for `load_ann`, as annotations are also stored in header files\n \"\"\"\n return self.load_ann(rec, raw)\n\n def get_labels(self, rec:str, scored_only:bool=True, fmt:str=\"s\", normalize:bool=True) -> List[str]:\n \"\"\" finished, checked,\n\n read labels (diagnoses or arrhythmias) of a record\n \n Parameters\n ----------\n rec: str,\n name of the record\n scored_only: bool, default True,\n only get the labels that are scored in the CINC2020 official phase\n fmt: str, default \"a\",\n the format of labels, one of the following (case insensitive):\n - \"a\", abbreviations\n - \"f\", full names\n - \"s\", SNOMED CT Code\n normalize: bool, default True,\n if True, the labels will be transformed into their equavalents,\n which are defined in `utils.utils_misc.cinc2020_aux_data.py`\n \n Returns\n -------\n labels, list,\n the list of labels\n \"\"\"\n ann_dict = self.load_ann(rec)\n if scored_only:\n labels = ann_dict[\"diagnosis_scored\"]\n else:\n labels = ann_dict[\"diagnosis\"]\n if fmt.lower() == \"a\":\n labels = labels[\"diagnosis_abbr\"]\n elif fmt.lower() == \"f\":\n labels = labels[\"diagnosis_fullname\"]\n elif fmt.lower() == \"s\":\n labels = labels[\"diagnosis_code\"]\n else:\n raise ValueError(f\"`fmt` should be one of `a`, `f`, `s`, but got `{fmt}`\")\n if normalize:\n labels = [self.label_trans_dict.get(item, item) for item in labels]\n return labels\n\n def get_fs(self, rec:str) -> Real:\n \"\"\" finished, checked,\n\n get the sampling frequency of a record\n\n Parameters\n ----------\n rec: str,\n name of the record\n\n Returns\n -------\n fs: real number,\n sampling frequency of the record `rec`\n \"\"\"\n tranche = self._get_tranche(rec)\n fs = self.fs[tranche]\n return fs\n\n def get_subject_info(self, rec:str, items:Optional[List[str]]=None) -> dict:\n \"\"\" finished, checked,\n\n read auxiliary information of a subject (a record) stored in the header files\n\n Parameters\n ----------\n rec: str,\n name of the record\n items: list of str, optional,\n items of the subject's information (e.g. 
sex, age, etc.)\n \n Returns\n -------\n subject_info: dict,\n information about the subject, including\n \"age\", \"sex\", \"medical_prescription\", \"history\", \"symptom_or_surgery\",\n \"\"\"\n if items is None or len(items) == 0:\n info_items = [\n \"age\", \"sex\", \"medical_prescription\", \"history\", \"symptom_or_surgery\",\n ]\n else:\n info_items = items\n ann_dict = self.load_ann(rec)\n subject_info = [ann_dict[item] for item in info_items]\n\n return subject_info\n\n def save_challenge_predictions(self, rec:str, output_dir:str, scores:List[Real], labels:List[int], classes:List[str]) -> NoReturn:\n \"\"\" NOT finished, NOT checked, need updating, \n \n TODO: update for the official phase\n\n Parameters\n ----------\n rec: str,\n name of the record\n output_dir: str,\n directory to save the predictions\n scores: list of real,\n raw predictions\n labels: list of int,\n 0 or 1, binary predictions\n classes: list of str,\n SNOMED CT Code of binary predictions\n \"\"\"\n new_file = f\"{rec}.csv\"\n output_file = os.path.join(output_dir, new_file)\n\n # Include the filename as the recording number\n recording_string = f\"#{rec}\"\n class_string = \",\".join(classes)\n label_string = \",\".join(str(i) for i in labels)\n score_string = \",\".join(str(i) for i in scores)\n\n with open(output_file, \"w\") as f:\n # f.write(recording_string + \"\\n\" + class_string + \"\\n\" + label_string + \"\\n\" + score_string + \"\\n\")\n f.write(\"\\n\".join([recording_string, class_string, label_string, score_string, \"\"]))\n\n def plot(self,\n rec:str,\n data:Optional[np.ndarray]=None,\n ann:Optional[Dict[str, np.ndarray]]=None,\n ticks_granularity:int=0,\n leads:Optional[Union[str, List[str]]]=None,\n same_range:bool=False,\n waves:Optional[Dict[str, Sequence[int]]]=None,\n **kwargs:Any) -> NoReturn:\n \"\"\" finished, checked, to improve,\n\n plot the signals of a record or external signals (units in μV),\n with metadata (fs, labels, tranche, etc.),\n possibly also along with wave delineations\n\n Parameters\n ----------\n rec: str,\n name of the record\n data: ndarray, optional,\n (12-lead) ecg signal to plot,\n should be of the format \"channel_first\", and compatible with `leads`\n if given, data of `rec` will not be used,\n this is useful when plotting filtered data\n ann: dict, optional,\n annotations for `data`, with 2 items: \"scored\", \"all\",\n ignored if `data` is None\n ticks_granularity: int, default 0,\n the granularity to plot axis ticks, the higher the more,\n 0 (no ticks) --> 1 (major ticks) --> 2 (major + minor ticks)\n leads: str or list of str, optional,\n the leads to plot\n same_range: bool, default False,\n if True, forces all leads to have the same y range\n waves: dict, optional,\n indices of the wave critical points, including\n \"p_onsets\", \"p_peaks\", \"p_offsets\",\n \"q_onsets\", \"q_peaks\", \"r_peaks\", \"s_peaks\", \"s_offsets\",\n \"t_onsets\", \"t_peaks\", \"t_offsets\"\n kwargs: dict,\n\n TODO\n ----\n 1. slice too long records, and plot separately for each segment\n 2. 
plot waves using `axvspan`\n\n NOTE\n ----\n `Locator` of `plt` has default `MAXTICKS` equal to 1000,\n if not modifying this number, at most 40 seconds of signal could be plotted once\n\n Contributors: Jeethan, and WEN Hao\n \"\"\"\n tranche = self._get_tranche(rec)\n if tranche in \"CDE\":\n physionet_lightwave_suffix = CFG({\n \"C\": \"incartdb/1.0.0\",\n \"D\": \"ptbdb/1.0.0\",\n \"E\": \"ptb-xl/1.0.1\",\n })\n url = f\"https://physionet.org/lightwave/?db={physionet_lightwave_suffix[tranche]}\"\n print(f\"better view: {url}\")\n \n if \"plt\" not in dir():\n import matplotlib.pyplot as plt\n plt.MultipleLocator.MAXTICKS = 3000\n if leads is None or leads == \"all\":\n _leads = self.all_leads\n elif isinstance(leads, str):\n _leads = [leads]\n else:\n _leads = leads\n # assert all([l in self.all_leads for l in _leads])\n assert set(_leads).issubset(self._all_leads_set)\n\n # lead_list = self.load_ann(rec)[\"df_leads\"][\"lead_name\"].tolist()\n # lead_indices = [lead_list.index(l) for l in _leads]\n lead_indices = [self.all_leads.index(l) for l in _leads]\n if data is None:\n _data = self.load_data(rec, data_format=\"channel_first\", units=\"μV\")[lead_indices]\n else:\n units = self._auto_infer_units(data)\n print(f\"input data is auto detected to have units in {units}\")\n if units.lower() == \"mv\":\n _data = 1000 * data\n else:\n _data = data\n assert _data.shape[0] == len(_leads), \\\n f\"number of leads from data of shape ({_data.shape[0]}) does not match the length ({len(_leads)}) of `leads`\"\n \n if same_range:\n y_ranges = np.ones((_data.shape[0],)) * np.max(np.abs(_data)) + 100\n else:\n y_ranges = np.max(np.abs(_data), axis=1) + 100\n\n if waves:\n if waves.get(\"p_onsets\", None) and waves.get(\"p_offsets\", None):\n p_waves = [\n [onset, offset] \\\n for onset, offset in zip(waves[\"p_onsets\"], waves[\"p_offsets\"])\n ]\n elif waves.get(\"p_peaks\", None):\n p_waves = [\n [\n max(0, p + ms2samples(PlotCfg.p_onset, fs=self.get_fs(rec))),\n min(_data.shape[1], p + ms2samples(PlotCfg.p_offset, fs=self.get_fs(rec)))\n ] for p in waves[\"p_peaks\"]\n ]\n else:\n p_waves = []\n if waves.get(\"q_onsets\", None) and waves.get(\"s_offsets\", None):\n qrs = [\n [onset, offset] for onset, offset in zip(waves[\"q_onsets\"], waves[\"s_offsets\"])\n ]\n elif waves.get(\"q_peaks\", None) and waves.get(\"s_peaks\", None):\n qrs = [\n [\n max(0, q + ms2samples(PlotCfg.q_onset, fs=self.get_fs(rec))),\n min(_data.shape[1], s + ms2samples(PlotCfg.s_offset, fs=self.get_fs(rec)))\n ] for q,s in zip(waves[\"q_peaks\"], waves[\"s_peaks\"])\n ]\n elif waves.get(\"r_peaks\", None):\n qrs = [\n [\n max(0, r + ms2samples(PlotCfg.qrs_radius, fs=self.get_fs(rec))),\n min(_data.shape[1], r + ms2samples(PlotCfg.qrs_radius, fs=self.get_fs(rec)))\n ] for r in waves[\"r_peaks\"]\n ]\n else:\n qrs = []\n if waves.get(\"t_onsets\", None) and waves.get(\"t_offsets\", None):\n t_waves = [\n [onset, offset] for onset, offset in zip(waves[\"t_onsets\"], waves[\"t_offsets\"])\n ]\n elif waves.get(\"t_peaks\", None):\n t_waves = [\n [\n max(0, t + ms2samples(PlotCfg.t_onset, fs=self.get_fs(rec))),\n min(_data.shape[1], t + ms2samples(PlotCfg.t_offset, fs=self.get_fs(rec)))\n ] for t in waves[\"t_peaks\"]\n ]\n else:\n t_waves = []\n else:\n p_waves, qrs, t_waves = [], [], []\n palette = {\"p_waves\": \"green\", \"qrs\": \"red\", \"t_waves\": \"pink\",}\n plot_alpha = 0.4\n\n if ann is None or data is None:\n diag_scored = self.get_labels(rec, scored_only=True, fmt=\"a\")\n diag_all = self.get_labels(rec, 
scored_only=False, fmt=\"a\")\n else:\n diag_scored = ann[\"scored\"]\n diag_all = ann[\"all\"]\n\n nb_leads = len(_leads)\n\n seg_len = self.fs[tranche] * 25 # 25 seconds\n nb_segs = _data.shape[1] // seg_len\n\n t = np.arange(_data.shape[1]) / self.fs[tranche]\n duration = len(t) / self.fs[tranche]\n fig_sz_w = int(round(DEFAULT_FIG_SIZE_PER_SEC * duration))\n fig_sz_h = 6 * np.maximum(y_ranges, 750) / 1500\n fig, axes = plt.subplots(nb_leads, 1, sharex=False, figsize=(fig_sz_w, np.sum(fig_sz_h)))\n if nb_leads == 1:\n axes = [axes]\n for idx in range(nb_leads):\n axes[idx].plot(t, _data[idx], color=\"black\", linewidth=\"2.0\", label=f\"lead - {_leads[idx]}\")\n axes[idx].axhline(y=0, linestyle=\"-\", linewidth=\"1.0\", color=\"red\")\n # NOTE that `Locator` has default `MAXTICKS` equal to 1000\n if ticks_granularity >= 1:\n axes[idx].xaxis.set_major_locator(plt.MultipleLocator(0.2))\n axes[idx].yaxis.set_major_locator(plt.MultipleLocator(500))\n axes[idx].grid(which=\"major\", linestyle=\"-\", linewidth=\"0.4\", color=\"red\")\n if ticks_granularity >= 2:\n axes[idx].xaxis.set_minor_locator(plt.MultipleLocator(0.04))\n axes[idx].yaxis.set_minor_locator(plt.MultipleLocator(100))\n axes[idx].grid(which=\"minor\", linestyle=\":\", linewidth=\"0.2\", color=\"gray\")\n # add extra info. to legend\n # https://stackoverflow.com/questions/16826711/is-it-possible-to-add-a-string-as-a-legend-item-in-matplotlib\n axes[idx].plot([], [], \" \", label=f\"labels_s - {','.join(diag_scored)}\")\n axes[idx].plot([], [], \" \", label=f\"labels_a - {','.join(diag_all)}\")\n axes[idx].plot([], [], \" \", label=f\"tranche - {self.tranche_names[tranche]}\")\n axes[idx].plot([], [], \" \", label=f\"fs - {self.fs[tranche]}\")\n for w in [\"p_waves\", \"qrs\", \"t_waves\"]:\n for itv in eval(w):\n axes[idx].axvspan(itv[0], itv[1], color=palette[w], alpha=plot_alpha)\n axes[idx].legend(loc=\"upper left\", fontsize=14)\n axes[idx].set_xlim(t[0], t[-1])\n axes[idx].set_ylim(min(-600, -y_ranges[idx]), max(600, y_ranges[idx]))\n axes[idx].set_xlabel(\"Time [s]\", fontsize=16)\n axes[idx].set_ylabel(\"Voltage [μV]\", fontsize=16)\n plt.subplots_adjust(hspace=0.05)\n fig.tight_layout()\n if kwargs.get(\"save_path\", None):\n plt.savefig(kwargs[\"save_path\"], dpi=200, bbox_inches=\"tight\")\n else:\n plt.show()\n\n def get_tranche_class_distribution(self, tranches:Sequence[str], scored_only:bool=True) -> Dict[str, int]:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n tranches: sequence of str,\n tranche symbols (A-F)\n scored_only: bool, default True,\n only get class distributions that are scored in the CINC2020 official phase\n \n Returns\n -------\n distribution: dict,\n keys are abbrevations of the classes, values are appearance of corr. 
classes in the tranche.\n \"\"\"\n tranche_names = [self.tranche_names[t] for t in tranches]\n df = dx_mapping_scored if scored_only else dx_mapping_all\n distribution = CFG()\n for _, row in df.iterrows():\n num = (row[[tranche_names]].values).sum()\n if num > 0:\n distribution[row[\"Abbreviation\"]] = num\n return distribution\n\n @staticmethod\n def get_arrhythmia_knowledge(arrhythmias:Union[str,List[str]], **kwargs) -> NoReturn:\n \"\"\" finished, checked,\n\n knowledge about ECG features of specific arrhythmias,\n\n Parameters\n ----------\n arrhythmias: str, or list of str,\n the arrhythmia(s) to check, in abbreviations or in SNOMED CT Code\n \"\"\"\n if isinstance(arrhythmias, str):\n d = [normalize_class(arrhythmias)]\n else:\n d = [normalize_class(c) for c in arrhythmias]\n # pp = pprint.PrettyPrinter(indent=4)\n # unsupported = [item for item in d if item not in dx_mapping_all[\"Abbreviation\"]]\n unsupported = [item for item in d if item not in dx_mapping_scored[\"Abbreviation\"].values]\n assert len(unsupported) == 0, \\\n f\"`{unsupported}` {'is' if len(unsupported)==1 else 'are'} not supported!\"\n for idx, item in enumerate(d):\n # pp.pprint(eval(f\"EAK.{item}\"))\n print(dict_to_str(eval(f\"EAK.{item}\")))\n if idx < len(d)-1:\n print(\"*\"*110)\n\n def load_resampled_data(self, rec:str, data_format:str=\"channel_first\", siglen:Optional[int]=None) -> np.ndarray:\n \"\"\" finished, checked,\n\n resample the data of `rec` to 500Hz,\n or load the resampled data in 500Hz, if the corr. data file already exists\n\n Parameters\n ----------\n rec: str,\n name of the record\n data_format: str, default \"channel_first\",\n format of the ecg data,\n \"channel_last\" (alias \"lead_last\"), or\n \"channel_first\" (alias \"lead_first\")\n siglen: int, optional,\n signal length, units in number of samples,\n if set, signal with length longer will be sliced to the length of `siglen`\n used for example when preparing/doing model training\n\n Returns\n -------\n data: ndarray,\n the resampled (and perhaps sliced) signal data\n \"\"\"\n tranche = self._get_tranche(rec)\n if siglen is None:\n rec_fp = os.path.join(self.db_dirs[tranche], f\"{rec}_500Hz.npy\")\n else:\n rec_fp = os.path.join(self.db_dirs[tranche], f\"{rec}_500Hz_siglen_{siglen}.npy\")\n if not os.path.isfile(rec_fp):\n # print(f\"corresponding file {os.basename(rec_fp)} does not exist\")\n data = self.load_data(rec, data_format=\"channel_first\", units=\"mV\", fs=None)\n if self.fs[tranche] != 500:\n data = resample_poly(data, 500, self.fs[tranche], axis=1)\n if siglen is not None and data.shape[1] >= siglen:\n # slice_start = (data.shape[1] - siglen)//2\n # slice_end = slice_start + siglen\n # data = data[..., slice_start:slice_end]\n data = ensure_siglen(data, siglen=siglen, fmt=\"channel_first\")\n np.save(rec_fp, data)\n elif siglen is None:\n np.save(rec_fp, data)\n else:\n # print(f\"loading from local file...\")\n data = np.load(rec_fp)\n if data_format.lower() in [\"channel_last\", \"lead_last\"]:\n data = data.T\n return data\n\n def load_raw_data(self, rec:str, backend:str=\"scipy\") -> np.ndarray:\n \"\"\" finished, checked,\n\n load raw data from corresponding files with no further processing,\n in order to facilitate feeding data into the `run_12ECG_classifier` function\n\n Parameters\n ----------\n rec: str,\n name of the record\n backend: str, default \"scipy\",\n the backend data reader, can also be \"wfdb\",\n note that \"scipy\" provides data in the format of \"lead_first\",\n while \"wfdb\" provides data in 
the format of \"lead_last\",\n\n Returns\n -------\n raw_data: ndarray,\n raw data (d_signal) loaded from corresponding data file,\n without subtracting baseline nor dividing adc gain\n \"\"\"\n tranche = self._get_tranche(rec)\n if backend.lower() == \"wfdb\":\n rec_fp = self.get_data_filepath(rec, with_ext=False)\n wfdb_rec = wfdb.rdrecord(rec_fp, physical=False)\n raw_data = np.asarray(wfdb_rec.d_signal)\n elif backend.lower() == \"scipy\":\n rec_fp = self.get_data_filepath(rec, with_ext=True)\n raw_data = loadmat(rec_fp)[\"val\"]\n return raw_data\n\n def _check_nan(self, tranches:Union[str, Sequence[str]]) -> NoReturn:\n \"\"\" finished, checked,\n\n check if records from `tranches` has nan values\n\n accessing data using `p_signal` of `wfdb` would produce nan values,\n if exceptionally large values are encountered,\n this could help detect abnormal records as well\n\n Parameters\n ----------\n tranches: str or sequence of str,\n tranches to check\n \"\"\"\n for t in tranches:\n for rec in self.all_records[t]:\n data = self.load_data(rec)\n if np.isnan(data).any():\n print(f\"record {rec} from tranche {t} has nan values\")\n\n\n\nfrom ..aux_data.cinc2020_aux_data import load_weights\n\n\ndef compute_all_metrics(classes:List[str], truth:Sequence, binary_pred:Sequence, scalar_pred:Sequence) -> Tuple[float]:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n classes: list of str,\n list of all the classes, in the format of abbrevations\n truth: sequence,\n ground truth array, of shape (n_records, n_classes), with values 0 or 1\n binary_pred: sequence,\n binary predictions, of shape (n_records, n_classes), with values 0 or 1\n scalar_pred: sequence,\n probability predictions, of shape (n_records, n_classes), with values within [0,1]\n\n Returns\n -------\n auroc: float,\n auprc: float,\n accuracy: float,\n f_measure: float,\n f_beta_measure: float,\n g_beta_measure: float,\n challenge_metric: float,\n \"\"\"\n # normal_class = \"426783006\"\n normal_class = \"NSR\"\n # equivalent_classes = [[\"713427006\", \"59118001\"], [\"284470004\", \"63593006\"], [\"427172004\", \"17338001\"]]\n weights = load_weights(classes=classes)\n\n _truth = np.array(truth)\n _binary_pred = np.array(binary_pred)\n _scalar_pred = np.array(scalar_pred)\n\n print(\"- AUROC and AUPRC...\")\n auroc, auprc = compute_auc(_truth, _scalar_pred)\n\n print(\"- Accuracy...\")\n accuracy = compute_accuracy(_truth, _binary_pred)\n\n print(\"- F-measure...\")\n f_measure = compute_f_measure(_truth, _binary_pred)\n\n print(\"- F-beta and G-beta measures...\")\n f_beta_measure, g_beta_measure = compute_beta_measures(_truth, _binary_pred, beta=2)\n\n print(\"- Challenge metric...\")\n challenge_metric = compute_challenge_metric(weights, _truth, _binary_pred, classes, normal_class)\n\n print(\"Done.\")\n\n # Return the results.\n return auroc, auprc, accuracy, f_measure, f_beta_measure, g_beta_measure, challenge_metric\n\n\n# Compute recording-wise accuracy.\ndef compute_accuracy(labels:np.ndarray, outputs:np.ndarray) -> float:\n \"\"\" checked,\n \"\"\"\n num_recordings, num_classes = np.shape(labels)\n\n num_correct_recordings = 0\n for i in range(num_recordings):\n if np.all(labels[i, :]==outputs[i, :]):\n num_correct_recordings += 1\n\n return float(num_correct_recordings) / float(num_recordings)\n\n\n# Compute confusion matrices.\ndef compute_confusion_matrices(labels:np.ndarray, outputs:np.ndarray, normalize:bool=False) -> np.ndarray:\n \"\"\" checked,\n \"\"\"\n # Compute a binary confusion matrix for each class 
k:\n #\n # [TN_k FN_k]\n # [FP_k TP_k]\n #\n # If the normalize variable is set to true, then normalize the contributions\n # to the confusion matrix by the number of labels per recording.\n num_recordings, num_classes = np.shape(labels)\n\n if not normalize:\n A = np.zeros((num_classes, 2, 2))\n for i in range(num_recordings):\n for j in range(num_classes):\n if labels[i, j]==1 and outputs[i, j]==1: # TP\n A[j, 1, 1] += 1\n elif labels[i, j]==0 and outputs[i, j]==1: # FP\n A[j, 1, 0] += 1\n elif labels[i, j]==1 and outputs[i, j]==0: # FN\n A[j, 0, 1] += 1\n elif labels[i, j]==0 and outputs[i, j]==0: # TN\n A[j, 0, 0] += 1\n else: # This condition should not happen.\n raise ValueError(\"Error in computing the confusion matrix.\")\n else:\n A = np.zeros((num_classes, 2, 2))\n for i in range(num_recordings):\n normalization = float(max(np.sum(labels[i, :]), 1))\n for j in range(num_classes):\n if labels[i, j]==1 and outputs[i, j]==1: # TP\n A[j, 1, 1] += 1.0/normalization\n elif labels[i, j]==0 and outputs[i, j]==1: # FP\n A[j, 1, 0] += 1.0/normalization\n elif labels[i, j]==1 and outputs[i, j]==0: # FN\n A[j, 0, 1] += 1.0/normalization\n elif labels[i, j]==0 and outputs[i, j]==0: # TN\n A[j, 0, 0] += 1.0/normalization\n else: # This condition should not happen.\n raise ValueError(\"Error in computing the confusion matrix.\")\n\n return A\n\n\n# Compute macro F-measure.\ndef compute_f_measure(labels:np.ndarray, outputs:np.ndarray) -> float:\n \"\"\" checked,\n \"\"\"\n num_recordings, num_classes = np.shape(labels)\n\n A = compute_confusion_matrices(labels, outputs)\n\n f_measure = np.zeros(num_classes)\n for k in range(num_classes):\n tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0]\n if 2 * tp + fp + fn:\n f_measure[k] = float(2 * tp) / float(2 * tp + fp + fn)\n else:\n f_measure[k] = float(\"nan\")\n\n macro_f_measure = np.nanmean(f_measure)\n\n return macro_f_measure\n\n\n# Compute F-beta and G-beta measures from the unofficial phase of the Challenge.\ndef compute_beta_measures(labels:np.ndarray, outputs:np.ndarray, beta:Real) -> Tuple[float, float]:\n \"\"\" checked,\n \"\"\"\n num_recordings, num_classes = np.shape(labels)\n\n A = compute_confusion_matrices(labels, outputs, normalize=True)\n\n f_beta_measure = np.zeros(num_classes)\n g_beta_measure = np.zeros(num_classes)\n for k in range(num_classes):\n tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0]\n if (1+beta**2)*tp + fp + beta**2*fn:\n f_beta_measure[k] = float((1+beta**2)*tp) / float((1+beta**2)*tp + fp + beta**2*fn)\n else:\n f_beta_measure[k] = float(\"nan\")\n if tp + fp + beta*fn:\n g_beta_measure[k] = float(tp) / float(tp + fp + beta*fn)\n else:\n g_beta_measure[k] = float(\"nan\")\n\n macro_f_beta_measure = np.nanmean(f_beta_measure)\n macro_g_beta_measure = np.nanmean(g_beta_measure)\n\n return macro_f_beta_measure, macro_g_beta_measure\n\n\n# Compute macro AUROC and macro AUPRC.\ndef compute_auc(labels:np.ndarray, outputs:np.ndarray) -> Tuple[float, float]:\n \"\"\" checked,\n \"\"\"\n num_recordings, num_classes = np.shape(labels)\n\n # Compute and summarize the confusion matrices for each class across at distinct output values.\n auroc = np.zeros(num_classes)\n auprc = np.zeros(num_classes)\n\n for k in range(num_classes):\n # We only need to compute TPs, FPs, FNs, and TNs at distinct output values.\n thresholds = np.unique(outputs[:, k])\n thresholds = np.append(thresholds, thresholds[-1]+1)\n thresholds = thresholds[::-1]\n num_thresholds = len(thresholds)\n\n # Initialize the TPs, FPs, 
FNs, and TNs.\n tp = np.zeros(num_thresholds)\n fp = np.zeros(num_thresholds)\n fn = np.zeros(num_thresholds)\n tn = np.zeros(num_thresholds)\n fn[0] = np.sum(labels[:, k]==1)\n tn[0] = np.sum(labels[:, k]==0)\n\n # Find the indices that result in sorted output values.\n idx = np.argsort(outputs[:, k])[::-1]\n\n # Compute the TPs, FPs, FNs, and TNs for class k across thresholds.\n i = 0\n for j in range(1, num_thresholds):\n # Initialize TPs, FPs, FNs, and TNs using values at previous threshold.\n tp[j] = tp[j-1]\n fp[j] = fp[j-1]\n fn[j] = fn[j-1]\n tn[j] = tn[j-1]\n\n # Update the TPs, FPs, FNs, and TNs at i-th output value.\n while i < num_recordings and outputs[idx[i], k] >= thresholds[j]:\n if labels[idx[i], k]:\n tp[j] += 1\n fn[j] -= 1\n else:\n fp[j] += 1\n tn[j] -= 1\n i += 1\n\n # Summarize the TPs, FPs, FNs, and TNs for class k.\n tpr = np.zeros(num_thresholds)\n tnr = np.zeros(num_thresholds)\n ppv = np.zeros(num_thresholds)\n for j in range(num_thresholds):\n if tp[j] + fn[j]:\n tpr[j] = float(tp[j]) / float(tp[j] + fn[j])\n else:\n tpr[j] = float(\"nan\")\n if fp[j] + tn[j]:\n tnr[j] = float(tn[j]) / float(fp[j] + tn[j])\n else:\n tnr[j] = float(\"nan\")\n if tp[j] + fp[j]:\n ppv[j] = float(tp[j]) / float(tp[j] + fp[j])\n else:\n ppv[j] = float(\"nan\")\n\n # Compute AUROC as the area under a piecewise linear function with TPR/\n # sensitivity (x-axis) and TNR/specificity (y-axis) and AUPRC as the area\n # under a piecewise constant with TPR/recall (x-axis) and PPV/precision\n # (y-axis) for class k.\n for j in range(num_thresholds-1):\n auroc[k] += 0.5 * (tpr[j+1] - tpr[j]) * (tnr[j+1] + tnr[j])\n auprc[k] += (tpr[j+1] - tpr[j]) * ppv[j+1]\n\n # Compute macro AUROC and macro AUPRC across classes.\n macro_auroc = np.nanmean(auroc)\n macro_auprc = np.nanmean(auprc)\n\n return macro_auroc, macro_auprc\n\n\n# Compute modified confusion matrix for multi-class, multi-label tasks.\ndef compute_modified_confusion_matrix(labels:np.ndarray, outputs:np.ndarray) -> np.ndarray:\n \"\"\" checked,\n\n Compute a binary multi-class, multi-label confusion matrix,\n where the rows are the labels and the columns are the outputs.\n \"\"\"\n num_recordings, num_classes = np.shape(labels)\n A = np.zeros((num_classes, num_classes))\n\n # Iterate over all of the recordings.\n for i in range(num_recordings):\n # Calculate the number of positive labels and/or outputs.\n normalization = float(max(np.sum(np.any((labels[i, :], outputs[i, :]), axis=0)), 1))\n # Iterate over all of the classes.\n for j in range(num_classes):\n # Assign full and/or partial credit for each positive class.\n if labels[i, j]:\n for k in range(num_classes):\n if outputs[i, k]:\n A[j, k] += 1.0/normalization\n return A\n\n\n# Compute the evaluation metric for the Challenge.\ndef compute_challenge_metric(weights:np.ndarray, labels:np.ndarray, outputs:np.ndarray, classes:List[str], normal_class:str) -> float:\n \"\"\" checked,\n \"\"\"\n num_recordings, num_classes = np.shape(labels)\n normal_index = classes.index(normal_class)\n\n # Compute the observed score.\n A = compute_modified_confusion_matrix(labels, outputs)\n observed_score = np.nansum(weights * A)\n\n # Compute the score for the model that always chooses the correct label(s).\n correct_outputs = labels\n A = compute_modified_confusion_matrix(labels, correct_outputs)\n correct_score = np.nansum(weights * A)\n\n # Compute the score for the model that always chooses the normal class.\n inactive_outputs = np.zeros((num_recordings, num_classes), dtype=np.bool)\n 
inactive_outputs[:, normal_index] = 1\n A = compute_modified_confusion_matrix(labels, inactive_outputs)\n inactive_score = np.nansum(weights * A)\n\n if correct_score != inactive_score:\n normalized_score = float(observed_score - inactive_score) / float(correct_score - inactive_score)\n else:\n normalized_score = 0.0\n\n return normalized_score\n\n\n# alias\ncompute_metrics = compute_challenge_metric\n"
] | [
[
"torch.set_default_tensor_type",
"torch.nn.Dropout",
"torch.nn.BatchNorm1d",
"numpy.set_printoptions",
"torch.nn.ModuleDict"
],
[
"numpy.asarray",
"pandas.DataFrame",
"matplotlib.pyplot.MultipleLocator",
"numpy.all",
"numpy.nanmean",
"numpy.any",
"numpy.unique",
"numpy.arange",
"scipy.io.loadmat",
"numpy.save",
"numpy.nansum",
"matplotlib.pyplot.subplots_adjust",
"numpy.load",
"numpy.zeros",
"numpy.isin",
"scipy.signal.resample_poly",
"numpy.isnan",
"matplotlib.pyplot.savefig",
"numpy.append",
"numpy.argsort",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.sum",
"numpy.maximum",
"numpy.abs",
"numpy.set_printoptions",
"numpy.ones",
"numpy.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
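An aside on the conversion the CINC2020 reader above performs: the scipy backend of `load_data` recovers physical values from the digital `.mat` signal as `(digital - baseline) / adc_gain`, then scales by 1000 when μV are requested. A minimal numpy sketch of just that step, with made-up signal values (the gain of 1000.0 and baseline of 0 match what the tranche check in the NOTE section reports, but the signal itself is hypothetical):

import numpy as np

# hypothetical digital signal: 2 leads x 5 samples
digital = np.array([[10., 12., 15., 11., 9.],
                    [20., 22., 19., 21., 23.]])
# per-lead header values, reshaped to column vectors as in load_data
baseline = np.array([0, 0]).reshape(2, 1)
adc_gain = np.array([1000.0, 1000.0]).reshape(2, 1)

physical_mv = (digital - baseline) / adc_gain  # physical units (mV)
physical_uv = physical_mv * 1000               # units="uV" in load_data

The wfdb backend skips this arithmetic entirely: `wfdb.rdrecord(..., physical=True)` applies the same conversion internally and returns `p_signal` in channel-last format, which is why that branch transposes the result.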
My-Technical-Architect/tensorflow | [
"35cf4653e6fe15953e2e565afc5a0fd2ab4d5290"
] | [
"tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import tensor_util\n\ndists = tf.contrib.distributions\n\n\nclass DistributionTest(tf.test.TestCase):\n\n def testParamShapesAndFromParams(self):\n classes = [\n dists.Normal,\n dists.Bernoulli,\n dists.Beta,\n dists.Chi2,\n dists.Exponential,\n dists.Gamma,\n dists.InverseGamma,\n dists.Laplace,\n dists.StudentT,\n dists.Uniform]\n\n sample_shapes = [(), (10,), (10, 20, 30)]\n with self.test_session():\n for cls in classes:\n for sample_shape in sample_shapes:\n param_shapes = cls.param_shapes(sample_shape)\n params = dict([(name, tf.random_normal(shape))\n for name, shape in param_shapes.items()])\n dist = cls(**params)\n self.assertAllEqual(sample_shape, tf.shape(dist.sample()).eval())\n dist_copy = dist.copy()\n self.assertAllEqual(sample_shape,\n tf.shape(dist_copy.sample()).eval())\n self.assertEqual(dist.parameters, dist_copy.parameters)\n\n def testCopyExtraArgs(self):\n with self.test_session():\n # Note: we cannot easily test all distributions since each requires\n # different initialization arguments. 
We therefore spot test a few.\n normal = dists.Normal(mu=1., sigma=2., validate_args=True)\n self.assertEqual(normal.parameters, normal.copy().parameters)\n wishart = dists.WishartFull(df=2, scale=[[1., 2], [2, 5]],\n validate_args=True)\n self.assertEqual(wishart.parameters, wishart.copy().parameters)\n\n def testCopyOverride(self):\n with self.test_session():\n normal = dists.Normal(mu=1., sigma=2., validate_args=True)\n normal_copy = normal.copy(validate_args=False)\n base_params = normal.parameters.copy()\n copy_params = normal.copy(validate_args=False).parameters.copy()\n self.assertNotEqual(base_params.pop(\"validate_args\"),\n copy_params.pop(\"validate_args\"))\n self.assertEqual(base_params, copy_params)\n\n def testIsScalar(self):\n with self.test_session():\n mu = 1.\n sigma = 2.\n\n normal = dists.Normal(mu, sigma,\n validate_args=True)\n self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))\n self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch))\n\n normal = dists.Normal([mu], [sigma],\n validate_args=True)\n self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))\n self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch))\n\n mvn = dists.MultivariateNormalDiag([mu], [sigma],\n validate_args=True)\n self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))\n self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch))\n\n mvn = dists.MultivariateNormalDiag([[mu]], [[sigma]],\n validate_args=True)\n self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))\n self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch))\n\n # We now test every codepath within the underlying is_scalar_helper\n # function.\n\n # Test case 1, 2.\n x = tf.placeholder(dtype=tf.int32, shape=[])\n # None would fire an exception were it actually executed.\n self.assertTrue(normal._is_scalar_helper(x.get_shape, lambda: None))\n self.assertTrue(normal._is_scalar_helper(lambda: tf.TensorShape(None),\n lambda: tf.shape(x)))\n\n x = tf.placeholder(dtype=tf.int32, shape=[1])\n # None would fire an exception were it actually executed.\n self.assertFalse(normal._is_scalar_helper(x.get_shape, lambda: None))\n self.assertFalse(normal._is_scalar_helper(lambda: tf.TensorShape(None),\n lambda: tf.shape(x)))\n\n # Test case 3.\n x = tf.placeholder(dtype=tf.int32)\n is_scalar = normal._is_scalar_helper(x.get_shape, lambda: tf.shape(x))\n self.assertTrue(is_scalar.eval(feed_dict={x: 1}))\n self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.TensorShape",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.test.main",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
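A brief illustration of the construction pattern the first test above exercises: `param_shapes` maps a desired sample shape to the parameter shapes that produce it, so a distribution can be built generically from random tensors. A sketch under the same TF 1.x `tf.contrib.distributions` API used in the test (it will not run on TF 2.x, where contrib was removed):

import tensorflow as tf

dists = tf.contrib.distributions

sample_shape = (10,)
# ask the class which parameter shapes yield samples of sample_shape
param_shapes = dists.Normal.param_shapes(sample_shape)
# as in the test, parameters are unconstrained random values
# (validate_args is left off)
params = {name: tf.random_normal(shape)
          for name, shape in param_shapes.items()}
dist = dists.Normal(**params)

with tf.Session() as sess:
    print(sess.run(tf.shape(dist.sample())))  # -> [10]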
project-delphi/ACS-QG | [
"03aa5b79030b5ba4c09a99363a58454743876592"
] | [
"model/encoder.py"
] | [
"\"\"\"\nImplement input sentence encoder.\n\"\"\"\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pad_packed_sequence as unpack\nfrom torch.nn.utils.rnn import pack_padded_sequence as pack\nfrom .config import *\nfrom common.constants import DEVICE\nfrom util.tensor_utils import to_sorted_tensor, to_original_tensor\n\n\nclass Encoder(nn.Module):\n \"\"\"\n Transform embeddings to encoding representations.\n \"\"\"\n\n def __init__(self, config, input_size, dropout=0.1):\n \"\"\"\n Initialize a GRU encoder.\n :param config: configuration, includes total enc size, is bi-direction, etc.\n :param input_size: input dimension.\n :param dropout: dropout rate for GRU\n \"\"\"\n super(Encoder, self).__init__()\n self.config = config\n self.layers = config.layers\n self.num_directions = 2 if config.brnn else 1\n assert config.enc_rnn_size % self.num_directions == 0\n self.hidden_size = config.enc_rnn_size // self.num_directions\n self.rnn = nn.GRU(\n input_size, self.hidden_size,\n num_layers=config.layers, dropout=config.dropout,\n bidirectional=config.brnn, batch_first=True)\n\n def forward(self, input_emb, lengths, hidden=None):\n \"\"\"\n Given input embeddings and input seq lengths, calculate encoding representations.\n :param input_emb: embedding of a batch.\n Input shape - [seq_len, batch_size, hidden_dim]\n :param lengths: lengths of each sample.\n :param hidden: hidden of previous layer. Default None.\n :return: encoding of a batch.\n Output shape - [unpadded_max_thisbatch_seq_len, batch_size, hidden_dim * num_layers]\n TODO: revise code to make input and output shape be [batch, length, dim]\n \"\"\"\n # input_emb shape: [seq_len, batch_size, hidden_dim] [100, 32, 412]\n # sorted_emb shape: [seq_len, batch_size, hidden_dim] [100, 32, 412]\n sorted_input_emb, sorted_lengths, sorted_idx = to_sorted_tensor(\n input_emb, lengths, sort_dim=1, device=DEVICE)\n emb = pack(sorted_input_emb, sorted_lengths, batch_first=False)\n self.rnn.flatten_parameters()\n outputs, hidden_t = self.rnn(emb, hidden)\n # hidden_t shape: [num_layers, batch_size, hidden_dim] [2, 32, 256]\n # outputs shape: [unpadded_seq_len, batch_size, hidden_dim * num_layers] [79, 32, 512]\n # !!! NOTICE: it will unpack to max_unpadded_length.\n outputs = unpack(outputs, batch_first=False)[0]\n outputs = to_original_tensor(\n outputs, sorted_idx, sort_dim=1, device=DEVICE)\n return hidden_t, outputs\n"
] | [
[
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.GRU",
"torch.nn.utils.rnn.pack_padded_sequence"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
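The `Encoder.forward` above sorts the batch by length before packing because older versions of `pack_padded_sequence` required descending lengths, then restores the original order after unpacking. On PyTorch >= 1.1, `enforce_sorted=False` does the sort/unsort internally; a self-contained sketch with toy shapes (no project config object) that also shows the "unpacks to max unpadded length" behavior the docstring warns about:

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

# seq_len-first batch, padded to 7 steps; actual lengths are at most 5
emb = torch.randn(7, 3, 8)
lengths = torch.tensor([5, 3, 2])

rnn = nn.GRU(input_size=8, hidden_size=4, bidirectional=True)

packed = pack_padded_sequence(emb, lengths, batch_first=False,
                              enforce_sorted=False)
outputs, hidden = rnn(packed)
outputs, out_lengths = pad_packed_sequence(outputs, batch_first=False)

print(outputs.shape)  # torch.Size([5, 3, 8]): padded only to the longest
                      # real sequence (5), not to the input's 7 steps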
RangeKing/Paddle | [
"2d87300809ae75d76f5b0b457d8112cb88dc3e27",
"2d87300809ae75d76f5b0b457d8112cb88dc3e27",
"2d87300809ae75d76f5b0b457d8112cb88dc3e27",
"2d87300809ae75d76f5b0b457d8112cb88dc3e27",
"2d87300809ae75d76f5b0b457d8112cb88dc3e27",
"2d87300809ae75d76f5b0b457d8112cb88dc3e27",
"2d87300809ae75d76f5b0b457d8112cb88dc3e27",
"2d87300809ae75d76f5b0b457d8112cb88dc3e27",
"2d87300809ae75d76f5b0b457d8112cb88dc3e27",
"2d87300809ae75d76f5b0b457d8112cb88dc3e27",
"2d87300809ae75d76f5b0b457d8112cb88dc3e27"
] | [
"python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet_static.py",
"python/paddle/fluid/tests/unittests/op_test.py",
"python/paddle/fluid/tests/unittests/xpu/test_momentum_op_xpu.py",
"python/paddle/tensor/to_string.py",
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py",
"python/paddle/fluid/tests/unittests/test_sparse_momentum_op.py",
"python/paddle/sparse/layer/conv.py",
"python/paddle/fluid/tests/unittests/test_logspace.py",
"python/paddle/fluid/tests/unittests/autograd/utils.py",
"python/paddle/fluid/initializer.py",
"python/paddle/distribution/distribution.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nimport paddle\nimport scipy.stats\n\nfrom config import ATOL, DEVICES, RTOL\nfrom parameterize import TEST_CASE_NAME, parameterize_cls, place, xrand\n\npaddle.enable_static()\n\n\n@place(DEVICES)\n@parameterize_cls((TEST_CASE_NAME, 'concentration'),\n [('test-one-dim', np.random.rand(89) + 5.0)])\nclass TestDirichlet(unittest.TestCase):\n def setUp(self):\n self.program = paddle.static.Program()\n self.executor = paddle.static.Executor()\n with paddle.static.program_guard(self.program):\n conc = paddle.static.data('conc', self.concentration.shape,\n self.concentration.dtype)\n self._paddle_diric = paddle.distribution.Dirichlet(conc)\n self.feeds = {'conc': self.concentration}\n\n def test_mean(self):\n with paddle.static.program_guard(self.program):\n [out] = self.executor.run(self.program,\n feed=self.feeds,\n fetch_list=[self._paddle_diric.mean])\n np.testing.assert_allclose(\n out,\n scipy.stats.dirichlet.mean(self.concentration),\n rtol=RTOL.get(str(self.concentration.dtype)),\n atol=ATOL.get(str(self.concentration.dtype)))\n\n def test_variance(self):\n with paddle.static.program_guard(self.program):\n [out] = self.executor.run(self.program,\n feed=self.feeds,\n fetch_list=[self._paddle_diric.variance])\n np.testing.assert_allclose(\n out,\n scipy.stats.dirichlet.var(self.concentration),\n rtol=RTOL.get(str(self.concentration.dtype)),\n atol=ATOL.get(str(self.concentration.dtype)))\n\n def test_prob(self):\n with paddle.static.program_guard(self.program):\n random_number = np.random.rand(*self.concentration.shape)\n random_number = random_number / random_number.sum()\n feeds = dict(self.feeds, value=random_number)\n value = paddle.static.data('value', random_number.shape,\n random_number.dtype)\n out = self._paddle_diric.prob(value)\n [out] = self.executor.run(self.program,\n feed=feeds,\n fetch_list=[out])\n np.testing.assert_allclose(\n out,\n scipy.stats.dirichlet.pdf(random_number, self.concentration),\n rtol=RTOL.get(str(self.concentration.dtype)),\n atol=ATOL.get(str(self.concentration.dtype)))\n\n def test_log_prob(self):\n with paddle.static.program_guard(self.program):\n random_number = np.random.rand(*self.concentration.shape)\n random_number = random_number / random_number.sum()\n feeds = dict(self.feeds, value=random_number)\n value = paddle.static.data('value', random_number.shape,\n random_number.dtype)\n out = self._paddle_diric.log_prob(value)\n [out] = self.executor.run(self.program,\n feed=feeds,\n fetch_list=[out])\n np.testing.assert_allclose(\n out,\n scipy.stats.dirichlet.logpdf(random_number, self.concentration),\n rtol=RTOL.get(str(self.concentration.dtype)),\n atol=ATOL.get(str(self.concentration.dtype)))\n\n def test_entropy(self):\n with paddle.static.program_guard(self.program):\n [out] = self.executor.run(\n self.program,\n feed=self.feeds,\n fetch_list=[self._paddle_diric.entropy()])\n 
np.testing.assert_allclose(\n out,\n scipy.stats.dirichlet.entropy(self.concentration),\n rtol=RTOL.get(str(self.concentration.dtype)),\n atol=ATOL.get(str(self.concentration.dtype)))\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport unittest\nimport warnings\nimport numpy as np\nimport random\nimport six\nimport struct\nimport time\nimport itertools\nimport collections\nfrom collections import defaultdict\nfrom copy import copy\n\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.framework import _dygraph_tracer\nimport paddle.fluid.core as core\nfrom paddle.fluid.framework import _in_legacy_dygraph, _enable_legacy_dygraph, _in_eager_without_dygraph_check, _disable_legacy_dygraph\nfrom paddle.fluid.framework import _test_eager_guard\nfrom paddle.fluid.backward import append_backward\nfrom paddle.fluid.op import Operator\nfrom paddle.fluid.executor import Executor\nfrom paddle.fluid.framework import Program, OpProtoHolder, Variable, _current_expected_place\nfrom paddle.fluid import unique_name\nfrom paddle.fluid.dygraph.dygraph_to_static.utils import parse_arg_and_kwargs\n\nsys.path.append(os.path.abspath(os.path.dirname(__file__)))\nfrom testsuite import (\n create_op,\n set_input,\n append_input_output,\n append_loss_ops, )\nfrom white_list import (\n op_accuracy_white_list,\n check_shape_white_list,\n compile_vs_runtime_white_list,\n no_check_set_white_list,\n op_threshold_white_list,\n no_grad_set_white_list, )\n\n# For switch new eager mode globally\ng_is_in_eager = _in_eager_without_dygraph_check()\ng_enable_legacy_dygraph = _enable_legacy_dygraph if g_is_in_eager else lambda: None\ng_disable_legacy_dygraph = _disable_legacy_dygraph if g_is_in_eager else lambda: None\n\n\ndef check_out_dtype(api_fn, in_specs, expect_dtypes, target_index=0, **configs):\n \"\"\"\n Determines whether dtype of output tensor is as expected.\n\n Args:\n api_fn(callable): paddle api function\n in_specs(list[tuple]): list of shape and dtype information for constructing input tensor of api_fn, such as [(shape, dtype), (shape, dtype)].\n expected_dtype(list[str]): expected dtype of output tensor.\n target_index(int): indicate which one from in_specs to infer the dtype of output.\n config(dict): other arguments of paddle api function\n\n Example:\n check_out_dtype(fluid.layers.pad_constant_like, [([2,3,2,3], 'float64'), ([1, 3, 1,3], )], ['float32', 'float64', 'int64'], target_index=1, pad_value=0.)\n\n \"\"\"\n paddle.enable_static()\n for i, expect_dtype in enumerate(expect_dtypes):\n with paddle.static.program_guard(paddle.static.Program()):\n input_t = []\n for index, spec in enumerate(in_specs):\n if len(spec) == 1:\n shape = spec[0]\n dtype = expect_dtype if target_index == index else 'float32'\n elif len(spec) == 2:\n shape, dtype = spec\n else:\n raise ValueError(\n \"Value of in_specs[{}] should contains two elements: [shape, dtype]\".\n format(index))\n input_t.append(\n paddle.static.data(\n name='data_%s' % index, shape=shape, dtype=dtype))\n\n out = api_fn(*input_t, **configs)\n out_dtype = 
fluid.data_feeder.convert_dtype(out.dtype)\n\n if out_dtype != expect_dtype:\n raise ValueError(\n \"Expected out.dtype is {}, but got {} from {}.\".format(\n expect_dtype, out_dtype, api_fn.__name__))\n\n\ndef _set_use_system_allocator(value=None):\n USE_SYSTEM_ALLOCATOR_FLAG = \"FLAGS_use_system_allocator\"\n old_value = core.globals()[USE_SYSTEM_ALLOCATOR_FLAG]\n value = old_value if value is None else value\n core.globals()[USE_SYSTEM_ALLOCATOR_FLAG] = value\n return old_value\n\n\ndef randomize_probability(batch_size, class_num, dtype='float32'):\n prob = np.random.uniform(\n 0.1, 1.0, size=(batch_size, class_num)).astype(dtype)\n prob_sum = prob.sum(axis=1)\n for i in six.moves.xrange(len(prob)):\n prob[i] /= prob_sum[i]\n return prob\n\n\ndef get_numeric_gradient(place,\n scope,\n op,\n inputs,\n input_to_check,\n output_names,\n delta=0.005,\n in_place=False):\n # FIXME: change this method by compile time concepts\n set_input(scope, op, inputs, place)\n\n def product(dim):\n return six.moves.reduce(lambda a, b: a * b, dim, 1)\n\n tensor_to_check = scope.find_var(input_to_check).get_tensor()\n tensor_size = product(tensor_to_check.shape())\n tensor_to_check_dtype = tensor_to_check._dtype()\n if tensor_to_check_dtype == core.VarDesc.VarType.FP32:\n tensor_to_check_dtype = np.float32\n elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:\n tensor_to_check_dtype = np.float64\n elif tensor_to_check_dtype == core.VarDesc.VarType.FP16:\n tensor_to_check_dtype = np.float16\n # set delta as np.float16, will automatically convert to float32, float64\n delta = np.array(delta).astype(np.float16)\n elif tensor_to_check_dtype == core.VarDesc.VarType.BF16:\n tensor_to_check_dtype = np.float32\n elif tensor_to_check_dtype == core.VarDesc.VarType.COMPLEX64:\n tensor_to_check_dtype = np.complex64\n elif tensor_to_check_dtype == core.VarDesc.VarType.COMPLEX128:\n tensor_to_check_dtype = np.complex128\n else:\n raise ValueError(\"Not supported data type \" + str(tensor_to_check_dtype)\n + \", tensor name : \" + str(input_to_check))\n\n def get_output():\n sum = []\n op.run(scope, place)\n for output_name in output_names:\n output_numpy = np.array(scope.find_var(output_name).get_tensor())\n # numpy.dtype does not have bfloat16, thus we use numpy.uint16 to\n # store bfloat16 data, and need to be converted to float to check\n # the floating precision.\n if tensor_to_check._dtype() == core.VarDesc.VarType.BF16:\n output_numpy = convert_uint16_to_float(output_numpy)\n sum.append(output_numpy.astype(tensor_to_check_dtype).mean())\n return tensor_to_check_dtype(np.array(sum).sum() / len(output_names))\n\n gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)\n\n def __get_elem__(tensor, i):\n if tensor_to_check_dtype == np.float16:\n numpy_tensor = np.array(tensor).astype(np.float16)\n numpy_tensor = numpy_tensor.flatten()\n return numpy_tensor[i]\n elif tensor_to_check._dtype() == core.VarDesc.VarType.BF16:\n numpy_tensor = np.array(tensor).astype(np.uint16)\n numpy_tensor = numpy_tensor.flatten()\n return struct.unpack('<f',\n struct.pack('<I',\n np.uint32(numpy_tensor[i])\n << np.uint32(16)))[0]\n elif tensor_to_check_dtype == np.float32:\n return tensor._get_float_element(i)\n elif tensor_to_check_dtype == np.float64:\n return tensor._get_double_element(i)\n else:\n raise TypeError(\"Unsupported test data type %s.\" %\n tensor_to_check_dtype)\n\n def __set_elem__(tensor, i, e):\n if tensor_to_check_dtype == np.float16:\n numpy_tensor = np.array(tensor).astype(np.float16)\n shape =
numpy_tensor.shape\n numpy_tensor = numpy_tensor.flatten()\n numpy_tensor[i] = e\n numpy_tensor = numpy_tensor.reshape(shape)\n tensor.set(numpy_tensor, place)\n elif tensor_to_check._dtype() == core.VarDesc.VarType.BF16:\n numpy_tensor = np.array(tensor).astype(np.uint16)\n shape = numpy_tensor.shape\n numpy_tensor = numpy_tensor.flatten()\n numpy_tensor[i] = np.uint16(copy_bits_from_float_to_uint16(e))\n numpy_tensor = numpy_tensor.reshape(shape)\n tensor.set(numpy_tensor, place)\n elif tensor_to_check_dtype == np.float32:\n tensor._set_float_element(i, e)\n elif tensor_to_check_dtype == np.float64:\n tensor._set_double_element(i, e)\n else:\n raise TypeError(\"Unsupported test data type %s.\" %\n tensor_to_check_dtype)\n\n # we only compute gradient of one element each time.\n # we use a for loop to compute the gradient of every element.\n for i in six.moves.xrange(tensor_size):\n if in_place:\n set_input(scope, op, inputs, place)\n\n # get one input element throw it's index i.\n origin = __get_elem__(tensor_to_check, i)\n # add delta to it, run op and then get the sum of the result tensor.\n x_pos = origin + delta\n __set_elem__(tensor_to_check, i, x_pos)\n y_pos = get_output()\n\n if in_place:\n set_input(scope, op, inputs, place)\n\n x_neg = origin - delta\n __set_elem__(tensor_to_check, i, x_neg)\n y_neg = get_output()\n\n __set_elem__(tensor_to_check, i, origin)\n gradient_flat[i] = (y_pos - y_neg) / delta / 2\n\n return gradient_flat.reshape(tensor_to_check.shape())\n\n\ndef skip_check_grad_ci(reason=None):\n \"\"\"Decorator to skip check_grad CI.\n\n Check_grad is required for Op test cases. However, there are some special\n cases that do not need to do check_grad. This decorator is used to skip the\n check_grad of the above cases.\n\n Note: the execution of unit test will not be skipped. 
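# Sketch of the central-difference scheme get_numeric_gradient above
# implements: bump one element by +/-delta, rerun, and divide the change in
# the (mean-reduced) output by 2*delta. numeric_gradient, f, and x are
# stand-ins for illustration; the real code reads and writes elements through
# the LoDTensor API instead of a plain ndarray.
import numpy as np

def numeric_gradient(f, x, delta=0.005):
    grad = np.zeros(x.size, dtype=np.float64)
    flat = x.reshape(-1)              # view: writes propagate into x
    for i in range(flat.size):
        origin = flat[i]
        flat[i] = origin + delta
        y_pos = f(x)
        flat[i] = origin - delta
        y_neg = f(x)
        flat[i] = origin              # restore before the next element
        grad[i] = (y_pos - y_neg) / delta / 2
    return grad.reshape(x.shape)

# e.g. numeric_gradient(lambda t: (t ** 2).sum(), np.ones(3)) ~= [2., 2., 2.]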
It just avoids check_grad\n checking in tearDownClass method by setting a `no_need_check_grad` flag.\n\n Example:\n @skip_check_grad_ci(reason=\"For inference, check_grad is not required.\")\n class TestInference(OpTest):\n \"\"\"\n if not isinstance(reason, str):\n raise AssertionError(\"The reason for skipping check_grad is required.\")\n\n def wrapper(cls):\n cls.no_need_check_grad = True\n return cls\n\n return wrapper\n\n\ndef copy_bits_from_float_to_uint16(f):\n return struct.unpack('<I', struct.pack('<f', f))[0] >> 16\n\n\ndef convert_float_to_uint16(float_list, data_format=\"NCHW\"):\n if data_format == \"NHWC\":\n float_list = np.transpose(float_list, [0, 3, 1, 2])\n\n new_output = []\n for x in np.nditer(float_list):\n new_output.append(np.uint16(copy_bits_from_float_to_uint16(x)))\n new_output = np.reshape(new_output, float_list.shape).view(np.uint16)\n\n if data_format == \"NHWC\":\n new_output = np.transpose(new_output, [0, 2, 3, 1])\n return new_output\n\n\ndef convert_uint16_to_float(in_list):\n in_list = np.asarray(in_list)\n out = np.vectorize(\n lambda x: struct.unpack('<f', struct.pack('<I', np.uint32(x) << np.uint32(16)))[0],\n otypes=[np.float32])(in_list.flat)\n return np.reshape(out, in_list.shape)\n\n\nclass OpTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n '''Fix random seeds to remove randomness from tests'''\n cls._np_rand_state = np.random.get_state()\n cls._py_rand_state = random.getstate()\n cls.call_once = False\n cls.dtype = None\n cls.outputs = {}\n cls.input_shape_is_large = True\n\n np.random.seed(123)\n random.seed(124)\n\n if paddle.is_compiled_with_npu():\n cls._use_system_allocator = _set_use_system_allocator(False)\n else:\n cls._use_system_allocator = _set_use_system_allocator(True)\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Restore random seeds\"\"\"\n np.random.set_state(cls._np_rand_state)\n random.setstate(cls._py_rand_state)\n\n _set_use_system_allocator(cls._use_system_allocator)\n\n def is_empty_grad_op(op_type):\n all_op_kernels = core._get_all_register_op_kernels()\n grad_op = op_type + '_grad'\n if grad_op in all_op_kernels.keys():\n if is_mkldnn_op_test():\n grad_op_kernels = all_op_kernels[grad_op]\n for grad_op_kernel in grad_op_kernels:\n if 'MKLDNN' in grad_op_kernel:\n return False\n else:\n return False\n return True\n\n def is_xpu_op_test():\n return hasattr(cls, \"use_xpu\") and cls.use_xpu == True\n\n def is_mkldnn_op_test():\n return hasattr(cls, \"use_mkldnn\") and cls.use_mkldnn == True\n\n def is_rocm_op_test():\n return core.is_compiled_with_rocm()\n\n def is_npu_op_test():\n return hasattr(cls, \"use_npu\") and cls.use_npu == True\n\n def is_mlu_op_test():\n return hasattr(cls, \"use_mlu\") and cls.use_mlu == True\n\n def is_custom_device_op_test():\n return hasattr(\n cls, \"use_custom_device\") and cls.use_custom_device == True\n\n if not hasattr(cls, \"op_type\"):\n raise AssertionError(\n \"This test do not have op_type in class attrs, \"\n \"please set self.__class__.op_type=the_real_op_type manually.\")\n\n # case in NO_FP64_CHECK_GRAD_CASES and op in NO_FP64_CHECK_GRAD_OP_LIST should be fixed\n if not hasattr(cls, \"no_need_check_grad\") \\\n and not is_empty_grad_op(cls.op_type):\n if cls.dtype is None or \\\n (cls.dtype == np.float16 \\\n and cls.op_type not in op_accuracy_white_list.NO_FP16_CHECK_GRAD_OP_LIST \\\n and not hasattr(cls, \"exist_check_grad\")):\n raise AssertionError(\"This test of %s op needs check_grad.\" %\n cls.op_type)\n\n # check for op test with fp64 precision, but not 
check mkldnn op test for now\n if cls.dtype in [np.float32, np.float64] \\\n and cls.op_type not in op_accuracy_white_list.NO_FP64_CHECK_GRAD_OP_LIST \\\n and not hasattr(cls, 'exist_fp64_check_grad') \\\n and not is_xpu_op_test() \\\n and not is_mkldnn_op_test() \\\n and not is_rocm_op_test() \\\n and not is_npu_op_test() \\\n and not is_mlu_op_test() \\\n and not is_custom_device_op_test():\n raise AssertionError(\n \"This test of %s op needs check_grad with fp64 precision.\" %\n cls.op_type)\n\n if not cls.input_shape_is_large \\\n and cls.op_type not in check_shape_white_list.NEED_TO_FIX_OP_LIST:\n raise AssertionError(\n \"Input's shape should be large than or equal to 100 for \" +\n cls.op_type + \" Op.\")\n\n def try_call_once(self, data_type):\n if not self.call_once:\n self.call_once = True\n self.dtype = data_type\n\n def is_bfloat16_op(self):\n # self.dtype is the dtype of inputs, and is set in infer_dtype_from_inputs_outputs.\n # Make sure this function is called after calling infer_dtype_from_inputs_outputs.\n return self.dtype == np.uint16 or (\n hasattr(self, 'output_dtype') and\n self.output_dtype == np.uint16) or (\n hasattr(self, 'mkldnn_data_type') and\n getattr(self, 'mkldnn_data_type') == \"bfloat16\") or (\n hasattr(self, 'attrs') and\n 'mkldnn_data_type' in self.attrs and\n self.attrs['mkldnn_data_type'] == 'bfloat16')\n\n def is_mkldnn_op(self):\n return (hasattr(self, \"use_mkldnn\") and self.use_mkldnn == True) or (\n hasattr(self, \"attrs\") and \"use_mkldnn\" in self.attrs and\n self.attrs[\"use_mkldnn\"] == True)\n\n def is_xpu_op(self):\n return (hasattr(self, \"use_xpu\") and self.use_xpu == True) or (\n hasattr(self, \"attrs\") and \"use_xpu\" in self.attrs and\n self.attrs[\"use_xpu\"] == True)\n\n # set the self.output_dtype .\n def infer_dtype_from_inputs_outputs(self, inputs, outputs):\n def is_np_data(input):\n return isinstance(input, (np.ndarray, np.generic))\n\n def infer_dtype(numpy_dict, dtype_set):\n assert isinstance(\n numpy_dict,\n dict), \"self.inputs, self.outputs must be numpy_dict\"\n # the inputs are as follows:\n # case 1: inputs = {'X': x}\n # case 2: inputs = {'X': (x, x_lod)}\n # case 3: inputs = {\"X\": [(\"x0\", x0), (\"x1\", x1), (\"x2\", x2)]}\n # case 4: inputs = {'X': [(\"x1\", (x1, [x1_lod1])), (\"x2\", (x2, [x2_.lod2]))]}\n # TODO(juncaipeng) infer dtype from inputs maybe obtain wrong type.\n for _, var_value in six.iteritems(numpy_dict):\n if is_np_data(var_value): # case 1\n dtype_set.add(var_value.dtype)\n elif isinstance(var_value, (list, tuple)): # case 2, 3, 4\n for sub_val_value in var_value:\n if is_np_data(sub_val_value): # case 2\n dtype_set.add(sub_val_value.dtype)\n elif len(sub_val_value) > 1 and is_np_data(\n sub_val_value[1]): # case 3\n dtype_set.add(sub_val_value[1].dtype)\n elif len(sub_val_value) > 1 and isinstance(sub_val_value[1], (list, tuple)) \\\n and is_np_data(sub_val_value[1][0]): # case 4\n dtype_set.add(sub_val_value[1][0].dtype)\n\n # infer dtype from inputs, and dtype means the precision of the test\n # collect dtype of all inputs\n input_dtype_set = set()\n infer_dtype(inputs, input_dtype_set)\n dtype_list = [\n np.dtype(np.float64), np.dtype(np.float32), np.dtype(np.float16),\n np.dtype(np.int64), np.dtype(np.int32), np.dtype(np.uint16),\n np.dtype(np.int16), np.dtype(np.int8), np.dtype(np.uint8),\n np.dtype(np.bool)\n ]\n # check the dtype in dtype_list in order, select the first dtype that in dtype_set\n for dtype in dtype_list:\n if dtype in input_dtype_set:\n self.dtype = dtype\n break\n # 
save input dtype in class attr\n self.__class__.dtype = self.dtype\n\n # infer dtype of outputs\n output_dtype_set = set()\n infer_dtype(outputs, output_dtype_set)\n for dtype in dtype_list:\n if dtype in output_dtype_set:\n self.output_dtype = dtype\n break\n\n def feed_var(self, input_vars, place):\n feed_map = {}\n for var_name in input_vars:\n if isinstance(input_vars[var_name], list):\n for name, np_value in self.inputs[var_name]:\n tensor = core.LoDTensor()\n if isinstance(np_value, tuple):\n tensor.set(np_value[0], place)\n tensor.set_recursive_sequence_lengths(np_value[1])\n else:\n tensor.set(np_value, place)\n feed_map[name] = tensor\n else:\n tensor = core.LoDTensor()\n if isinstance(self.inputs[var_name], tuple):\n tensor.set(self.inputs[var_name][0], place)\n tensor.set_recursive_sequence_lengths(self.inputs[var_name][\n 1])\n else:\n tensor.set(self.inputs[var_name], place)\n feed_map[var_name] = tensor\n return feed_map\n\n def _append_ops(self, block):\n self.__class__.op_type = self.op_type # for ci check, please not delete it for now\n if self.is_mkldnn_op():\n self.__class__.use_mkldnn = True\n\n if self.is_xpu_op():\n self.__class__.use_xpu = True\n\n op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)\n \"infer datatype from inputs and outputs for this test case\"\n if self.is_bfloat16_op():\n self.dtype = np.uint16\n self.__class__.dtype = self.dtype\n self.output_dtype = np.uint16\n else:\n self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)\n inputs = append_input_output(block, op_proto, self.inputs, True,\n self.dtype)\n outputs = append_input_output(block, op_proto, self.outputs, False,\n self.dtype)\n\n if hasattr(self, \"cache_name_list\"):\n for name in self.cache_name_list:\n inputs[name] = block.create_var(\n name=name,\n persistable=True,\n type=core.VarDesc.VarType.RAW,\n stop_gradient=True)\n\n op = block.append_op(\n type=self.op_type,\n inputs=inputs,\n outputs=outputs,\n attrs=copy(self.attrs) if hasattr(self, \"attrs\") else dict())\n # infer variable type and infer shape in compile-time\n op.desc.infer_var_type(block.desc)\n op.desc.infer_shape(block.desc)\n\n return op\n\n def _get_io_vars(self, block, numpy_inputs):\n inputs = {}\n for name, value in six.iteritems(numpy_inputs):\n if isinstance(value, list):\n var_list = [\n block.var(sub_name) for sub_name, sub_value in value\n ]\n inputs[name] = var_list\n else:\n inputs[name] = block.var(name)\n return inputs\n\n def _get_inputs(self, block):\n return self._get_io_vars(block, self.inputs)\n\n def _get_outputs(self, block):\n return self._get_io_vars(block, self.outputs)\n\n def calc_output(self, place):\n outs, _ = self._calc_output(place)\n return outs\n\n def _create_var_from_numpy(self, value):\n if isinstance(value, tuple):\n data = value[0]\n lod = value[1]\n v = fluid.dygraph.base.to_variable(value=data)\n v.value().get_tensor().set_recursive_sequence_lengths(lod)\n return v\n else:\n return fluid.dygraph.base.to_variable(value)\n\n def get_sequence_batch_size_1_input(self, lod=None, shape=None):\n \"\"\"Get LoD input data whose batch size is 1.\n All sequence related OP unittests should call this function to contain the case of batch size = 1.\n Args:\n lod (list[list of int], optional): Length-based LoD, length of lod[0] should be 1. Default: [[13]].\n shape (list, optional): Shape of input, shape[0] should be equals to lod[0][0]. 
Default: [13, 23].\n Returns:\n tuple (ndarray, lod) : LoD input data whose batch size is 1.\n \"\"\"\n if lod is None:\n lod = [[13]]\n if shape is None:\n shape = [13, 23]\n assert len(lod[0]) == 1\n assert lod[0][0] == shape[0]\n x = np.random.uniform(0.1, 1, shape).astype('float32')\n return (x, lod)\n\n def lod_has_single_zero(self, lod):\n for i in range(len(lod) - 2):\n if lod[i] != 0 and lod[i + 1] == 0 and lod[i + 2] != 0:\n return True\n return False\n\n def lod_has_continuous_zero(self, lod):\n for i in range(len(lod) - 3):\n if lod[i] != 0 and lod[i + 1] == 0 and lod[i + 2] == 0 and lod[\n i + 3] != 0:\n return True\n return False\n\n def get_sequence_instance_size_0_input(self, lod=None, shape=None):\n \"\"\"Get LoD input data whose instance size is 0.\n All sequence related OP unittests should call this function to contain the case of instance size is 0.\n Args:\n lod (list[list of int], optional): Length-based LoD, lod[0]'s size must at least eight, lod[0] must at least two zeros at the beginning and at least two zeros at the end, the middle position of lod[0] contains a single zero and multiple zero. Default: [[0, 0, 4, 0, 3, 0, 0, 5, 0, 0]].\n shape (list, optional): Shape of input, shape[0] should be equals to lod[0][0]. Default: [13, 23].\n Returns:\n tuple (ndarray, lod): LoD input data whose instance size is 0.\n \"\"\"\n if lod is None:\n lod = [[0, 0, 4, 0, 3, 0, 0, 5, 0, 0]]\n if shape is None:\n shape = [12, 10]\n assert len(lod[0]) >= 8\n assert lod[0][0] == 0 and lod[0][1] == 0 and lod[0][-1] == 0 and lod[0][\n -2] == 0\n assert self.lod_has_single_zero(lod[0]) is True\n assert self.lod_has_continuous_zero(lod[0]) is True\n assert sum(lod[0]) == shape[0]\n\n x = np.random.uniform(0.1, 1, shape).astype('float32')\n return (x, lod)\n\n def append_input_output_for_dygraph(self, op_proto, np_list, is_input,\n if_return_inputs_grad_dict, block):\n def create_var(np_value, name, is_input, if_return_inputs_grad_dict):\n np_value_temp = np_value\n has_lod = False\n lod_temp = None\n if isinstance(np_value, tuple):\n np_value_temp = np_value[0]\n has_lod = True\n lod_temp = np_value[1]\n\n if is_input:\n v = self._create_var_from_numpy(np_value_temp)\n\n if if_return_inputs_grad_dict:\n v.stop_gradient = False\n if not _in_legacy_dygraph():\n v.retain_grads()\n\n if has_lod:\n v.value().get_tensor().set_recursive_sequence_lengths(\n lod_temp)\n else:\n v = block.create_var(\n name=name,\n dtype=np_value_temp.dtype,\n type=core.VarDesc.VarType.LOD_TENSOR,\n persistable=False,\n stop_gradient=False)\n return v\n\n # prepare variable for input or output\n var_dict = defaultdict(list)\n if if_return_inputs_grad_dict:\n inputs_grad_dict = defaultdict()\n proto_list = op_proto.inputs if is_input else op_proto.outputs\n for var_proto in proto_list:\n name = var_proto.name\n if (name not in np_list) and var_proto.dispensable:\n continue\n if name not in np_list:\n assert var_proto.intermediate, \"{} not found\".format(name)\n v = block.create_var(\n dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR)\n var_dict[name].append(v)\n if if_return_inputs_grad_dict:\n inputs_grad_dict[name] = v\n continue\n if var_proto.duplicable:\n assert isinstance(\n np_list[name],\n list), \"Duplicable {} should be set as list\".format(name)\n var_list = []\n slot_name = name\n for (name, np_value) in np_list[name]:\n v = create_var(np_value, name, is_input,\n if_return_inputs_grad_dict)\n var_list.append(v)\n if if_return_inputs_grad_dict:\n inputs_grad_dict[name] = v\n var_dict[slot_name] = 
var_list\n else:\n nplist_value_temp = None\n name_temp = None\n if isinstance(np_list[name], list):\n nplist_value_temp = np_list[name][0]\n name_temp = name\n else:\n nplist_value_temp = np_list[name]\n name_temp = unique_name.generate(\"%s_out\" % (name))\n v = create_var(nplist_value_temp, name_temp, is_input,\n if_return_inputs_grad_dict)\n var_dict[name].append(v)\n if if_return_inputs_grad_dict:\n inputs_grad_dict[name] = v\n\n if if_return_inputs_grad_dict:\n return var_dict, inputs_grad_dict\n else:\n return var_dict\n\n def _check_api_outs_by_dygraph_outs(self, api_outs, dygraph_outs, place):\n \"\"\" for quick verify, here we take a simplest strategy:\n 1. we only check variable in api_outs.\n 2. we simply check the numpy (tensor) .\n 3. we set atol and rtol as 1e-5, because they are unrelated to dtype.\n \"\"\"\n for name in api_outs:\n np_api = np.array(api_outs[name])\n np_dyg = np.array(dygraph_outs[name])\n self.assertTrue(\n np.allclose(\n np_api, np_dyg, equal_nan=False),\n \"Output (\" + name + \") has diff at \" + str(place) + \"\\nExpect \"\n + str(np_dyg) + \"\\n\" + \"But Got\" + str(np_api) + \" in class \" +\n self.__class__.__name__)\n\n def _calc_python_api_output(self, place, egr_inps=None, egr_oups=None):\n \"\"\" set egr_inps and egr_oups = None if you want to create it by yourself.\n \"\"\"\n\n def prepare_python_api_arguments(api, op_proto_ins, op_proto_attrs,\n kernel_sig):\n \"\"\" map from `op proto inputs and attrs` to `api input list and api attrs dict`\n \n NOTE: the op_proto_attrs and op_proto_ins is a default dict. default value is []\n \"\"\"\n\n class Empty:\n pass\n\n def is_empty(a):\n return isinstance(a, Empty)\n\n def get_default(idx, defaults):\n assert not isinstance(\n defaults[idx], Empty\n ), \"%d-th params of python api don't have default value.\" % idx\n return defaults[idx]\n\n def to_defaults_list(params, defaults):\n return [defaults[p] for p in params if p in defaults]\n\n def parse_attri_value(name, op_inputs, op_attrs):\n \"\"\" parse true value from inputs and attrs, if there is no name passed by OpTest, return Empty\n 1. if the name in op_attrs, use the op_attrs[name]\n 2. if the name in op_inputs, convert the op_inputs to [type of default value]\n 3. if the name not in op_attrs ans op_inputs, return Empty. (this will use the default value from python api)\n \"\"\"\n if name in op_proto_attrs:\n return op_proto_attrs[name]\n elif name in op_inputs:\n if len(op_inputs[name]) == 1:\n # why don't use numpy().item() : if the Tensor is float64, we will change it to python.float32, where we loss accuracy: [allclose_op]\n # why we reconstruct a tensor: because we want the tensor in cpu. 
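# Sketch of the reconstruction step described in the comment below: per that
# comment, a single-element input is re-materialized as a CPU tensor via
# numpy() + paddle.to_tensor rather than item(), so the original dtype
# survives the round trip. `t` here is a stand-in tensor, not part of the
# harness.
import paddle

t = paddle.to_tensor([0.1], dtype='float64')
as_attr = paddle.to_tensor(t.numpy(), place='cpu')
assert as_attr.dtype == paddle.float64  # dtype preserved, tensor lives on CPU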
\n return paddle.to_tensor(\n op_inputs[name][0].numpy(), place='cpu')\n else:\n # if this is a list (test_unsqueeze2_op): we just pass it into the python api.\n return op_inputs[name]\n else:\n return Empty()\n\n # NOTE(xiongkun): the logic of constructing parameters: \n # for example: \n # python api: cumprod(x, dim, dtype=None, name=None) \n # kernel sig: [[\"x\"], [\"dim\"], [\"out\"]]\"\n #\n # we will construct a lot of list with the same length : len == len(api_params), here is 4\n # api_params = [\"x\", \"dim\", \"dtype\", \"name\"]\n # api_defaults = [Empty, Empty, None, None]; empty means no defaults.\n # inputs_and_attrs = [\"x\", \"dim\"] , the length may shorter or longer than api_params\n # input_arguments = [RealValue in self.inputs and self.attrs]\n # then ,we will loop for the api_params, construct a result list: \n # if the name in ['name', 'dtype', 'out', 'output'], we will use the default value\n # else, we will consume a input_arguments. (because the name is not corresponding, so we only use the order)\n\n api_params, api_defaults = parse_arg_and_kwargs(api)\n api_defaults = to_defaults_list(api_params, api_defaults)\n api_defaults = [\n Empty() for i in range(len(api_params) - len(api_defaults))\n ] + api_defaults\n assert len(api_defaults) == len(\n api_params), \"Error happens. contack xiongkun03 to solve.\"\n inputs_sig, attrs_sig, outputs_sig = kernel_sig\n inputs_and_attrs = inputs_sig + attrs_sig\n input_arguments = [\n op_proto_ins.get(name, Empty()) for name in inputs_sig\n ] + [\n parse_attri_value(name, op_proto_ins, op_proto_attrs)\n for name in attrs_sig\n ]\n results = []\n api_ignore_param_list = set(['name', 'dtype', 'out', 'output'])\n idx_of_op_proto_arguments = 0\n for idx, arg_name in enumerate(api_params):\n if arg_name in api_ignore_param_list:\n results.append(get_default(idx, api_defaults))\n else:\n if (idx_of_op_proto_arguments < len(input_arguments)):\n tmp = input_arguments[idx_of_op_proto_arguments]\n idx_of_op_proto_arguments += 1\n else:\n tmp = Empty() # use the default value\n\n if isinstance(tmp, Empty):\n results.append(get_default(idx, api_defaults))\n else:\n results.append(tmp)\n assert len(results) == len(api_params)\n return results\n\n def construct_output_dict_by_kernel_sig(ret_tuple, output_sig):\n if hasattr(self, \"python_out_sig\"):\n output_sig = self.python_out_sig\n if not isinstance(ret_tuple, (tuple, list)):\n ret_tuple = [ret_tuple]\n if len(output_sig) == len(ret_tuple):\n # [assumption]: we assume {\"Out\": [Tensor]}\n return {a: [b] for a, b in zip(output_sig, ret_tuple)}\n else:\n # [assumption]: return multi-Tensor in a single output. such as paddle.split()\n assert len(\n output_sig\n ) == 1, \"Don't support multi-output with multi-tensor output. (May be you can use set `python_out_sig`, see `test_squeeze2_op` as a example.)\"\n return {output_sig[0]: ret_tuple}\n\n def assumption_assert_and_transform(args, inp_num):\n \"\"\"\n transform inputs by the following rules:\n 1. [Tensor] -> Tensor\n 2. [Tensor, Tensor, ...] -> list of Tensors\n 3. None -> None\n 4. 
Others: raise Error\n\n only support \"X\" is list of Tensor, currently don't support other structure like dict.\n \"\"\"\n inp_args = [[inp] if inp is None else inp\n for inp in args[:inp_num]] # convert None -> [None]\n for inp in inp_args:\n assert isinstance(\n inp, list\n ), \"currently only support `X` is [Tensor], don't support other structure.\"\n args = [inp[0] if len(inp) == 1 else inp\n for inp in inp_args] + args[inp_num:]\n return args\n\n def _get_kernel_signature(eager_tensor_inputs, eager_tensor_outputs,\n attrs_outputs):\n try:\n kernel_sig = _dygraph_tracer()._get_kernel_signature(\n self.op_type, eager_tensor_inputs, eager_tensor_outputs,\n attrs_outputs)\n except RuntimeError as re:\n \"\"\" we think the kernel_sig is missing.\n \"\"\"\n kernel_sig = None\n print(\n \"[Warning: op_test.py] Kernel Signature is not found for %s, fall back to intermediate state.\"\n % self.op_type)\n return kernel_sig\n\n def cal_python_api(python_api, args, kernel_sig):\n inputs_sig, attrs_sig, outputs_sig = kernel_sig\n args = assumption_assert_and_transform(args, len(inputs_sig))\n ret_tuple = python_api(*args)\n return construct_output_dict_by_kernel_sig(ret_tuple, outputs_sig)\n\n with fluid.dygraph.base.guard(place=place):\n block = fluid.default_main_program().global_block()\n op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)\n # prepare input variable\n eager_tensor_inputs = egr_inps if egr_inps else self.append_input_output_for_dygraph(\n op_proto, self.inputs, True, False, block)\n # prepare output variable\n eager_tensor_outputs = egr_oups if egr_oups else self.append_input_output_for_dygraph(\n op_proto, self.outputs, False, False, block)\n\n # prepare attributes\n attrs_outputs = {}\n if hasattr(self, \"attrs\"):\n for attrs_name in self.attrs:\n if self.attrs[attrs_name] is not None:\n attrs_outputs[attrs_name] = self.attrs[attrs_name]\n\n kernel_sig = _get_kernel_signature(\n eager_tensor_inputs, eager_tensor_outputs, attrs_outputs)\n if not kernel_sig:\n return None\n assert hasattr(\n self, \"python_api\"\n ), \"Detect there is KernelSignature for `%s` op, please set the `self.python_api` if you set check_eager = True\" % self.op_type\n args = prepare_python_api_arguments(\n self.python_api, eager_tensor_inputs, attrs_outputs, kernel_sig)\n \"\"\" we directly return the cal_python_api value because the value is already tensor. 
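# Minimal standalone sketch of the unwrapping rule assumption_assert_and_transform
# applies above: None is boxed to [None] so the length checks hold, a
# single-element input list unwraps to the bare value, longer lists pass
# through, and trailing attr arguments are untouched. unwrap_inputs is an
# illustrative stand-in name.
def unwrap_inputs(args, inp_num):
    inp_args = [[a] if a is None else a for a in args[:inp_num]]
    return [a[0] if len(a) == 1 else a for a in inp_args] + args[inp_num:]

# unwrap_inputs([['x'], ['a', 'b'], 2.0], inp_num=2) -> ['x', ['a', 'b'], 2.0]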
\n \"\"\"\n return cal_python_api(self.python_api, args, kernel_sig)\n\n def _calc_dygraph_output(self, place, parallel=False, no_check_set=None):\n self.__class__.op_type = self.op_type # for ci check, please not delete it for now\n with fluid.dygraph.base.guard(place=place):\n block = fluid.default_main_program().global_block()\n\n op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)\n\n # prepare input variable\n inputs = self.append_input_output_for_dygraph(op_proto, self.inputs,\n True, False, block)\n # prepare output variable\n outputs = self.append_input_output_for_dygraph(\n op_proto, self.outputs, False, False, block)\n\n # prepare attributes\n attrs_outputs = {}\n if hasattr(self, \"attrs\"):\n for attrs_name in self.attrs:\n if self.attrs[attrs_name] is not None:\n attrs_outputs[attrs_name] = self.attrs[attrs_name]\n\n block.append_op(\n type=self.op_type,\n inputs=inputs,\n outputs=outputs,\n attrs=attrs_outputs if hasattr(self, \"attrs\") else None)\n return outputs\n\n def _calc_output(self,\n place,\n parallel=False,\n no_check_set=None,\n loss=None,\n enable_inplace=None,\n for_inplace_test=None):\n program = Program()\n block = program.global_block()\n op = self._append_ops(block)\n\n inputs = self._get_inputs(block)\n outputs = self._get_outputs(block)\n feed_map = self.feed_var(inputs, place)\n\n if for_inplace_test:\n # Some variables' tensors hold no buffer (tensor's _holder is NULL), like XShape in reshape2 op,\n # and the shapes of those variables contain 0 (eg. Xshape.shape = [0, 2, 5]).\n # Set persistable for those variables in order to get them from global_scope for inplace grad test directly other than feed them,\n # since feed op calls check_memory_size() which fails when tensor's holder_ is NULL.\n for out_name in op.output_arg_names:\n var = block.var(out_name)\n if 0 in var.shape:\n var.persistable = True\n original_program = program\n if parallel:\n use_cuda = False\n if isinstance(place, fluid.CUDAPlace):\n use_cuda = True\n compiled_prog = fluid.CompiledProgram(program).with_data_parallel(\n loss_name=loss.name if loss else None, places=place)\n program = compiled_prog\n fetch_list = getattr(self, \"fetch_list\", [])\n # if the fetch_list is customized by user, we use it directly.\n # if not, fill the fetch_list by the user configured outputs in test.\n if len(fetch_list) == 0:\n for var_name, var in six.iteritems(outputs):\n if no_check_set is not None and var_name in no_check_set:\n continue\n if isinstance(var, list):\n for v in var:\n fetch_list.append(v.name)\n else:\n fetch_list.append(var.name)\n # if the fetch_list still empty, fill the fetch_list by the operator output.\n if len(fetch_list) == 0:\n for out_name, out_dup in Operator.get_op_outputs(self.op_type):\n fetch_list.append(str(out_name))\n\n if enable_inplace is not None:\n build_strategy = fluid.BuildStrategy()\n build_strategy.enable_inplace = enable_inplace\n\n compiled_prog = fluid.CompiledProgram(program).with_data_parallel(\n build_strategy=build_strategy, places=place)\n program = compiled_prog\n\n executor = Executor(place)\n outs = executor.run(program,\n feed=feed_map,\n fetch_list=fetch_list,\n return_numpy=False)\n self.op = op\n self.program = original_program\n if for_inplace_test:\n return outs, fetch_list, feed_map, original_program, op.desc\n else:\n return outs, fetch_list\n\n def _compare_expect_and_actual_outputs(self,\n place,\n fetch_list,\n expect_outs,\n actual_outs,\n inplace_atol=None):\n \"\"\"Compare expect outs and actual outs of an tested op.\n\n 
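# Sketch of the comparison policy this helper documents: exact equality by
# default, numpy.allclose only when inplace_atol marks the op as
# non-deterministic (e.g. group_norm). outputs_match and the stand-in arrays
# are illustrative, not part of the harness.
import numpy as np

def outputs_match(expect, actual, inplace_atol=None):
    if inplace_atol is not None:
        return np.allclose(expect, actual, atol=inplace_atol)
    return np.array_equal(expect, actual)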
Args:\n place (CPUPlace | CUDAPlace): The place where the op runs.\n fetch_list (list): The outputs of tested op.\n expect_outs (list): The expect outs of tested op.\n actual_outs (list): The actual outs of tested op.\n inplace_atol (float): The tolerable error, only set when tested op doesn't ensure computational consistency, like group_norm op.\n\n Returns:\n None.\n \"\"\"\n # compare expect_outs and actual_outs\n for i, name in enumerate(fetch_list):\n # Note(zhiqiu): inplace_atol should be only set when op doesn't ensure\n # computational consistency.\n # When inplace_atol is not None, the inplace check uses numpy.allclose\n # to check inplace result instead of numpy.array_equal.\n expect_out = np.array(expect_outs[i])\n actual_out = np.array(actual_outs[i])\n if inplace_atol is not None:\n self.assertTrue(\n np.allclose(\n expect_out, actual_out, atol=inplace_atol),\n \"Output (\" + name + \") has diff at \" + str(place) +\n \" when using and not using inplace\" + \"\\nExpect \" +\n str(expect_out) + \"\\n\" + \"But Got\" + str(actual_out) +\n \" in class \" + self.__class__.__name__)\n else:\n self.assertTrue(\n np.array_equal(expect_out, actual_out),\n \"Output (\" + name + \") has diff at \" + str(place) +\n \" when using and not using inplace\" + \"\\nExpect \" +\n str(expect_out) + \"\\n\" + \"But Got\" + str(actual_out) +\n \" in class \" + self.__class__.__name__ + '\\n')\n\n def _construct_grad_program_from_forward(self, fwd_program, grad_op_desc,\n op_grad_to_var):\n \"\"\"Generate grad_program which contains the grad_op.\n\n Args:\n fwd_program (tuple): The program that contains grad_op_desc's corresponding forward op.\n grad_op_desc (OpDesc): The OpDesc of grad op.\n op_grad_to_var (dict): The relation of variables in grad op and its forward op.\n\n Returns:\n grad_program (program): The program which contains the grad_op.\n \"\"\"\n grad_program = Program()\n grad_block = grad_program.global_block()\n new_op_desc = grad_block.desc.append_op()\n new_op_desc.copy_from(grad_op_desc)\n grad_program._sync_with_cpp()\n\n # Create grad vars based on fwd vars (shape and dtype)\n for arg in grad_op_desc.input_arg_names(\n ) + grad_op_desc.output_arg_names():\n fwd_var_name = op_grad_to_var.get(arg, None)\n if fwd_var_name is None:\n fwd_var_name = arg\n fwd_var = fwd_program.global_block().vars.get(fwd_var_name)\n assert fwd_var is not None, \"{} cannot be found\".format(\n fwd_var_name)\n grad_var = grad_block.create_var(\n name=arg,\n dtype=fwd_var.dtype,\n shape=fwd_var.shape,\n type=fwd_var.type,\n persistable=False)\n\n # Some variables' tensors hold no buffer (tensor's _holder is NULL), like XShape in reshape2 op,\n # and the shapes of those variables contain 0 (eg. 
Xshape.shape = [0, 2, 5]).\n # Set persistable for those variables in order to get them from global_scope for inplace grad test directly other than feed them,\n # since feed op calls check_memory_size() which fails when tensor's holder_ is NULL.\n if 0 in grad_var.shape:\n grad_var.persistable = True\n grad_program._sync_with_cpp()\n return grad_program\n\n def _construct_grad_feed_map_from_forward(self, place, fwd_res,\n grad_op_desc, op_grad_to_var):\n \"\"\"Generate grad_feed_map for grad_program.\n\n since we don`t really check gradient accuracy, but check the consistency when using and not using inplace,\n we use fwd outs (also inputs sometimes) to construct grad inputs.\n\n Args:\n place (CPUPlace | CUDAPlace): The place where the op runs.\n fwd_res (tuple): The outputs of its forward op, in the same form as returns of _calc_outputs() when for_inplace_test is True.\n i.e., tuple(fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc)\n grad_op_desc (OpDesc): The OpDesc of grad op.\n op_grad_to_var (dict): The relation of variables in grad op and its fwd_op.\n\n Returns:\n grad_feed_map (dict): The feed_map of grad_op.\n \"\"\"\n fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc = fwd_res\n p = core.Place()\n p.set_place(place)\n grad_feed_map = {}\n for arg in grad_op_desc.input_arg_names():\n if arg in fwd_feed_map.keys():\n grad_feed_map[arg] = fwd_feed_map[arg]._copy(p)\n else:\n fwd_var_name = op_grad_to_var.get(arg, None)\n if fwd_var_name is None:\n fwd_var_name = arg\n\n for i, out_name in enumerate(fwd_fetch_list):\n if out_name == fwd_var_name:\n # don't feed variables whose tensors hold no buffer (shape contains 0 like shape = [0,2,5] and holder_ is NULL), like XShape in reshape2 op.\n # get them from global_scope directly since we have set them persistable in fwd execution\n if 0 in fwd_program.global_block().var(out_name).shape:\n continue\n else:\n grad_feed_map[arg] = fwd_outs[i]._copy(p)\n return grad_feed_map\n\n def _get_need_run_ops(self, op_desc, fwd_op_desc=None):\n \"\"\"Postorder traversal of the 'grad' tree to get all ops that need to run during inplace test.\n An op needs to run druing inplace check if,\n (1) it has infer_inplace,\n (2) it has infer_inplace in its grad descendants. (since we need its outputs as to construct its grad's inputs)\n\n Args:\n op_desc (OpDesc): The op_desc of current op.\n fwd_op_desc (OpDesc): The op_desc of current op's forward op, None if current op has no forward op.\n Eg. 
relu's fwd_op is None, relu_grad's fwd_op is relu, relu_grad_grad's fwd_op is relu_grad, etc.\n\n Returns:\n need_run_ops (list[(op_desc, fwd_op_desc)]): The ops that need to run during inplace test.\n \"\"\"\n need_run_ops = []\n visited_ops = []\n\n def _dfs_grad_op(op_desc, fwd_op_desc=None):\n visited_ops.append(op_desc.type())\n has_infer_inplace = fluid.core.has_infer_inplace(op_desc.type())\n has_grad_op_maker = fluid.core.has_grad_op_maker(op_desc.type())\n has_infer_inplace_in_grad_descendants = False\n if not has_grad_op_maker:\n has_infer_inplace_in_descendants = False\n else:\n # get grad_op_desc\n grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(\n op_desc, set(), [])\n if not grad_op_desc_list:\n has_infer_inplace_in_grad_descendants = False\n else:\n for i, grad_op_desc in enumerate(grad_op_desc_list):\n if grad_op_desc.type(\n ) not in visited_ops and _dfs_grad_op(\n grad_op_desc, fwd_op_desc=op_desc):\n has_infer_inplace_in_grad_descendants = True\n if has_infer_inplace or has_infer_inplace_in_grad_descendants:\n need_run_ops.append((op_desc, fwd_op_desc))\n return True\n else:\n return False\n\n _dfs_grad_op(op_desc, fwd_op_desc=fwd_op_desc)\n return need_run_ops\n\n def _check_forward_inplace(self,\n place,\n no_check_set=None,\n inplace_atol=None):\n \"\"\"Check the inplace correctness of given op (self.op_type).\n Run the op twice with same inputs, one enable inplace and another disable, compare their outputs.\n\n Args:\n place (CPUPlace | CUDAPlace): The place where the op runs.\n no_check_set (list): The names of outputs that needn't check, like XShape of reshape op.\n inplace_atol (float): The tolerable error, only set when op doesn't ensure computational consistency, like group_norm op.\n\n Returns:\n expect_res (tuple(outs, fetch_list, feed_map, program, op_desc)): The results of given op.\n We return this to construct grad_program and grad_feed_map for grad inplace check.\n \"\"\"\n # _calc_output() returns in the form tuple(outs, fetch_list, feed_map, program, op_desc) when for_inplace_test=True.\n expect_res = self._calc_output(\n place,\n no_check_set=no_check_set,\n enable_inplace=False,\n for_inplace_test=True)\n actual_res = self._calc_output(\n place,\n no_check_set=no_check_set,\n enable_inplace=True,\n for_inplace_test=True)\n # compare expect_outs and actual_outs\n self._compare_expect_and_actual_outputs(\n place,\n expect_res[1],\n expect_res[0],\n actual_res[0],\n inplace_atol=inplace_atol)\n return expect_res\n\n def _calc_grad_output(self,\n place,\n fwd_res,\n grad_op_desc,\n enable_inplace=None):\n \"\"\"Calculate grad_output for given grad_op_desc.\n\n since we don`t really check gradient accuracy, but check the consistency when using and not using inplace,\n we use fwd outs (also inputs sometimes) to construct grad inputs.\n\n Args:\n place (CPUPlace | CUDAPlace): The place where the op runs.\n fwd_res (tuple): The outputs of its forward op, in the same form as returns of _calc_outputs() when for_inplace_test is True.\n i.e., tuple(fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc).\n grad_op_desc (OpDesc): The OpDesc of grad op.\n enable_inplace (bool): Enable inplace or not.\n\n Returns:\n res (tuple(outs, fetch_list, feed_map, program, op_desc)): The results of given grad_op_desc.\n \"\"\"\n fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc = fwd_res\n grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(fwd_op_desc,\n set(), [])\n grad_program = self._construct_grad_program_from_forward(\n 
fwd_program, grad_op_desc, op_grad_to_var)\n grad_feed_map = self._construct_grad_feed_map_from_forward(\n place, fwd_res, grad_op_desc, op_grad_to_var)\n grad_fetch_list = grad_op_desc.output_arg_names()\n exe = Executor(place)\n program = grad_program\n if enable_inplace is not None:\n build_strategy = fluid.BuildStrategy()\n build_strategy.enable_inplace = enable_inplace\n compiled_program = fluid.CompiledProgram(\n grad_program).with_data_parallel(\n loss_name=\"\", build_strategy=build_strategy, places=place)\n program = compiled_program\n outs = exe.run(program,\n feed=grad_feed_map,\n fetch_list=grad_fetch_list,\n return_numpy=False)\n return outs, grad_fetch_list, grad_feed_map, grad_program, grad_op_desc\n\n def _check_grad_inplace(self,\n place,\n fwd_res,\n grad_op_desc,\n inplace_atol=None):\n \"\"\"Check the inplace correctness of given grad_op_desc.\n\n Run the grad op twice with same inputs, one enable inplace and another disable, compare their outputs.\n It works like _check_forward_inplace, but the way to construct program and feed_map differs.\n So we define a new function for grad, grad_grad, etc.\n\n Args:\n place (CPUPlace | CUDAPlace): The place where the op runs.\n fwd_res (tuple): The outputs of its forward op, in the same form as returns of _calc_outputs() when for_inplace_test is True.\n i.e., tuple(fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc).\n grad_op_desc (OpDesc): The OpDesc of grad op.\n inplace_atol (float): The tolerable error, only set when op doesn't ensure computational consistency, like group_norm op.\n\n Returns:\n expect_res (tuple(outs, fetch_list, feed_map, program, op_desc)): The results of given op.\n We return this to construct grad_program and grad_feed_map for grad inplace check.\n \"\"\"\n expect_res = self._calc_grad_output(\n place, fwd_res, grad_op_desc, enable_inplace=False)\n actual_res = self._calc_grad_output(\n place, fwd_res, grad_op_desc, enable_inplace=True)\n self._compare_expect_and_actual_outputs(\n place,\n expect_res[1],\n expect_res[0],\n actual_res[0],\n inplace_atol=inplace_atol)\n return expect_res\n\n def check_inplace_output_with_place(self,\n place,\n no_check_set=None,\n inplace_atol=None):\n \"\"\"Chech the inplace correctness of given op, its grad op, its grad_grad op, etc.\n\n (1) Get all ops need to run. 
(see conditions in _get_need_run_ops())\n (2) Run op in need_run_ops, and do inplace check if it has infer_inplace.\n\n Args:\n place (CPUPlace | CUDAPlace): The place where the op runs.\n no_check_set (list): The names of outputs that needn't check, like XShape of reshape op.\n inplace_atol (float): The tolerable error, only set when op doesn't ensure computational consistency, like group_norm op.\n\n Returns:\n None\n \"\"\"\n has_infer_inplace = fluid.core.has_infer_inplace(self.op_type)\n has_grad_op_maker = fluid.core.has_grad_op_maker(self.op_type)\n\n fwd_res = self._calc_output(\n place, no_check_set=no_check_set, for_inplace_test=True)\n op_desc = fwd_res[4]\n need_run_ops = self._get_need_run_ops(op_desc)\n\n res = {}\n if hasattr(self, 'attrs') and bool(self.attrs.get('use_xpu', False)):\n return\n for op_desc, father_op_desc in reversed(need_run_ops):\n # The first one is the forward op\n has_infer_inplace = fluid.core.has_infer_inplace(op_desc.type())\n if op_desc.type() == self.op_type:\n if has_infer_inplace:\n res[op_desc] = self._check_forward_inplace(\n place,\n no_check_set=no_check_set,\n inplace_atol=inplace_atol)\n else:\n res[op_desc] = self._calc_output(\n place, no_check_set=no_check_set, for_inplace_test=True)\n else:\n # TODO(zhiqiu): enhance inplace_grad test for ops (sum and activation) using mkldnn\n # skip op that use_mkldnn currently\n flags_use_mkldnn = fluid.core.globals()[\"FLAGS_use_mkldnn\"]\n attrs_use_mkldnn = hasattr(\n self,\n 'attrs') and bool(self.attrs.get('use_mkldnn', False))\n if flags_use_mkldnn or attrs_use_mkldnn:\n warnings.warn(\n \"check inplace_grad for ops using mkldnn is not supported\"\n )\n continue\n if has_infer_inplace:\n fwd_res = res[father_op_desc]\n res[op_desc] = self._check_grad_inplace(\n place, fwd_res, op_desc, inplace_atol=inplace_atol)\n else:\n res[op_desc] = self._calc_grad_output(place, fwd_res,\n op_desc)\n\n def check_output_with_place(self,\n place,\n atol=0,\n no_check_set=None,\n equal_nan=False,\n check_dygraph=True,\n inplace_atol=None,\n check_eager=False):\n def find_imperative_actual(target_name, dygraph_outs, place):\n for name in dygraph_outs:\n if name == target_name:\n return dygraph_outs[name][0]\n var_list = dygraph_outs[name]\n for i, var in enumerate(var_list):\n if var.name == target_name:\n return dygraph_outs[name][i]\n self.assertTrue(False, \"Found failed {} {}\".format(\n dygraph_outs.keys(), target_name))\n\n def find_actual(target_name, fetch_list):\n found = [\n i for i, var_name in enumerate(fetch_list)\n if var_name == target_name\n ]\n self.assertTrue(\n len(found) == 1, \"Found {} {}\".format(len(found), target_name))\n return found[0]\n\n class Checker(object):\n \"\"\" base class for check with self.outputs.\n currently don't support check between checkers.\n \"\"\"\n\n def __init__(self, op_test, expect_dict):\n \"\"\" expect_dict is the self.outputs\n support : {str: [numpy]} and {str: [(str, numpy), (str, numpy)]}\n \"\"\"\n self.expects = expect_dict\n self.checker_name = \"checker\"\n self.op_test = op_test # stop the op_test object.\n self.op_type = op_test.op_type\n\n def init(self):\n pass\n\n def convert_uint16_to_float(self, actual_np, expect_np):\n raise NotImplementedError(\"base class, not implement!\")\n\n def calculate_output(self):\n \"\"\"\n judge whether convert current output and expect to uint16.\n return True | False\n \"\"\"\n pass\n\n def _is_skip_name(self, name):\n if name not in self.expects:\n return True\n if no_check_set is not None and name in 
no_check_set:\n return True\n return False\n\n def find_actual_value(self, name):\n \"\"\" return: (actual_tensor(var_base), actual_numpy)\n \"\"\"\n raise NotImplementedError(\"base class, not implement!\")\n\n def _compare_numpy(self, name, actual_np, expect_np):\n self.op_test.assertTrue(\n np.allclose(\n actual_np,\n expect_np,\n atol=atol,\n rtol=self.rtol if hasattr(self, 'rtol') else 1e-5,\n equal_nan=equal_nan),\n \"Output (\" + name + \") has diff at \" + str(place) + \" in \" +\n self.checker_name)\n\n def _compare_list(self, name, actual, expect):\n \"\"\" if expect is a tuple, we need to compare list.\n \"\"\"\n raise NotImplementedError(\"base class, not implement!\")\n\n def compare_single_output_with_expect(self, name, expect):\n actual, actual_np = self.find_actual_value(name)\n expect_np = expect[0] \\\n if isinstance(expect, tuple) else expect\n actual_np, expect_np = self.convert_uint16_to_float_ifneed(\n actual_np, expect_np)\n # NOTE(zhiqiu): np.allclose([], [1.]) returns True\n # see details: https://stackoverflow.com/questions/38331703/why-does-numpys-broadcasting-sometimes-allow-comparing-arrays-of-different-leng\n if expect_np.size == 0:\n self.op_test.assertTrue(actual_np.size == 0) # }}}\n self._compare_numpy(name, actual_np, expect_np)\n if isinstance(expect, tuple):\n self._compare_list(name, actual, expect)\n\n def compare_outputs_with_expects(self):\n for out_name, out_dup in Operator.get_op_outputs(self.op_type):\n if self._is_skip_name(out_name): continue\n if out_dup:\n # if self.output = {'name': [(subname, Tensor), (subname, Tensor)]}\n sub_out = self.expects[out_name]\n if not isinstance(sub_out, list):\n raise AssertionError(\"sub_out type %s is not list\",\n type(sub_out))\n for item in sub_out:\n sub_out_name, expect = item[0], item[1]\n self.compare_single_output_with_expect(sub_out_name,\n expect)\n else:\n expect = self.expects[out_name]\n self.compare_single_output_with_expect(out_name, expect)\n\n def check(self):\n \"\"\"\n return None means ok, raise Error means failed.\n\n the main enter point of Checker class\n \"\"\"\n self.init()\n self.calculate_output()\n self.compare_outputs_with_expects()\n\n class StaticChecker(Checker):\n def init(self):\n self.checker_name = \"static checker\"\n\n def calculate_output(self):\n outs, fetch_list = self.op_test._calc_output(\n place, no_check_set=no_check_set)\n self.outputs = outs\n self.fetch_list = fetch_list\n\n def find_actual_value(self, name):\n idx = find_actual(name, self.fetch_list)\n actual = self.outputs[idx]\n actual_t = np.array(actual)\n return actual, actual_t\n\n def convert_uint16_to_float_ifneed(self, actual_np, expect_np):\n \"\"\"\n judge whether convert current output and expect to uint16.\n return True | False\n \"\"\"\n if actual_np.dtype == np.uint16 and expect_np.dtype in [\n np.float32, np.float64\n ]:\n actual_np = convert_uint16_to_float(actual_np)\n self.rtol = 1.e-2\n else:\n self.rtol = 1.e-5\n if expect_np.dtype == np.uint16 and actual_np.dtype == np.uint16:\n nonlocal atol\n expect_np = convert_uint16_to_float(expect_np)\n actual_np = convert_uint16_to_float(actual_np)\n atol = max(atol, 0.03)\n return actual_np, expect_np\n\n def _compare_list(self, name, actual, expect):\n \"\"\" if expect is a tuple, we need to compare list.\n \"\"\"\n self.op_test.assertListEqual(\n actual.recursive_sequence_lengths(), expect[1],\n \"Output (\" + name + \") has different lod at \" + str(place))\n\n class DygraphChecker(Checker):\n def init(self):\n self.checker_name = \"dygraph 
checker\"\n\n def calculate_output(self):\n self.outputs = self.op_test._calc_dygraph_output(\n place, no_check_set=no_check_set)\n\n def find_actual_value(self, name):\n with fluid.dygraph.base.guard(place=place):\n imperative_actual = find_imperative_actual(\n name, self.outputs, place)\n imperative_actual_t = np.array(imperative_actual.value()\n .get_tensor())\n return imperative_actual, imperative_actual_t\n\n def convert_uint16_to_float_ifneed(self, actual_np, expect_np):\n if actual_np.dtype == np.uint16 and expect_np.dtype in [\n np.float32, np.float64\n ]:\n self.rtol = 1.e-2\n else:\n self.rtol = 1.e-5\n if self.op_test.is_bfloat16_op():\n if actual_np.dtype == np.uint16:\n actual_np = convert_uint16_to_float(actual_np)\n if expect_np.dtype == np.uint16:\n expect_np = convert_uint16_to_float(expect_np)\n return actual_np, expect_np\n\n def _compare_list(self, name, actual, expect):\n \"\"\" if expect is a tuple, we need to compare list.\n \"\"\"\n with fluid.dygraph.base.guard(place=place):\n self.op_test.assertListEqual(\n actual.value().get_tensor()\n .recursive_sequence_lengths(), expect[1],\n \"Output (\" + name + \") has different lod at \" +\n str(place) + \" in dygraph mode\")\n\n def _compare_numpy(self, name, actual_np, expect_np):\n if six.moves.reduce(lambda x, y: x * y, actual_np.shape,\n 1) == 0 and six.moves.reduce(\n lambda x, y: x * y, expect_np.shape,\n 1) == 0:\n pass\n else:\n self.op_test.assertTrue(\n np.allclose(\n actual_np,\n expect_np,\n atol=atol,\n rtol=self.rtol if hasattr(self, 'rtol') else 1e-5,\n equal_nan=equal_nan),\n \"Output (\" + name + \") has diff at \" + str(place) +\n \" in \" + self.checker_name)\n\n class EagerChecker(DygraphChecker):\n def init(self):\n self.checker_name = \"eager checker\"\n\n def calculate_output(self):\n # we only check end2end api when check_eager=True\n with _test_eager_guard():\n self.is_python_api_test = True\n eager_dygraph_outs = self.op_test._calc_python_api_output(\n place)\n if eager_dygraph_outs is None:\n self.is_python_api_test = False\n # missing KernelSignature, fall back to eager middle output.\n eager_dygraph_outs = self.op_test._calc_dygraph_output(\n place, no_check_set=no_check_set)\n self.outputs = eager_dygraph_outs\n\n def _compare_numpy(self, name, actual_np, expect_np):\n with _test_eager_guard():\n super()._compare_numpy(name, actual_np, expect_np)\n\n def convert_uint16_to_float_ifneed(self, actual_np, expect_np):\n with _test_eager_guard():\n return super().convert_uint16_to_float_ifneed(actual_np,\n expect_np)\n\n def find_actual_value(self, name):\n with _test_eager_guard():\n return super().find_actual_value(name)\n\n def _compare_list(self, name, actual, expect):\n \"\"\" if expect is a tuple, we need to compare list.\n \"\"\"\n with _test_eager_guard():\n super()._compare_list(name, actual, expect)\n\n def _is_skip_name(self, name):\n # if in final state and kernel signature don't have name, then skip it.\n if self.is_python_api_test and hasattr(\n self.op_test, \"python_out_sig\"\n ) and name not in self.op_test.python_out_sig:\n return True\n return super()._is_skip_name(name)\n\n # set some flags by the combination of arguments. 
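# Standalone sketch of the bfloat16 round trip the checkers above rely on,
# mirroring copy_bits_from_float_to_uint16 / convert_uint16_to_float: a
# bfloat16 value is the top 16 bits of a float32, stored as np.uint16, and is
# widened back by shifting those bits into a float32 again. Function names
# here are illustrative.
import struct
import numpy as np

def float_to_bf16_bits(f):
    return struct.unpack('<I', struct.pack('<f', f))[0] >> 16

def bf16_bits_to_float(bits):
    return struct.unpack('<f',
                         struct.pack('<I', np.uint32(bits) << np.uint32(16)))[0]

assert bf16_bits_to_float(float_to_bf16_bits(1.5)) == 1.5  # 1.5 is exact in bfloat16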
\n self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)\n if self.dtype == np.float64 and \\\n self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_OUTPUT_THRESHOLD_OP_LIST:\n atol = 0\n\n if self.is_bfloat16_op():\n if self.is_mkldnn_op():\n check_dygraph = False\n check_eager = False\n if hasattr(self, 'force_fp32_output') and getattr(\n self, 'force_fp32_output'):\n atol = 1e-2\n else:\n atol = 2\n else:\n atol = 1e-1\n\n if no_check_set is not None:\n if self.op_type not in no_check_set_white_list.no_check_set_white_list:\n raise AssertionError(\n \"no_check_set of op %s must be set to None.\" % self.op_type)\n static_checker = StaticChecker(self, self.outputs)\n static_checker.check()\n outs, fetch_list = static_checker.outputs, static_checker.fetch_list\n if check_dygraph:\n # always enable legacy dygraph\n g_enable_legacy_dygraph()\n\n dygraph_checker = DygraphChecker(self, self.outputs)\n dygraph_checker.check()\n dygraph_outs = dygraph_checker.outputs\n # yield the original state\n g_disable_legacy_dygraph()\n if check_eager:\n eager_checker = EagerChecker(self, self.outputs)\n eager_checker.check()\n eager_dygraph_outs = eager_checker.outputs\n\n # Note(zhiqiu): inplace_atol should be only set when op doesn't ensure\n # computational consistency.\n # For example, group_norm uses AtomicAdd on CUDAPlace, which do not ensure\n # computation order when multiple threads write the same address. So the\n # result of group_norm is non-deterministic when datatype is float.\n # When inplace_atol is not None, the inplace check uses numpy.allclose\n # to check inplace result instead of numpy.array_equal.\n if inplace_atol is not None:\n warnings.warn(\n \"inplace_atol should only be set when op doesn't ensure computational consistency, please check it!\"\n )\n # Check inplace for given op, its grad op, its grad_grad op, etc.\n # No effect on original OpTest\n # Currently not support ParallelExecutor on XPUPlace.\n if not paddle.is_compiled_with_xpu(\n ) and not paddle.is_compiled_with_npu(\n ) and not paddle.is_compiled_with_mlu():\n self.check_inplace_output_with_place(\n place, no_check_set=no_check_set, inplace_atol=inplace_atol)\n\n if check_eager:\n return outs, dygraph_outs, eager_dygraph_outs, fetch_list\n elif check_dygraph:\n return outs, dygraph_outs, fetch_list\n else:\n return outs, fetch_list\n\n def check_compile_vs_runtime(self, fetch_list, fetch_outs):\n def find_fetch_index(target_name, fetch_list):\n found = [\n i for i, var_name in enumerate(fetch_list)\n if var_name == target_name\n ]\n if len(found) == 0:\n return -1\n else:\n self.assertTrue(\n len(found) == 1,\n \"Found {} {}\".format(len(found), target_name))\n return found[0]\n\n for name in self.op.desc.output_names():\n var_names = self.op.desc.output(name)\n for var_name in var_names:\n i = find_fetch_index(var_name, fetch_list)\n if i == -1:\n # The output is dispensiable or intermediate.\n break\n out = fetch_outs[i]\n if isinstance(out, core.LoDTensor):\n lod_level_runtime = len(out.lod())\n else:\n if isinstance(out, core.LoDTensorArray):\n warnings.warn(\n \"The check of LoDTensorArray's lod_level is not implemented now!\"\n )\n lod_level_runtime = 0\n\n var = self.program.global_block().var(var_name)\n if var.type == core.VarDesc.VarType.LOD_TENSOR:\n lod_level_compile = var.lod_level\n else:\n lod_level_compile = 0\n self.assertEqual(\n lod_level_compile, lod_level_runtime,\n \"The lod_level of Output (\" + name +\n \") is different between compile-time and runtime (\" +\n 
str(lod_level_compile) + \" vs \" + str(lod_level_runtime) +\n \")\")\n\n def _get_places(self):\n if self.dtype == np.float16:\n if core.is_compiled_with_cuda() and core.op_support_gpu(\n self.op_type):\n place = core.CUDAPlace(0)\n if core.is_float16_supported(place):\n return [place]\n else:\n return []\n else:\n return []\n places = [fluid.CPUPlace()]\n cpu_only = self._cpu_only if hasattr(self, '_cpu_only') else False\n if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type)\\\n and not cpu_only:\n places.append(core.CUDAPlace(0))\n return places\n\n def check_output(self,\n atol=1e-5,\n no_check_set=None,\n equal_nan=False,\n check_dygraph=True,\n inplace_atol=None,\n check_eager=False):\n self.__class__.op_type = self.op_type\n if self.is_mkldnn_op():\n self.__class__.use_mkldnn = True\n\n if self.is_xpu_op():\n self.__class__.use_xpu = True\n\n places = self._get_places()\n for place in places:\n res = self.check_output_with_place(\n place,\n atol,\n no_check_set,\n equal_nan,\n check_dygraph,\n inplace_atol,\n check_eager=check_eager)\n if check_eager:\n assert check_dygraph == True\n outs, dygraph_outs, eager_dygraph_outs, fetch_list = res\n elif check_dygraph:\n outs, dygraph_outs, fetch_list = res\n else:\n outs, fetch_list = res\n if self.op_type not in compile_vs_runtime_white_list.COMPILE_RUN_OP_WHITE_LIST:\n self.check_compile_vs_runtime(fetch_list, outs)\n\n def check_output_customized(self, checker, custom_place=None):\n places = self._get_places()\n if custom_place:\n places.append(custom_place)\n for place in places:\n outs = self.calc_output(place)\n outs = [np.array(out) for out in outs]\n outs.sort(key=len)\n checker(outs)\n\n def check_output_with_place_customized(self, checker, place):\n outs = self.calc_output(place)\n outs = [np.array(out) for out in outs]\n outs.sort(key=len)\n checker(outs)\n\n def _assert_is_close(self, numeric_grads, analytic_grads, names,\n max_relative_error, msg_prefix):\n for a, b, name in six.moves.zip(numeric_grads, analytic_grads, names):\n # It asserts np.abs(a - b) / np.abs(a) < max_relative_error, in which\n # max_relative_error is 1e-7. According to the value of np.abs(a), we\n # change np.abs(a) to achieve dynamic threshold. 
For example, if\n # the value of np.abs(a) is between 1e-10 and 1e-8, we set np.abs(a)*=1e4.\n # Therefore, it asserts np.abs(a - b) / (np.abs(a)*1e4) < max_relative_error,\n # which is the same as np.abs(a - b) / np.abs(a) < max_relative_error*1e4.\n abs_a = np.abs(a)\n if self.dtype == np.float64 and \\\n self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST:\n abs_a[abs_a < 1e-10] = 1e-3\n abs_a[np.logical_and(abs_a > 1e-10, abs_a <= 1e-8)] *= 1e4\n abs_a[np.logical_and(abs_a > 1e-8, abs_a <= 1e-6)] *= 1e2\n elif self.is_bfloat16_op():\n abs_a[abs_a < 1e-2] = 1\n else:\n abs_a[abs_a < 1e-3] = 1\n\n diff_mat = np.abs(a - b) / abs_a\n max_diff = np.max(diff_mat)\n\n def err_msg():\n offset = np.argmax(diff_mat > max_relative_error)\n return (\"Operator %s error, %s variable %s (shape: %s, dtype: %s) max gradient diff %e over limit %e, \"\n \"the first error element is %d, expected %e, but got %e.\") \\\n % (self.op_type, msg_prefix, name, str(a.shape), self.dtype, max_diff, max_relative_error,\n offset, a.flatten()[offset], b.flatten()[offset])\n\n self.assertLessEqual(max_diff, max_relative_error, err_msg())\n\n def _check_grad_helper(self):\n self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)\n self.__class__.op_type = self.op_type\n self.__class__.exist_check_grad = True\n if self.dtype == np.float64:\n self.__class__.exist_fp64_check_grad = True\n\n def check_grad(self,\n inputs_to_check,\n output_names,\n no_grad_set=None,\n numeric_grad_delta=0.005,\n in_place=False,\n max_relative_error=0.005,\n user_defined_grads=None,\n user_defined_grad_outputs=None,\n check_dygraph=True,\n check_eager=False):\n self._check_grad_helper()\n places = self._get_places()\n for place in places:\n self.check_grad_with_place(\n place,\n inputs_to_check,\n output_names,\n no_grad_set,\n numeric_grad_delta,\n in_place,\n max_relative_error,\n user_defined_grads,\n user_defined_grad_outputs,\n check_dygraph,\n check_eager=check_eager)\n\n def check_grad_with_place(self,\n place,\n inputs_to_check,\n output_names,\n no_grad_set=None,\n numeric_grad_delta=0.005,\n in_place=False,\n max_relative_error=0.005,\n user_defined_grads=None,\n user_defined_grad_outputs=None,\n check_dygraph=True,\n numeric_place=None,\n check_eager=False):\n self.scope = core.Scope()\n op_inputs = self.inputs if hasattr(self, \"inputs\") else dict()\n op_outputs = self.outputs if hasattr(self, \"outputs\") else dict()\n op_attrs = self.attrs if hasattr(self, \"attrs\") else dict()\n\n self._check_grad_helper()\n if self.is_bfloat16_op() and self.is_mkldnn_op():\n check_dygraph = False\n check_eager = False\n\n if self.dtype == np.float64 and \\\n self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST:\n numeric_grad_delta = 1e-5\n max_relative_error = 1e-7\n\n cache_list = None\n if hasattr(self, \"cache_name_list\"):\n cache_list = self.cache_name_list\n\n # oneDNN numeric gradient should use CPU kernel\n use_onednn = False\n if \"use_mkldnn\" in op_attrs and op_attrs[\"use_mkldnn\"] == True:\n op_attrs[\"use_mkldnn\"] = False\n use_onednn = True\n\n self.op = create_op(\n self.scope,\n self.op_type,\n op_inputs,\n op_outputs,\n op_attrs,\n cache_list=cache_list)\n\n if use_onednn:\n op_attrs[\"use_mkldnn\"] = True\n\n if no_grad_set is None:\n no_grad_set = set()\n else:\n if (self.op_type not in no_grad_set_white_list.NEED_TO_FIX_OP_LIST\n ) and (\n self.op_type not in no_grad_set_white_list.NOT_CHECK_OP_LIST\n ) and (not self.is_bfloat16_op()):\n 
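# A non-empty no_grad_set silently skips the numeric gradient check for\n # those inputs, so it is only tolerated for ops on the dedicated white\n # lists (or for bf16 ops); every other op must check all of its inputs.\n 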
raise AssertionError(\"no_grad_set must be None, op_type is \" +\n self.op_type + \" Op.\")\n\n for input_to_check in inputs_to_check:\n set_input(self.scope, self.op, self.inputs, place)\n tensor_to_check = self.scope.find_var(input_to_check).get_tensor()\n tensor_size = six.moves.reduce(lambda a, b: a * b,\n tensor_to_check.shape(), 1)\n if tensor_size < 100:\n self.__class__.input_shape_is_large = False\n\n if not type(output_names) is list:\n output_names = [output_names]\n\n if numeric_place is None:\n numeric_place = place\n\n numeric_grads = user_defined_grads or [\n get_numeric_gradient(\n numeric_place,\n self.scope,\n self.op,\n self.inputs,\n input_to_check,\n output_names,\n delta=numeric_grad_delta,\n in_place=in_place) for input_to_check in inputs_to_check\n ]\n analytic_grads = self._get_gradient(inputs_to_check, place,\n output_names, no_grad_set,\n user_defined_grad_outputs)\n # comparison of bf16 results will happen as fp32\n # loop over list of grads and convert bf16 to fp32\n fp32_analytic_grads = []\n for grad in analytic_grads:\n if grad.dtype == np.uint16:\n grad = convert_uint16_to_float(grad)\n max_relative_error = 0.04 if max_relative_error < 0.04 else max_relative_error\n fp32_analytic_grads.append(grad)\n analytic_grads = fp32_analytic_grads\n\n fp32_numeric_grads = []\n for grad in numeric_grads:\n if grad.dtype == np.uint16:\n grad = convert_uint16_to_float(grad)\n max_relative_error = 0.04 if max_relative_error < 0.04 else max_relative_error\n fp32_numeric_grads.append(grad)\n numeric_grads = fp32_numeric_grads\n\n self._assert_is_close(numeric_grads, analytic_grads, inputs_to_check,\n max_relative_error,\n \"Gradient Check On %s\" % str(place))\n\n if check_dygraph:\n # ensure switch into legacy dygraph\n g_enable_legacy_dygraph()\n\n dygraph_grad = self._get_dygraph_grad(\n inputs_to_check, place, output_names, user_defined_grad_outputs,\n no_grad_set, False)\n fp32_grads = []\n for grad in dygraph_grad:\n if grad.dtype == np.uint16:\n grad = convert_uint16_to_float(grad)\n max_relative_error = 0.03 if max_relative_error < 0.03 else max_relative_error\n fp32_grads.append(grad)\n dygraph_grad = fp32_grads\n self._assert_is_close(numeric_grads, dygraph_grad, inputs_to_check,\n max_relative_error,\n \"Gradient Check On %s\" % str(place))\n # ensure switch back eager dygraph\n g_disable_legacy_dygraph()\n\n if check_eager:\n with fluid.dygraph.base.guard(place):\n with _test_eager_guard():\n eager_dygraph_grad = self._get_dygraph_grad(\n inputs_to_check, place, output_names,\n user_defined_grad_outputs, no_grad_set, check_eager)\n fp32_grads = []\n for grad in eager_dygraph_grad:\n if grad.dtype == np.uint16:\n grad = convert_uint16_to_float(grad)\n max_relative_error = 0.03 if max_relative_error < 0.03 else max_relative_error\n fp32_grads.append(grad)\n eager_dygraph_grad = fp32_grads\n self._assert_is_close(numeric_grads, eager_dygraph_grad,\n inputs_to_check, max_relative_error,\n \"Gradient Check On %s\" % str(place))\n\n def _find_var_in_dygraph(self, output_vars, name):\n if name in output_vars:\n return output_vars[name]\n else:\n for output_vars_index in output_vars:\n for output_vars_selected in output_vars[output_vars_index]:\n if output_vars_selected.name == name:\n return output_vars_selected\n\n def _get_dygraph_grad(self,\n inputs_to_check,\n place,\n output_names,\n user_defined_grad_outputs=None,\n no_grad_set=None,\n check_eager=False):\n with fluid.dygraph.base.guard(place=place):\n block = fluid.default_main_program().global_block()\n\n 
op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)\n\n # prepare input variable\n inputs, inputs_grad_dict = self.append_input_output_for_dygraph(\n op_proto, self.inputs, True, True, block)\n\n # prepare output variable\n outputs = self.append_input_output_for_dygraph(\n op_proto, self.outputs, False, False, block)\n\n # prepare attributes\n attrs_outputs = {}\n if hasattr(self, \"attrs\"):\n for attrs_name in self.attrs:\n if self.attrs[attrs_name] is not None:\n attrs_outputs[attrs_name] = self.attrs[attrs_name]\n\n if check_eager:\n eager_outputs = self._calc_python_api_output(place, inputs,\n outputs)\n # if outputs is None, the kernel signature is empty or some other error happened.\n if not check_eager or eager_outputs is None:\n block.append_op(\n type=self.op_type,\n inputs=inputs,\n outputs=outputs,\n attrs=attrs_outputs if hasattr(self, \"attrs\") else None)\n else:\n outputs = eager_outputs\n\n if self.dtype == np.uint16:\n cast_inputs = self._find_var_in_dygraph(outputs,\n output_names[0])\n cast_outputs = block.create_var(\n dtype=\"float32\", shape=cast_inputs[0].shape)\n cast_op = block.append_op(\n inputs={\"X\": cast_inputs},\n outputs={\"Out\": cast_outputs},\n type=\"cast\",\n attrs={\n \"in_dtype\": core.VarDesc.VarType.BF16,\n \"out_dtype\": core.VarDesc.VarType.FP32\n })\n outputs = {output_names[0]: cast_outputs}\n\n outputs_valid = {}\n for output_name in output_names:\n outputs_valid[output_name] = self._find_var_in_dygraph(\n outputs, output_name)\n\n if user_defined_grad_outputs is None:\n if len(outputs_valid) == 1:\n loss = block.create_var(\n dtype=self.dtype,\n type=core.VarDesc.VarType.LOD_TENSOR,\n persistable=False,\n stop_gradient=False,\n shape=[1])\n for outputs_valid_key in outputs_valid:\n block.append_op(\n type=\"mean\",\n inputs={\"X\": outputs_valid[outputs_valid_key]},\n outputs={\"Out\": [loss]},\n attrs=None)\n else:\n avg_sum = []\n for cur_loss in outputs_valid:\n cur_avg_loss = block.create_var(\n dtype=self.dtype,\n type=core.VarDesc.VarType.LOD_TENSOR,\n persistable=False,\n stop_gradient=False)\n block.append_op(\n type=\"mean\",\n inputs={\"X\": outputs_valid[cur_loss]},\n outputs={\"Out\": [cur_avg_loss]},\n attrs=None)\n avg_sum.append(cur_avg_loss)\n loss_sum = block.create_var(\n dtype=self.dtype,\n type=core.VarDesc.VarType.LOD_TENSOR,\n persistable=False,\n stop_gradient=False,\n shape=[1])\n block.append_op(\n type='sum',\n inputs={\"X\": avg_sum},\n outputs={\"Out\": loss_sum},\n attrs=None)\n loss = block.create_var(\n dtype=self.dtype,\n type=core.VarDesc.VarType.LOD_TENSOR,\n persistable=False,\n stop_gradient=False,\n shape=[1])\n block.append_op(\n type='scale',\n inputs={\"X\": loss_sum},\n outputs={\"Out\": loss},\n attrs={'scale': 1.0 / float(len(avg_sum))})\n loss.backward()\n\n fetch_list_grad = []\n for inputs_to_check_name in inputs_to_check:\n a = inputs_grad_dict[inputs_to_check_name].gradient()\n fetch_list_grad.append(a)\n return fetch_list_grad\n else:\n # user_defined_grad_outputs here are numpy arrays\n if not isinstance(user_defined_grad_outputs, list):\n user_defined_grad_outputs = [user_defined_grad_outputs]\n grad_outputs = []\n for grad_out_value in user_defined_grad_outputs:\n grad_outputs.append(paddle.to_tensor(grad_out_value))\n # delete the inputs that do not need gradient computation\n for no_grad_val in no_grad_set:\n del inputs[no_grad_val]\n\n if not _in_legacy_dygraph():\n core.eager.run_backward(\n fluid.layers.utils.flatten(outputs), grad_outputs,\n False)\n grad_inputs = []\n for inputs_list in 
inputs.values():\n for inp in inputs_list:\n grad_inputs.append(inp.grad.numpy())\n return grad_inputs\n else:\n grad_inputs = paddle.grad(\n outputs=fluid.layers.utils.flatten(outputs),\n inputs=fluid.layers.utils.flatten(inputs),\n grad_outputs=grad_outputs)\n return [grad.numpy() for grad in grad_inputs]\n\n @staticmethod\n def _numpy_to_lod_tensor(np_value, lod, place):\n tensor = core.LoDTensor()\n tensor.set(np_value, place)\n if lod is not None:\n tensor.set_recursive_sequence_lengths(lod)\n return tensor\n\n @staticmethod\n def np_dtype_to_fluid_dtype(input):\n return input\n\n @staticmethod\n def fluid_dtype_to_np_dtype(dtype):\n return dtype\n\n @staticmethod\n def np_value_to_fluid_value(input):\n return input\n\n def _get_gradient(self,\n input_to_check,\n place,\n output_names,\n no_grad_set,\n user_defined_grad_outputs=None,\n parallel=False):\n prog = Program()\n scope = core.Scope()\n block = prog.global_block()\n self._append_ops(block)\n\n inputs = self._get_inputs(block)\n outputs = self._get_outputs(block)\n feed_dict = self.feed_var(inputs, place)\n\n if user_defined_grad_outputs is None:\n if self.dtype == np.uint16:\n cast_inputs = list(map(block.var, output_names))\n cast_outputs = block.create_var(\n dtype=\"float32\", shape=cast_inputs[0].shape)\n cast_op = block.append_op(\n inputs={\"X\": cast_inputs},\n outputs={\"Out\": cast_outputs},\n type=\"cast\",\n attrs={\n \"in_dtype\": core.VarDesc.VarType.BF16,\n \"out_dtype\": core.VarDesc.VarType.FP32\n })\n cast_op.desc.infer_var_type(block.desc)\n cast_op.desc.infer_shape(block.desc)\n output_names = [cast_outputs.name]\n loss = append_loss_ops(block, output_names)\n param_grad_list = append_backward(\n loss=loss,\n parameter_list=input_to_check,\n no_grad_set=no_grad_set)\n fetch_list = [g for p, g in param_grad_list]\n else:\n assert parallel is False, \"unsupported parallel mode when giving custom grad outputs.\"\n # user_defined_grad_outputs here are numpy arrays\n if not isinstance(user_defined_grad_outputs, list):\n user_defined_grad_outputs = [user_defined_grad_outputs]\n grad_outputs = []\n for grad_out_value in user_defined_grad_outputs:\n # `persistable` is used to avoid the executor creating a new var in the local scope\n var = block.create_var(\n shape=grad_out_value.shape,\n dtype=grad_out_value.dtype,\n persistable=True)\n true_var = scope.var(var.name)\n tensor = true_var.get_tensor()\n tensor.set(grad_out_value, place)\n grad_outputs.append(var)\n targets = [\n outputs[name] for name in outputs if name in output_names\n ]\n inputs = [inputs[name] for name in input_to_check if name in inputs]\n grad_inputs = paddle.static.gradients(targets, inputs, grad_outputs,\n no_grad_set)\n fetch_list = grad_inputs\n\n if parallel:\n use_cuda = False\n if isinstance(place, fluid.CUDAPlace):\n use_cuda = True\n compiled_prog = fluid.CompiledProgram(prog).with_data_parallel(\n loss_name=loss.name, places=place)\n prog = compiled_prog\n executor = fluid.Executor(place)\n return list(\n map(np.array,\n executor.run(prog,\n feed_dict,\n fetch_list,\n scope=scope,\n return_numpy=False)))\n\n\nclass OpTestTool:\n @classmethod\n def skip_if(cls, condition: object, reason: str):\n return unittest.skipIf(condition, reason)\n\n @classmethod\n def skip_if_not_cpu_bf16(cls):\n return OpTestTool.skip_if(\n not (isinstance(_current_expected_place(), core.CPUPlace) and\n core.supports_bfloat16()),\n \"Place does not support BF16 evaluation\")\n\n @classmethod\n def skip_if_not_cpu(cls):\n return OpTestTool.skip_if(\n not 
isinstance(_current_expected_place(), core.CPUPlace),\n \"OneDNN supports only CPU for now\")\n",
"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport sys\nsys.path.append(\"..\")\n\nimport paddle\nimport paddle.fluid.core as core\n\nfrom op_test import OpTest\nfrom op_test_xpu import XPUOpTest\nfrom xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper\n\npaddle.enable_static()\n\n\ndef calculate_momentum_by_numpy(param, grad, mu, velocity, use_nesterov,\n learning_rate, regularization_method,\n regularization_coeff):\n if regularization_method == \"l2_decay\":\n grad = grad + regularization_coeff * param\n velocity_out = mu * velocity + grad\n if use_nesterov:\n param_out = param - (grad + velocity_out * mu) * learning_rate\n else:\n param_out = param - learning_rate * velocity_out\n else:\n velocity_out = mu * velocity + grad\n if use_nesterov:\n param_out = param - grad * learning_rate - \\\n velocity_out * mu * learning_rate\n else:\n param_out = param - learning_rate * velocity_out\n return param_out, velocity_out\n\n\nclass XPUTestMomentumOP(XPUOpTestWrapper):\n def __init__(self):\n self.op_name = 'momentum'\n self.use_dynamic_create_class = False\n\n class TestMomentumOPBase(XPUOpTest):\n def setUp(self):\n self.place = paddle.XPUPlace(0)\n self.xpu_version = core.get_xpu_device_version(0)\n self.init_dtype()\n self.set_case()\n\n def set_case(self):\n self.op_type = 'momentum'\n self.dtype = self.in_type\n self.init_config()\n\n self.param = np.random.uniform(-1, 1,\n self.input_shape).astype(self.dtype)\n self.grad = np.random.uniform(-1, 1,\n self.input_shape).astype(self.dtype)\n self.velocity = np.random.uniform(\n -1, 1, self.input_shape).astype(self.dtype)\n\n param_out, velocity_out = calculate_momentum_by_numpy(\n param=self.param,\n grad=self.grad,\n mu=self.mu,\n velocity=self.velocity,\n use_nesterov=self.use_nesterov,\n learning_rate=self.learning_rate,\n regularization_method=self.regularization_method,\n regularization_coeff=self.regularization_coeff)\n self.inputs = {\n 'Param': self.param,\n 'Grad': self.grad,\n 'Velocity': self.velocity,\n 'LearningRate': self.learning_rate,\n }\n self.attrs = {\n 'use_xpu': True,\n 'mu': self.mu,\n 'use_nesterov': self.use_nesterov,\n 'regularization_method': self.regularization_method,\n 'regularization_coeff': self.regularization_coeff\n }\n self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}\n\n def init_dtype(self):\n self.dtype = np.float32\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n def init_config(self):\n self.input_shape = [864]\n self.learning_rate = np.array([0.001]).astype(self.dtype)\n self.mu = 0.0001\n self.use_nesterov = False\n self.regularization_method = None\n self.regularization_coeff = 0\n\n class XPUTestMomentum1(TestMomentumOPBase):\n def init_config(self):\n self.input_shape = [2, 768]\n self.learning_rate = np.array([0.002]).astype(self.dtype)\n self.mu = 0.001\n 
self.use_nesterov = False\n self.regularization_method = None\n self.regularization_coeff = 0\n\n class XPUTestMomentum2(TestMomentumOPBase):\n def init_config(self):\n self.input_shape = [3, 8, 4096]\n self.learning_rate = np.array([0.005]).astype(self.dtype)\n self.mu = 0.002\n self.use_nesterov = True\n self.regularization_method = None\n self.regularization_coeff = 0\n\n class XPUTestMomentum3(TestMomentumOPBase):\n def init_config(self):\n self.input_shape = [1024]\n self.learning_rate = np.array([0.01]).astype(self.dtype)\n self.mu = 0.0001\n self.use_nesterov = False\n if self.xpu_version != core.XPUVersion.XPU1:\n self.regularization_method = \"l2_decay\"\n self.regularization_coeff = 0.005\n else:\n # regularization not supported on XPU1\n self.regularization_method = None\n self.regularization_coeff = 0\n\n class XPUTestMomentum4(TestMomentumOPBase):\n def init_config(self):\n self.input_shape = [2, 2, 255]\n self.learning_rate = np.array([0.0005]).astype(self.dtype)\n self.mu = 0.005\n self.use_nesterov = True\n if self.xpu_version != core.XPUVersion.XPU1:\n self.regularization_method = \"l2_decay\"\n self.regularization_coeff = 0.005\n else:\n # regularization not supported on XPU1\n self.regularization_method = None\n self.regularization_coeff = 0\n\n\nsupport_types = get_xpu_op_support_types('momentum')\nfor stype in support_types:\n create_test_class(globals(), XPUTestMomentumOP, stype)\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport numpy as np\nfrom ..framework import core\nfrom paddle.fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype\n\n__all__ = []\n\n\nclass PrintOptions(object):\n precision = 8\n threshold = 1000\n edgeitems = 3\n linewidth = 80\n sci_mode = False\n\n\nDEFAULT_PRINT_OPTIONS = PrintOptions()\n\n\ndef set_printoptions(precision=None,\n threshold=None,\n edgeitems=None,\n sci_mode=None,\n linewidth=None):\n \"\"\"Set the printing options for Tensor.\n\n Args:\n precision (int, optional): Number of digits of the floating number, default 8.\n threshold (int, optional): Total number of elements printed, default 1000.\n edgeitems (int, optional): Number of elements in summary at the beginning and ending of each dimension, default 3.\n sci_mode (bool, optional): Format the floating number with scientific notation or not, default False.\n linewidth (int, optional): Number of characters each line, default 80.\n \n \n Returns:\n None.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n paddle.seed(10)\n a = paddle.rand([10, 20])\n paddle.set_printoptions(4, 100, 3)\n print(a)\n \n '''\n Tensor(shape=[10, 20], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n [[0.0002, 0.8503, 0.0135, ..., 0.9508, 0.2621, 0.6661],\n [0.9710, 0.2605, 0.9950, ..., 0.4427, 0.9241, 0.9363],\n [0.0948, 0.3226, 0.9955, ..., 0.1198, 0.0889, 0.9231],\n ...,\n [0.7206, 0.0941, 0.5292, ..., 0.4856, 0.1379, 0.0351],\n [0.1745, 0.5621, 0.3602, ..., 0.2998, 0.4011, 0.1764],\n [0.0728, 0.7786, 0.0314, ..., 0.2583, 0.1654, 0.0637]])\n '''\n \"\"\"\n kwargs = {}\n\n if precision is not None:\n check_type(precision, 'precision', (int), 'set_printoptions')\n DEFAULT_PRINT_OPTIONS.precision = precision\n kwargs['precision'] = precision\n if threshold is not None:\n check_type(threshold, 'threshold', (int), 'set_printoptions')\n DEFAULT_PRINT_OPTIONS.threshold = threshold\n kwargs['threshold'] = threshold\n if edgeitems is not None:\n check_type(edgeitems, 'edgeitems', (int), 'set_printoptions')\n DEFAULT_PRINT_OPTIONS.edgeitems = edgeitems\n kwargs['edgeitems'] = edgeitems\n if linewidth is not None:\n check_type(linewidth, 'linewidth', (int), 'set_printoptions')\n DEFAULT_PRINT_OPTIONS.linewidth = linewidth\n kwargs['linewidth'] = linewidth\n if sci_mode is not None:\n check_type(sci_mode, 'sci_mode', (bool), 'set_printoptions')\n DEFAULT_PRINT_OPTIONS.sci_mode = sci_mode\n kwargs['sci_mode'] = sci_mode\n core.set_printoptions(**kwargs)\n\n\ndef _to_summary(var):\n edgeitems = DEFAULT_PRINT_OPTIONS.edgeitems\n\n # Handle tensor of shape contains 0, like [0, 2], [3, 0, 3]\n if np.prod(var.shape) == 0:\n return np.array([])\n\n if len(var.shape) == 0:\n return var\n elif len(var.shape) == 1:\n if var.shape[0] > 2 * edgeitems:\n return np.concatenate([var[:edgeitems], var[(-1 * edgeitems):]])\n else:\n return var\n else:\n # recursively handle all 
dimensions\n if var.shape[0] > 2 * edgeitems:\n begin = [x for x in var[:edgeitems]]\n end = [x for x in var[(-1 * edgeitems):]]\n return np.stack([_to_summary(x) for x in (begin + end)])\n else:\n return np.stack([_to_summary(x) for x in var])\n\n\ndef _format_item(np_var, max_width=0, signed=False):\n if np_var.dtype == np.float32 or np_var.dtype == np.float64 or np_var.dtype == np.float16:\n if DEFAULT_PRINT_OPTIONS.sci_mode:\n item_str = '{{:.{}e}}'.format(\n DEFAULT_PRINT_OPTIONS.precision).format(np_var)\n elif np.ceil(np_var) == np_var:\n item_str = '{:.0f}.'.format(np_var)\n else:\n item_str = '{{:.{}f}}'.format(\n DEFAULT_PRINT_OPTIONS.precision).format(np_var)\n else:\n item_str = '{}'.format(np_var)\n\n if max_width > len(item_str):\n if signed: # handle the sign character for tensors with negative items\n if np_var < 0:\n return item_str.ljust(max_width)\n else:\n return ' ' + item_str.ljust(max_width - 1)\n else:\n return item_str.ljust(max_width)\n else: # used for _get_max_width\n return item_str\n\n\ndef _get_max_width(var):\n # return max_width for a scalar\n max_width = 0\n signed = False\n for item in list(var.flatten()):\n if (not signed) and (item < 0):\n signed = True\n item_str = _format_item(item)\n max_width = max(max_width, len(item_str))\n\n return max_width, signed\n\n\ndef _format_tensor(var, summary, indent=0, max_width=0, signed=False):\n \"\"\"\n Format a tensor\n\n Args:\n var(Tensor): The tensor to be formatted.\n summary(bool): Whether to summarize. If True, some elements will not be printed and will be replaced with \"...\".\n indent(int): The indent of each line.\n max_width(int): The max width of each element in var.\n signed(bool): Whether to print a +/- sign.\n \"\"\"\n edgeitems = DEFAULT_PRINT_OPTIONS.edgeitems\n linewidth = DEFAULT_PRINT_OPTIONS.linewidth\n\n if len(var.shape) == 0:\n # currently, shape = [], i.e., a scalar tensor, is not supported.\n # If it were supported, it would be formatted like this.\n return _format_item(var, max_width, signed)\n elif len(var.shape) == 1:\n item_length = max_width + 2\n items_per_line = (linewidth - indent) // item_length\n items_per_line = max(1, items_per_line)\n\n if summary and var.shape[0] > 2 * edgeitems:\n items = [\n _format_item(item, max_width, signed)\n for item in list(var)[:edgeitems]\n ] + ['...'] + [\n _format_item(item, max_width, signed)\n for item in list(var)[(-1 * edgeitems):]\n ]\n else:\n items = [\n _format_item(item, max_width, signed) for item in list(var)\n ]\n lines = [\n items[i:i + items_per_line]\n for i in range(0, len(items), items_per_line)\n ]\n s = (',\\n' + ' ' *\n (indent + 1)).join([', '.join(line) for line in lines])\n return '[' + s + ']'\n else:\n # recursively handle all dimensions\n if summary and var.shape[0] > 2 * edgeitems:\n vars = [\n _format_tensor(x, summary, indent + 1, max_width, signed)\n for x in var[:edgeitems]\n ] + ['...'] + [\n _format_tensor(x, summary, indent + 1, max_width, signed)\n for x in var[(-1 * edgeitems):]\n ]\n else:\n vars = [\n _format_tensor(x, summary, indent + 1, max_width, signed)\n for x in var\n ]\n\n return '[' + (',' + '\\n' * (len(var.shape) - 1) + ' ' *\n (indent + 1)).join(vars) + ']'\n\n\ndef to_string(var, prefix='Tensor'):\n indent = len(prefix) + 1\n\n dtype = convert_dtype(var.dtype)\n if var.dtype == core.VarDesc.VarType.BF16:\n dtype = 'bfloat16'\n\n _template = \"{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\\n{indent}{data})\"\n\n tensor = var.value().get_tensor()\n if not tensor._is_initialized():\n 
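# An uninitialized tensor holds no allocation to read, so report that\n # instead of trying to format its data.\n 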
return \"Tensor(Not initialized)\"\n\n if var.dtype == core.VarDesc.VarType.BF16:\n var = var.astype('float32')\n np_var = var.numpy()\n\n if len(var.shape) == 0:\n size = 0\n else:\n size = 1\n for dim in var.shape:\n size *= dim\n\n summary = False\n if size > DEFAULT_PRINT_OPTIONS.threshold:\n summary = True\n\n max_width, signed = _get_max_width(_to_summary(np_var))\n\n data = _format_tensor(\n np_var, summary, indent=indent, max_width=max_width, signed=signed)\n\n return _template.format(\n prefix=prefix,\n shape=var.shape,\n dtype=dtype,\n place=var._place_str,\n stop_gradient=var.stop_gradient,\n indent=' ' * indent,\n data=data)\n\n\ndef _format_dense_tensor(tensor, indent):\n if tensor.dtype == core.VarDesc.VarType.BF16:\n tensor = tensor.astype('float32')\n\n np_tensor = tensor.numpy()\n\n if len(tensor.shape) == 0:\n size = 0\n else:\n size = 1\n for dim in tensor.shape:\n size *= dim\n\n sumary = False\n if size > DEFAULT_PRINT_OPTIONS.threshold:\n sumary = True\n\n max_width, signed = _get_max_width(_to_summary(np_tensor))\n\n data = _format_tensor(\n np_tensor, sumary, indent=indent, max_width=max_width, signed=signed)\n return data\n\n\ndef sparse_tensor_to_string(tensor, prefix='Tensor'):\n indent = len(prefix) + 1\n if tensor.is_sparse_coo():\n _template = \"{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient}, \\n{indent}{indices}, \\n{indent}{values})\"\n indices_tensor = tensor.indices()\n values_tensor = tensor.values()\n indices_data = 'indices=' + _format_dense_tensor(indices_tensor, indent\n + len('indices='))\n values_data = 'values=' + _format_dense_tensor(values_tensor, indent +\n len('values='))\n return _template.format(\n prefix=prefix,\n shape=tensor.shape,\n dtype=tensor.dtype,\n place=tensor._place_str,\n stop_gradient=tensor.stop_gradient,\n indent=' ' * indent,\n indices=indices_data,\n values=values_data)\n else:\n _template = \"{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient}, \\n{indent}{crows}, \\n{indent}{cols}, \\n{indent}{values})\"\n crows_tensor = tensor.crows()\n cols_tensor = tensor.cols()\n elements_tensor = tensor.values()\n crows_data = 'crows=' + _format_dense_tensor(crows_tensor, indent +\n len('crows='))\n cols_data = 'cols=' + _format_dense_tensor(cols_tensor, indent +\n len('cols='))\n values_data = 'values=' + _format_dense_tensor(elements_tensor, indent +\n len('values='))\n\n return _template.format(\n prefix=prefix,\n shape=tensor.shape,\n dtype=tensor.dtype,\n place=tensor._place_str,\n stop_gradient=tensor.stop_gradient,\n indent=' ' * indent,\n crows=crows_data,\n cols=cols_data,\n values=values_data)\n\n\ndef tensor_to_string(tensor, prefix='Tensor'):\n indent = len(prefix) + 1\n\n dtype = convert_dtype(tensor.dtype)\n if tensor.dtype == core.VarDesc.VarType.BF16:\n dtype = 'bfloat16'\n\n _template = \"{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\\n{indent}{data})\"\n\n if tensor.is_sparse():\n return sparse_tensor_to_string(tensor, prefix)\n\n if not tensor._is_dense_tensor_hold_allocation():\n return \"Tensor(Not initialized)\"\n else:\n data = _format_dense_tensor(tensor, indent)\n return _template.format(\n prefix=prefix,\n shape=tensor.shape,\n dtype=dtype,\n place=tensor._place_str,\n stop_gradient=tensor.stop_gradient,\n indent=' ' * indent,\n data=data)\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport os\nimport tempfile\nimport unittest\nimport numpy as np\n\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator\nfrom paddle.fluid.dygraph.jit import declarative\nfrom paddle.fluid.dygraph.dygraph_to_static.partial_program import partial_program_from\nfrom paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX, INFER_PARAMS_INFO_SUFFIX\n\nSEED = 2020\n\nnp.random.seed(SEED)\n\nplace = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace(\n)\nprogram_translator = ProgramTranslator()\n\n\nclass SimpleFcLayer(fluid.dygraph.Layer):\n def __init__(self, fc_size):\n super(SimpleFcLayer, self).__init__()\n self._linear = fluid.dygraph.Linear(fc_size, fc_size)\n\n @declarative\n def forward(self, x):\n y = self._linear(x)\n z = self._linear(y)\n out = fluid.layers.mean(z)\n return out, y\n\n\nclass TestDyToStaticSaveInferenceModel(unittest.TestCase):\n def setUp(self):\n self.temp_dir = tempfile.TemporaryDirectory()\n\n def tearDown(self):\n self.temp_dir.cleanup()\n\n def test_save_inference_model(self):\n fc_size = 20\n x_data = np.random.random((fc_size, fc_size)).astype('float32')\n with fluid.dygraph.guard(place):\n fluid.default_startup_program().random_seed = SEED\n fluid.default_main_program().random_seed = SEED\n\n x = fluid.dygraph.to_variable(x_data)\n layer = SimpleFcLayer(fc_size)\n adam = fluid.optimizer.SGD(learning_rate=0.1,\n parameter_list=layer.parameters())\n\n for i in range(5):\n loss, pred = layer(x)\n loss.backward()\n adam.minimize(loss)\n layer.clear_gradients()\n # test for saving model in dygraph.guard\n infer_model_prefix = os.path.join(\n self.temp_dir.name, \"test_dy2stat_inference_in_guard/model\")\n infer_model_dir = os.path.join(self.temp_dir.name,\n \"test_dy2stat_inference_in_guard\")\n fluid.dygraph.jit.save(\n layer=layer,\n path=infer_model_prefix,\n input_spec=[x],\n output_spec=[pred])\n # Check the correctness of the inference\n dygraph_out, _ = layer(x)\n self.check_save_inference_model(layer, [x_data], dygraph_out.numpy())\n self.check_save_inference_model(\n layer, [x_data], dygraph_out.numpy(), fetch=[loss])\n self.check_save_inference_model(\n layer, [x_data], dygraph_out.numpy(), feed=[x])\n\n def check_save_inference_model(self,\n model,\n inputs,\n gt_out,\n feed=None,\n fetch=None):\n\n expected_persistable_vars = set([p.name for p in model.parameters()])\n\n infer_model_prefix = os.path.join(self.temp_dir.name,\n \"test_dy2stat_inference/model\")\n infer_model_dir = os.path.join(self.temp_dir.name,\n \"test_dy2stat_inference\")\n model_filename = \"model\" + INFER_MODEL_SUFFIX\n params_filename = \"model\" + INFER_PARAMS_SUFFIX\n fluid.dygraph.jit.save(\n layer=model,\n path=infer_model_prefix,\n input_spec=feed if feed else None,\n output_spec=fetch if fetch else None)\n # Check the 
correctness of the inference\n infer_out = self.load_and_run_inference(infer_model_dir, model_filename,\n params_filename, inputs)\n self.assertTrue(np.allclose(gt_out, infer_out))\n\n def load_and_run_inference(self, model_path, model_filename,\n params_filename, inputs):\n paddle.enable_static()\n exe = fluid.Executor(place)\n [inference_program, feed_target_names,\n fetch_targets] = fluid.io.load_inference_model(\n dirname=model_path,\n executor=exe,\n model_filename=model_filename,\n params_filename=params_filename)\n results = exe.run(inference_program,\n feed=dict(zip(feed_target_names, inputs)),\n fetch_list=fetch_targets)\n\n return np.array(results[0])\n\n\nclass TestPartialProgramRaiseError(unittest.TestCase):\n def test_param_type(self):\n program_translator = ProgramTranslator()\n program_translator.enable(True)\n x_data = np.random.random((20, 20)).astype('float32')\n\n with fluid.dygraph.guard(fluid.CPUPlace()):\n net = SimpleFcLayer(20)\n x = fluid.dygraph.to_variable(x_data)\n out = net(x)\n\n program_cache = net.forward.program_cache\n _, (concrete_program, _) = program_cache.last()\n\n params = concrete_program.parameters\n\n concrete_program.parameters = params[0]\n # TypeError: Type of self._params should be list or tuple,\n # but received <class 'paddle.fluid.framework.ParamBase'>.\n with self.assertRaises(TypeError):\n partial_program_from(concrete_program)\n\n params[0] = \"linear.w.0\"\n concrete_program.parameters = params\n # TypeError: Type of self._params[0] should be framework.ParamBase,\n # but received <type 'str'>.\n with self.assertRaises(TypeError):\n partial_program_from(concrete_program)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport paddle.fluid.core as core\nfrom paddle.fluid.op import Operator\nfrom op_test import OpTest\nimport paddle\nimport paddle.fluid as fluid\n\n\ndef calculate_sparse_momentum_by_numpy(param,\n grad,\n mu,\n velocity,\n use_nesterov,\n learning_rate,\n index,\n axis,\n regularization_method=None,\n regularization_coeff=1.0):\n sub_grad = grad.copy()\n grad = np.zeros_like(param)\n if axis == 0:\n unique_index = np.unique(index)\n for idx in unique_index:\n grad[idx, :] = np.sum(sub_grad[index == idx, :], axis=0)\n else:\n unique_index = np.unique(index)\n for idx in unique_index:\n grad[:, idx] = np.sum(sub_grad[:, index == idx], axis=1)\n if regularization_method == \"l2_decay\":\n grad = grad + regularization_coeff * param\n\n velocity_out = mu * velocity + grad\n if use_nesterov:\n param_out = param - (grad + velocity_out * mu) * learning_rate\n else:\n param_out = param - learning_rate * velocity_out\n else:\n velocity_out = mu * velocity + grad\n if use_nesterov:\n param_out = param - grad * learning_rate - \\\n velocity_out * mu * learning_rate\n else:\n param_out = param - learning_rate * velocity_out\n\n return param_out, velocity_out\n\n\nclass TestSparseMomentumOp(OpTest):\n def setUp(self):\n self.op_type = \"sparse_momentum\"\n self.dtype = np.float32\n self.index_dtype = np.int32\n self.axis = 0\n self.multi_precision = False\n self.use_nesterov = False\n self.batch_size = 20\n self.num_classes = 20\n self.init_dtype()\n self.init_axis()\n self.init_multi_precision()\n self.init_use_nesterov()\n\n if self.multi_precision:\n assert self.dtype == np.float16\n\n param = np.random.random(\n (self.batch_size, self.num_classes)).astype(self.dtype)\n grad = np.random.random(\n (self.batch_size, self.num_classes)).astype(self.dtype)\n if self.axis == 0:\n index = np.random.randint(\n 0,\n self.batch_size,\n size=(self.batch_size // 2, ),\n dtype=self.index_dtype)\n grad = grad[index]\n else:\n index = np.random.randint(\n 0,\n self.num_classes,\n size=(self.num_classes // 2, ),\n dtype=self.index_dtype)\n grad = grad[:, index]\n velocity = np.random.random(\n (self.batch_size, self.num_classes)).astype(self.dtype)\n learning_rate = np.array([0.001]).astype(self.dtype)\n\n mu = 0.9\n regularization_method = \"l2_decay\"\n regularization_coeff = 1.0\n\n param_out, velocity_out = calculate_sparse_momentum_by_numpy(\n param=param,\n grad=grad,\n mu=mu,\n velocity=velocity,\n use_nesterov=self.use_nesterov,\n learning_rate=learning_rate,\n regularization_method=regularization_method,\n regularization_coeff=regularization_coeff,\n index=index,\n axis=self.axis)\n\n self.attrs = {\n 'mu': mu,\n 'use_nesterov': self.use_nesterov,\n 'regularization_method': regularization_method,\n 'regularization_coeff': regularization_coeff,\n 'multi_precision': self.multi_precision,\n 'axis': self.axis,\n }\n\n 
self.inputs = {\n 'Param': param.astype(\"float16\") if self.multi_precision else param,\n 'Velocity': velocity.astype(\"float32\")\n if self.multi_precision else velocity,\n 'LearningRate': learning_rate.astype(\"float32\")\n if self.multi_precision else learning_rate,\n 'Grad': grad.astype(\"float16\") if self.multi_precision else grad,\n 'Index': index,\n 'Axis': np.array(self.axis).astype(np.int32),\n }\n self.outputs = {\n 'ParamOut': param_out.astype(\"float16\")\n if self.multi_precision else param_out,\n 'VelocityOut': velocity_out.astype(\"float32\")\n if self.multi_precision else velocity_out,\n }\n\n if self.multi_precision:\n self.inputs['MasterParam'] = param.astype(\n \"float32\") if self.multi_precision else param\n self.outputs['MasterParamOut'] = param_out.astype(\n \"float32\") if self.multi_precision else param_out\n\n def init_dtype(self):\n pass\n\n def init_axis(self):\n pass\n\n def init_multi_precision(self):\n pass\n\n def init_use_nesterov(self):\n pass\n\n def test_check_output(self):\n self.check_output(\n atol=5e-3 if self.multi_precision else 1e-5, check_eager=True)\n\n\nclass TestSparseMomentumOpDtype1(TestSparseMomentumOp):\n def init_dtype(self):\n self.dtype = np.float32\n self.index_dtype = np.int64\n\n\nclass TestSparseMomentumOpDtype2(TestSparseMomentumOp):\n def init_dtype(self):\n self.dtype = np.float64\n self.index_dtype = np.int32\n\n\nclass TestSparseMomentumOpDtype3(TestSparseMomentumOp):\n def init_dtype(self):\n self.dtype = np.float64\n self.index_dtype = np.int64\n\n\nclass TestSparseMomentumOpAxis(TestSparseMomentumOp):\n def init_axis(self):\n self.axis = 1\n\n\nclass TestSparseMomentumOpNesterov(TestSparseMomentumOp):\n def init_use_nesterov(self):\n self.use_nesterov = True\n\n\nclass TestSparseMomentumOpMultiPrecision(TestSparseMomentumOp):\n def init_dtype(self):\n self.dtype = np.float16\n self.index_dtype = np.int32\n\n def init_multi_precision(self):\n self.multi_precision = True\n\n def init_use_nesterov(self):\n self.use_nesterov = True\n\n\nclass TestSparseMomentumOpMultiPrecision1(TestSparseMomentumOp):\n def init_dtype(self):\n self.dtype = np.float16\n self.index_dtype = np.int64\n\n def init_multi_precision(self):\n self.multi_precision = True\n\n def init_use_nesterov(self):\n self.use_nesterov = True\n\n\nclass TestSparseMomentumOpMultiPrecision2(TestSparseMomentumOp):\n def init_dtype(self):\n self.dtype = np.float16\n self.index_dtype = np.int32\n\n def init_multi_precision(self):\n self.multi_precision = True\n\n def init_use_nesterov(self):\n self.use_nesterov = False\n\n\nclass TestSparseMomentumOpMultiPrecision3(TestSparseMomentumOp):\n def init_dtype(self):\n self.dtype = np.float16\n self.index_dtype = np.int64\n\n def init_multi_precision(self):\n self.multi_precision = True\n\n def init_use_nesterov(self):\n self.use_nesterov = False\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom .. import functional as F\nfrom paddle.nn import Layer\nfrom paddle.nn.initializer import Normal\nfrom ..functional.conv import _update_padding_nd\nfrom ...fluid.layers import utils\n\n__all__ = []\n\n\nclass _Conv3D(Layer):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n subm=False,\n padding_mode='zeros',\n weight_attr=None,\n bias_attr=None,\n data_format=\"NDHWC\"):\n super(_Conv3D, self).__init__()\n assert weight_attr is not False, \"weight_attr should not be False in Conv.\"\n self._param_attr = weight_attr\n self._bias_attr = bias_attr\n self._groups = groups\n self._in_channels = in_channels\n self._out_channels = out_channels\n self._data_format = data_format\n self._subm = subm\n\n assert padding_mode == 'zeros', \"Currently, only support padding_mode='zeros'\"\n assert groups == 1, \"Currently, only support groups=1\"\n\n valid_format = {'NDHWC'}\n if data_format not in valid_format:\n raise ValueError(\n \"data_format must be one of {}, but got data_format='{}'\".\n format(valid_format, data_format))\n\n channel_last = data_format == \"NDHWC\"\n\n dims = 3\n self._stride = utils.convert_to_list(stride, dims, 'stride')\n self._dilation = utils.convert_to_list(dilation, dims, 'dilation')\n self._kernel_size = utils.convert_to_list(kernel_size, dims,\n 'kernel_size')\n self._padding = padding\n self._padding_mode = padding_mode\n self._updated_padding, self._padding_algorithm = _update_padding_nd(\n padding, channel_last, dims)\n\n # the sparse conv restricts the shape is [D, H, W, in_channels, out_channels]\n filter_shape = self._kernel_size + [\n self._in_channels, self._out_channels\n ]\n\n def _get_default_param_initializer():\n filter_elem_num = np.prod(self._kernel_size) * self._in_channels\n std = (2.0 / filter_elem_num)**0.5\n return Normal(0.0, std)\n\n self.weight = self.create_parameter(\n shape=filter_shape,\n attr=self._param_attr,\n default_initializer=_get_default_param_initializer())\n #self.bias = self.create_parameter(\n # attr=self._bias_attr, shape=[self._out_channels], is_bias=True)\n self.bias = None\n\n def forward(self, x):\n out = F.conv._conv3d(\n x,\n self.weight,\n bias=self.bias,\n stride=self._stride,\n padding=self._updated_padding,\n dilation=self._dilation,\n groups=self._groups,\n subm=self._subm,\n data_format=self._data_format)\n return out\n\n def extra_repr(self):\n main_str = '{_in_channels}, {_out_channels}, kernel_size={_kernel_size}'\n if self._stride != [1] * len(self._stride):\n main_str += ', stride={_stride}'\n if self._padding != 0:\n main_str += ', padding={_padding}'\n if self._padding_mode != 'zeros':\n main_str += ', padding_mode={_padding_mode}'\n if self._dilation != [1] * len(self._dilation):\n main_str += ', dilation={_dilation}'\n if self._groups != 1:\n main_str += ', groups={_groups}'\n main_str += ', 
data_format={_data_format}'\n return main_str.format(**self.__dict__)\n\n\nclass Conv3D(_Conv3D):\n r\"\"\"\n **Sparse Convolution3d Layer**\n The Sparse convolution3d layer calculates the output based on the input, filter\n and strides, paddings, dilations, groups parameters. Input(Input) and\n Output(Output) are multidimensional SparseCooTensors with a shape of \n :math:`[N, D, H, W, C]` . Where N is batch size, C is the number of\n channels, D is the depth of the feature, H is the height of the feature,\n and W is the width of the feature. If a bias attribute is provided, \n the bias is added to the output of the convolution. \n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = W \ast X + b\n\n In the above equation:\n\n * :math:`X`: Input value, a tensor with NDHWC format.\n * :math:`W`: Filter value, a tensor with DHWCM format.\n * :math:`\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 1-D tensor with shape [M].\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Parameters:\n in_channels(int): The number of input channels in the input image.\n out_channels(int): The number of output channels produced by the convolution.\n kernel_size(int|list|tuple, optional): The size of the convolving kernel.\n stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must\n contain three integers, (stride_D, stride_H, stride_W). Otherwise, the\n stride_D = stride_H = stride_W = stride. The default value is 1.\n padding(int|str|tuple|list, optional): The padding size. Padding could be in one of the following forms.\n 1. a string in ['valid', 'same'].\n 2. an int, which means each spatial dimension(depth, height, width) is zero padded by size of `padding` \n 3. a list[int] or tuple[int] whose length is the number of spatial dimensions, which contains the amount of padding on each side for each spatial dimension. It has the form [pad_d1, pad_d2, ...].\n 4. a list[int] or tuple[int] whose length is 2 * number of spatial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spatial dimensions.\n 5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers corresponds to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).\n The default value is 0.\n dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. The default value is 1.\n groups(int, optional): The groups number of the Conv3D Layer. According to grouped\n convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n the first half of the filters is only connected to the first half\n of the input channels, while the second half of the filters is only\n connected to the second half of the input channels. The default value is 1. Currently, only groups=1 is supported.\n padding_mode(str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Currently, only ``'zeros'`` is supported.\n weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv3d. If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as param_attr. 
If it is set to None, the parameter\n is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is\n :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.\n bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized to zero. The default value is None.\n data_format(str, optional): Data format that specifies the layout of input.\n It can be \"NCDHW\" or \"NDHWC\". Currently, only \"NDHWC\" is supported.\n\n Attribute:\n\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter): the learnable bias of this layer.\n\n Shape:\n\n - x: :math:`(N, D_{in}, H_{in}, W_{in}, C_{in})`\n\n - weight: :math:`(K_{d}, K_{h}, K_{w}, C_{in}, C_{out})`\n\n - bias: :math:`(C_{out})`\n\n - output: :math:`(N, D_{out}, H_{out}, W_{out}, C_{out})`\n\n Where\n\n .. math::\n\n D_{out}&= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (kernel\_size[0] - 1) + 1))}{strides[0]} + 1\n\n H_{out}&= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (kernel\_size[1] - 1) + 1))}{strides[1]} + 1\n\n W_{out}&= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (kernel\_size[2] - 1) + 1))}{strides[2]} + 1\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n from paddle.fluid.framework import _test_eager_guard\n \n with _test_eager_guard():\n indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]\n values = [[1], [2], [3], [4]]\n indices = paddle.to_tensor(indices, dtype='int32')\n values = paddle.to_tensor(values, dtype='float32')\n dense_shape = [1, 1, 3, 4, 1]\n sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True) \n conv = paddle.sparse.Conv3D(1, 1, (1, 3, 3))\n y = conv(sparse_x)\n print(y.shape)\n # (1, 1, 1, 2, 1)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n padding_mode='zeros',\n weight_attr=None,\n bias_attr=None,\n data_format=\"NDHWC\"):\n super(Conv3D, self).__init__(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n subm=False,\n padding_mode=padding_mode,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n\nclass SubmConv3D(_Conv3D):\n r\"\"\"\n **Sparse Submanifold Convolution3d Layer**\n The Sparse submanifold convolution3d layer calculates the output based on the input, filter\n and strides, paddings, dilations, groups parameters. Input(Input) and\n Output(Output) are multidimensional SparseCooTensors with a shape of \n :math:`[N, D, H, W, C]` . Where N is batch size, C is the number of\n channels, D is the depth of the feature, H is the height of the feature,\n and W is the width of the feature. If a bias attribute is provided, \n the bias is added to the output of the convolution.\n For each input :math:`X`, the equation is:\n\n .. 
math::\n\n Out = W \ast X + b\n\n In the above equation:\n\n * :math:`X`: Input value, a tensor with NDHWC format.\n * :math:`W`: Filter value, a tensor with DHWCM format.\n * :math:`\\ast`: Submanifold Convolution operation, refer to the paper: https://arxiv.org/abs/1706.01307.\n * :math:`b`: Bias value, a 1-D tensor with shape [M].\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Parameters:\n in_channels(int): The number of input channels in the input image.\n out_channels(int): The number of output channels produced by the convolution.\n kernel_size(int|list|tuple, optional): The size of the convolving kernel.\n stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must\n contain three integers, (stride_D, stride_H, stride_W). Otherwise, the\n stride_D = stride_H = stride_W = stride. The default value is 1.\n padding(int|str|tuple|list, optional): The padding size. Padding could be in one of the following forms.\n 1. a string in ['valid', 'same'].\n 2. an int, which means each spatial dimension(depth, height, width) is zero padded by size of `padding` \n 3. a list[int] or tuple[int] whose length is the number of spatial dimensions, which contains the amount of padding on each side for each spatial dimension. It has the form [pad_d1, pad_d2, ...].\n 4. a list[int] or tuple[int] whose length is 2 * number of spatial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spatial dimensions.\n 5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers corresponds to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).\n The default value is 0.\n dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. The default value is 1.\n groups(int, optional): The groups number of the Conv3D Layer. According to grouped\n convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n the first half of the filters is only connected to the first half\n of the input channels, while the second half of the filters is only\n connected to the second half of the input channels. The default value is 1.\n padding_mode(str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Currently, only ``'zeros'`` is supported.\n weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv3d. If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as param_attr. If it is set to None, the parameter\n is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is\n :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.\n bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized to zero. The default value is None.\n data_format(str, optional): Data format that specifies the layout of input.\n It can be \"NCDHW\" or \"NDHWC\". 
Currently, only support \"NCDHW\".\n\n Attribute:\n\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter): the learnable bias of this layer.\n\n Shape:\n\n - x: :math:`(N, D_{in}, H_{in}, W_{in}, C_{in})`\n\n - weight: :math:`(K_{d}, K_{h}, K_{w}, C_{in}, C_{out})`\n\n - bias: :math:`(C_{out})`\n\n - output: :math:`(N, D_{out}, H_{out}, W_{out}, C_{out})`\n\n Where\n\n .. math::\n\n D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (kernel\\_size[0] - 1) + 1))}{strides[0]} + 1\n\n H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (kernel\\_size[1] - 1) + 1))}{strides[1]} + 1\n\n W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (kernel\\_size[2] - 1) + 1))}{strides[2]} + 1\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n from paddle.fluid.framework import _test_eager_guard\n \n with _test_eager_guard():\n indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]\n values = [[1], [2], [3], [4]]\n dense_shape = [1, 1, 3, 4, 1]\n indices = paddle.to_tensor(indices, dtype='int32')\n values = paddle.to_tensor(values, dtype='float32')\n sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True) \n subm_conv = paddle.sparse.SubmConv3D(1, 1, (1, 3, 3))\n y = subm_conv(sparse_x)\n print(y.shape)\n # (1, 1, 3, 4, 1)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n padding_mode='zeros',\n weight_attr=None,\n bias_attr=None,\n data_format=\"NDHWC\"):\n super(SubmConv3D, self).__init__(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n subm=True,\n padding_mode=padding_mode,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n",
"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nimport paddle\n\n\nclass TestLogspaceOpCommonCase(OpTest):\n def setUp(self):\n self.op_type = \"logspace\"\n dtype = 'float32'\n self.inputs = {\n 'Start': np.array([0]).astype(dtype),\n 'Stop': np.array([10]).astype(dtype),\n 'Num': np.array([11]).astype('int32'),\n 'Base': np.array([2]).astype(dtype),\n }\n self.attrs = {'dtype': int(paddle.float32)}\n\n self.outputs = {'Out': np.power(2, np.arange(0, 11)).astype(dtype)}\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestLogspaceOpReverseCase(OpTest):\n def setUp(self):\n self.op_type = \"logspace\"\n dtype = 'float32'\n self.inputs = {\n 'Start': np.array([10]).astype(dtype),\n 'Stop': np.array([0]).astype(dtype),\n 'Num': np.array([11]).astype('int32'),\n 'Base': np.array([2]).astype(dtype)\n }\n self.attrs = {'dtype': int(paddle.float32)}\n\n self.outputs = {'Out': np.power(2, np.arange(10, -1, -1)).astype(dtype)}\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestLogspaceOpNumOneCase(OpTest):\n def setUp(self):\n self.op_type = \"logspace\"\n dtype = 'float32'\n self.inputs = {\n 'Start': np.array([10]).astype(dtype),\n 'Stop': np.array([0]).astype(dtype),\n 'Num': np.array([1]).astype('int32'),\n 'Base': np.array([2]).astype(dtype)\n }\n self.attrs = {'dtype': int(paddle.float32)}\n\n self.outputs = {'Out': np.power(2, np.array(10)).astype(dtype)}\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestLogspaceOpMinusBaseCase(OpTest):\n def setUp(self):\n self.op_type = \"logspace\"\n dtype = 'float32'\n self.inputs = {\n 'Start': np.array([0]).astype(dtype),\n 'Stop': np.array([10]).astype(dtype),\n 'Num': np.array([11]).astype('int32'),\n 'Base': np.array([-2]).astype(dtype),\n }\n self.attrs = {'dtype': int(paddle.float32)}\n\n self.outputs = {'Out': np.power(-2, np.arange(0, 11)).astype(dtype)}\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestLogspaceOpZeroBaseCase(OpTest):\n def setUp(self):\n self.op_type = \"logspace\"\n dtype = 'float32'\n self.inputs = {\n 'Start': np.array([0]).astype(dtype),\n 'Stop': np.array([10]).astype(dtype),\n 'Num': np.array([11]).astype('int32'),\n 'Base': np.array([0]).astype(dtype),\n }\n self.attrs = {'dtype': int(paddle.float32)}\n\n self.outputs = {'Out': np.power(0, np.arange(0, 11)).astype(dtype)}\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestLogspaceAPI(unittest.TestCase):\n def test_variable_input1(self):\n paddle.enable_static()\n prog = paddle.static.Program()\n with paddle.static.program_guard(prog):\n start = paddle.full(shape=[1], fill_value=0, dtype='float32')\n stop = paddle.full(shape=[1], fill_value=10, dtype='float32')\n num = paddle.full(shape=[1], fill_value=5, dtype='int32')\n base = paddle.full(shape=[1], fill_value=2, dtype='float32')\n out = 
paddle.logspace(start, stop, num, base, dtype='float32')\n\n exe = paddle.static.Executor()\n res = exe.run(prog, fetch_list=[out])\n np_res = np.logspace(0, 10, 5, base=2, dtype='float32')\n self.assertEqual((res == np_res).all(), True)\n paddle.disable_static()\n\n def test_variable_input2(self):\n paddle.disable_static()\n start = paddle.full(shape=[1], fill_value=0, dtype='float32')\n stop = paddle.full(shape=[1], fill_value=10, dtype='float32')\n num = paddle.full(shape=[1], fill_value=5, dtype='int32')\n base = paddle.full(shape=[1], fill_value=2, dtype='float32')\n out = paddle.logspace(start, stop, num, base, dtype='float32')\n np_res = np.logspace(0, 10, 5, base=2, dtype='float32')\n self.assertEqual((out.numpy() == np_res).all(), True)\n paddle.enable_static()\n\n def test_dtype(self):\n paddle.enable_static()\n prog = paddle.static.Program()\n with paddle.static.program_guard(prog):\n out_1 = paddle.logspace(0, 10, 5, 2, dtype='float32')\n out_2 = paddle.logspace(0, 10, 5, 2, dtype=np.float32)\n\n exe = paddle.static.Executor()\n res_1, res_2 = exe.run(prog, fetch_list=[out_1, out_2])\n assert np.array_equal(res_1, res_2)\n paddle.disable_static()\n\n def test_name(self):\n with paddle.static.program_guard(paddle.static.Program()):\n out = paddle.logspace(\n 0, 10, 5, 2, dtype='float32', name='logspace_res')\n assert 'logspace_res' in out.name\n\n def test_imperative(self):\n paddle.disable_static()\n out1 = paddle.logspace(0, 10, 5, 2, dtype='float32')\n np_out1 = np.logspace(0, 10, 5, base=2, dtype='float32')\n out2 = paddle.logspace(0, 10, 5, 2, dtype='int32')\n np_out2 = np.logspace(0, 10, 5, base=2, dtype='int32')\n out3 = paddle.logspace(0, 10, 200, 2, dtype='int32')\n np_out3 = np.logspace(0, 10, 200, base=2, dtype='int32')\n paddle.enable_static()\n self.assertEqual((out1.numpy() == np_out1).all(), True)\n self.assertEqual((out2.numpy() == np_out2).all(), True)\n self.assertEqual((out3.numpy() == np_out3).all(), True)\n\n\nclass TestLogspaceOpError(unittest.TestCase):\n def test_errors(self):\n with paddle.static.program_guard(paddle.static.Program()):\n\n def test_dtype():\n paddle.logspace(0, 10, 1, 2, dtype=\"int8\")\n\n self.assertRaises(TypeError, test_dtype)\n\n def test_dtype1():\n paddle.logspace(0, 10, 1.33, 2, dtype=\"int32\")\n\n self.assertRaises(TypeError, test_dtype1)\n\n def test_start_type():\n paddle.logspace([0], 10, 1, 2, dtype=\"float32\")\n\n self.assertRaises(TypeError, test_start_type)\n\n def test_end_type():\n paddle.logspace(0, [10], 1, 2, dtype=\"float32\")\n\n self.assertRaises(TypeError, test_end_type)\n\n def test_num_type():\n paddle.logspace(0, 10, [0], 2, dtype=\"float32\")\n\n self.assertRaises(TypeError, test_num_type)\n\n def test_start_dtype():\n start = paddle.static.data(\n shape=[1], dtype=\"float64\", name=\"start\")\n paddle.logspace(start, 10, 1, 2, dtype=\"float32\")\n\n self.assertRaises(ValueError, test_start_dtype)\n\n def test_end_dtype():\n end = paddle.static.data(shape=[1], dtype=\"float64\", name=\"end\")\n paddle.logspace(0, end, 1, 2, dtype=\"float32\")\n\n self.assertRaises(ValueError, test_end_dtype)\n\n def test_num_dtype():\n num = paddle.static.data(\n shape=[1], dtype=\"float32\", name=\"step\")\n paddle.logspace(0, 10, num, 2, dtype=\"float32\")\n\n self.assertRaises(TypeError, test_num_dtype)\n\n def test_base_dtype():\n base = paddle.static.data(\n shape=[1], dtype=\"float64\", name=\"end\")\n paddle.logspace(0, 10, 1, base, dtype=\"float32\")\n\n self.assertRaises(ValueError, test_base_dtype)\n\n\nif 
__name__ == \"__main__\":\n unittest.main()\n",
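# A minimal numpy-only sketch (not part of the test file above) of the
# reference semantics those tests check: paddle.logspace(start, stop, num, base)
# should match base ** linspace(start, stop, num), i.e. np.logspace.
import numpy as np

def logspace_ref(start, stop, num, base, dtype='float32'):
    # num == 1 degenerates to [base ** start], as in TestLogspaceOpNumOneCase.
    return np.logspace(start, stop, num, base=base, dtype=dtype)

assert np.allclose(logspace_ref(0, 10, 11, 2),
                   np.power(2.0, np.arange(0, 11)))
assert np.allclose(logspace_ref(10, 0, 11, 2),
                   np.power(2.0, np.arange(10, -1, -1)))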
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport typing\nimport enum\nimport sys\nimport re\nimport inspect\nimport functools\nimport contextlib\nimport collections\nimport numpy as np\nimport paddle\nfrom paddle.autograd.functional import _as_tensors\n\n\n##########################################################\n# Finite Difference Utils\n##########################################################\ndef _product(t):\n if isinstance(t, int):\n return t\n else:\n return np.product(t)\n\n\ndef _get_item(t, idx):\n assert isinstance(\n t,\n paddle.fluid.framework.Variable), \"The first argument t must be Tensor.\"\n assert isinstance(idx,\n int), \"The second argument idx must be an int number.\"\n flat_t = paddle.reshape(t, [-1])\n return flat_t.__getitem__(idx)\n\n\ndef _set_item(t, idx, value):\n assert isinstance(\n t,\n paddle.fluid.framework.Variable), \"The first argument t must be Tensor.\"\n assert isinstance(idx,\n int), \"The second argument idx must be an int number.\"\n flat_t = paddle.reshape(t, [-1])\n flat_t.__setitem__(idx, value)\n return paddle.reshape(flat_t, t.shape)\n\n\ndef _compute_numerical_jacobian(func, xs, delta, np_dtype):\n xs = list(_as_tensors(xs))\n ys = list(_as_tensors(func(*xs)))\n fin_size = len(xs)\n fout_size = len(ys)\n jacobian = list([] for _ in range(fout_size))\n for i in range(fout_size):\n jac_i = list([] for _ in range(fin_size))\n for j in range(fin_size):\n jac_i[j] = np.zeros(\n (_product(ys[i].shape), _product(xs[j].shape)), dtype=np_dtype)\n jacobian[i] = jac_i\n\n for j in range(fin_size):\n for q in range(_product(xs[j].shape)):\n orig = _get_item(xs[j], q)\n x_pos = orig + delta\n xs[j] = _set_item(xs[j], q, x_pos)\n ys_pos = _as_tensors(func(*xs))\n\n x_neg = orig - delta\n xs[j] = _set_item(xs[j], q, x_neg)\n ys_neg = _as_tensors(func(*xs))\n\n xs[j] = _set_item(xs[j], q, orig)\n\n for i in range(fout_size):\n for p in range(_product(ys[i].shape)):\n y_pos = _get_item(ys_pos[i], p)\n y_neg = _get_item(ys_neg[i], p)\n jacobian[i][j][p][q] = (y_pos - y_neg) / delta / 2.\n return jacobian\n\n\ndef _compute_numerical_hessian(func, xs, delta, np_dtype):\n xs = list(_as_tensors(xs))\n ys = list(_as_tensors(func(*xs)))\n fin_size = len(xs)\n hessian = list([] for _ in range(fin_size))\n for i in range(fin_size):\n hessian_i = list([] for _ in range(fin_size))\n for j in range(fin_size):\n hessian_i[j] = np.zeros(\n (_product(xs[i].shape), _product(xs[j].shape)), dtype=np_dtype)\n hessian[i] = hessian_i\n\n for i in range(fin_size):\n for p in range(_product(xs[i].shape)):\n for j in range(fin_size):\n for q in range(_product(xs[j].shape)):\n orig = _get_item(xs[j], q)\n x_pos = orig + delta\n xs[j] = _set_item(xs[j], q, x_pos)\n jacobian_pos = _compute_numerical_jacobian(func, xs, delta,\n np_dtype)\n x_neg = orig - delta\n xs[j] = _set_item(xs[j], q, x_neg)\n jacobian_neg = _compute_numerical_jacobian(func, xs, delta,\n np_dtype)\n xs[j] = _set_item(xs[j], q, orig)\n 
hessian[i][j][p][q] = (\n jacobian_pos[0][i][0][p] - jacobian_neg[0][i][0][p]\n ) / delta / 2.\n return hessian\n\n\ndef concat_to_matrix(xs, is_batched=False):\n \"\"\"Concats a tuple of tuple of Jacobian/Hessian matrix into one matrix\"\"\"\n rows = []\n for i in range(len(xs)):\n rows.append(np.concatenate([x for x in xs[i]], -1))\n return np.concatenate(rows, 1) if is_batched else np.concatenate(rows, 0)\n\n\ndef _compute_numerical_batch_jacobian(func,\n xs,\n delta,\n np_dtype,\n merge_batch=True):\n no_batch_jacobian = _compute_numerical_jacobian(func, xs, delta, np_dtype)\n xs = list(_as_tensors(xs))\n ys = list(_as_tensors(func(*xs)))\n fin_size = len(xs)\n fout_size = len(ys)\n bs = xs[0].shape[0]\n bat_jac = []\n for i in range(fout_size):\n batch_jac_i = []\n for j in range(fin_size):\n jac = no_batch_jacobian[i][j]\n jac_shape = jac.shape\n out_size = jac_shape[0] // bs\n in_size = jac_shape[1] // bs\n jac = np.reshape(jac, (bs, out_size, bs, in_size))\n batch_jac_i_j = np.zeros(shape=(out_size, bs, in_size))\n for p in range(out_size):\n for b in range(bs):\n for q in range(in_size):\n batch_jac_i_j[p][b][q] = jac[b][p][b][q]\n if merge_batch:\n batch_jac_i_j = np.reshape(batch_jac_i_j, (out_size, -1))\n batch_jac_i.append(batch_jac_i_j)\n bat_jac.append(batch_jac_i)\n\n return bat_jac\n\n\ndef _compute_numerical_batch_hessian(func, xs, delta, np_dtype):\n xs = list(_as_tensors(xs))\n batch_size = xs[0].shape[0]\n fin_size = len(xs)\n hessian = []\n for b in range(batch_size):\n x_l = []\n for j in range(fin_size):\n x_l.append(paddle.reshape(xs[j][b], shape=[1, -1]))\n hes_b = _compute_numerical_hessian(func, x_l, delta, np_dtype)\n if fin_size == 1:\n hessian.append(hes_b[0][0])\n else:\n hessian.append(hes_b)\n\n hessian_res = []\n for index in range(fin_size):\n x_reshape = paddle.reshape(xs[index], shape=[batch_size, -1])\n for index_ in range(fin_size):\n for i in range(x_reshape.shape[1]):\n tmp = []\n for j in range(batch_size):\n if fin_size == 1:\n tmp.extend(hessian[j][i])\n else:\n tmp.extend(hessian[j][i][index_][index])\n hessian_res.append(tmp)\n if fin_size == 1:\n return hessian_res\n\n hessian_result = []\n mid = len(hessian_res) // 2\n for i in range(mid):\n hessian_result.append(\n np.stack(\n (hessian_res[i], hessian_res[mid + i]), axis=0))\n return hessian_result\n\n\ndef _compute_numerical_vjp(func, xs, v, delta, np_dtype):\n xs = _as_tensors(xs)\n jacobian = np.array(_compute_numerical_jacobian(func, xs, delta, np_dtype))\n if v is None:\n v = [paddle.ones_like(x) for x in xs]\n flat_v = np.array([v_el.numpy().reshape(-1) for v_el in v])\n vjp = [np.zeros((_product(x.shape)), dtype=np_dtype) for x in xs]\n for j in range(len(xs)):\n for q in range(_product(xs[j].shape)):\n vjp[j][q] = np.sum(jacobian[:, j, :, q].reshape(flat_v.shape) *\n flat_v)\n vjp = [vjp[j].reshape(xs[j].shape) for j in range(len(xs))]\n return vjp\n\n\ndef _compute_numerical_vhp(func, xs, v, delta, np_dtype):\n xs = list(_as_tensors(xs))\n hessian = np.array(_compute_numerical_hessian(func, xs, delta, np_dtype))\n flat_v = np.array([v_el.numpy().reshape(-1) for v_el in v])\n vhp = [np.zeros((_product(x.shape)), dtype=np_dtype) for x in xs]\n for j in range(len(xs)):\n for q in range(_product(xs[j].shape)):\n vhp[j][q] = np.sum(hessian[:, j, :, q].reshape(flat_v.shape) *\n flat_v)\n vhp = [vhp[j].reshape(xs[j].shape) for j in range(len(xs))]\n return vhp\n\n\n##########################################################\n# TestCases of different 
functions.\n##########################################################\ndef reduce(x):\n return paddle.sum(x)\n\n\ndef reduce_dim(x):\n return paddle.sum(x, axis=0)\n\n\ndef matmul(x, y):\n return paddle.matmul(x, y)\n\n\ndef mul(x, y):\n return x * y\n\n\ndef pow(x, y):\n return paddle.pow(x, y)\n\n\ndef o2(x, y):\n return paddle.multiply(x, y), paddle.matmul(x, y.t())\n\n\ndef unuse(x, y):\n return paddle.sum(x)\n\n\ndef nested(x):\n def inner(y):\n return x * y\n\n return inner\n\n\ndef square(x):\n return x * x\n\n\n##########################################################\n# Parameterized Test Utils.\n##########################################################\n\nTEST_CASE_NAME = 'suffix'\n\n\ndef place(devices, key='place'):\n \"\"\"A decorator for a test class which makes the class run on different\n devices.\n\n Args:\n devices (Sequence[Paddle.CUDAPlace|Paddle.CPUPlace]): Device list.\n key (str, optional): Defaults to 'place'.\n \"\"\"\n\n def decorate(cls):\n module = sys.modules[cls.__module__].__dict__\n raw_classes = {\n k: v\n for k, v in module.items() if k.startswith(cls.__name__)\n }\n\n for raw_name, raw_cls in raw_classes.items():\n for d in devices:\n test_cls = dict(raw_cls.__dict__)\n test_cls.update({key: d})\n new_name = raw_name + '.' + d.__class__.__name__\n module[new_name] = type(new_name, (raw_cls, ), test_cls)\n del module[raw_name]\n return cls\n\n return decorate\n\n\ndef parameterize(fields, values=None):\n \"\"\"A decorator for a unittest class which makes the class run on different\n test cases.\n\n Args:\n fields (Sequence): The field name sequence of test cases.\n values (Sequence, optional): The test cases sequence. Defaults to None.\n\n \"\"\"\n fields = [fields] if isinstance(fields, str) else fields\n params = [dict(zip(fields, vals)) for vals in values]\n\n def decorate(cls):\n test_cls_module = sys.modules[cls.__module__].__dict__\n for i, values in enumerate(params):\n test_cls = dict(cls.__dict__)\n values = {\n k: staticmethod(v) if callable(v) else v\n for k, v in values.items()\n }\n test_cls.update(values)\n name = cls.__name__ + str(i)\n name = name + '.'
+ \\\n values.get('suffix') if values.get('suffix') else name\n\n test_cls_module[name] = type(name, (cls, ), test_cls)\n\n for m in list(cls.__dict__):\n if m.startswith(\"test\"):\n delattr(cls, m)\n return cls\n\n return decorate\n\n\n##########################################################\n# Utils for transpose different Jacobian/Hessian matrix format.\n##########################################################\n\n# B is batch size, N is row size, M is column size.\nMatrixFormat = enum.Enum('MatrixFormat', ('NBM', 'BNM', 'NMB', 'NM'))\n\n\ndef _np_transpose_matrix_format(src, src_format, des_format):\n \"\"\"Transpose Jacobian/Hessian matrix format.\"\"\"\n supported_format = (MatrixFormat.NBM, MatrixFormat.BNM, MatrixFormat.NMB)\n if src_format not in supported_format or des_format not in supported_format:\n raise ValueError(\n f\"Supported Jacobian format is {supported_format}, but got src: {src_format}, des: {des_format}\"\n )\n\n src_axis = {c: i for i, c in enumerate(src_format.name)}\n dst_axis = tuple(src_axis[c] for c in des_format.name)\n\n return np.transpose(src, dst_axis)\n\n\ndef _np_concat_matrix_sequence(src, src_format=MatrixFormat.NM):\n \"\"\"Convert a sequence of sequence of Jacobian/Hessian matrix into one huge \n matrix.\"\"\"\n\n def concat_col(xs):\n if src_format in (MatrixFormat.NBM, MatrixFormat.BNM, MatrixFormat.NM):\n return np.concatenate(xs, axis=-1)\n else:\n return np.concatenate(xs, axis=1)\n\n def concat_row(xs):\n if src_format in (MatrixFormat.NBM, MatrixFormat.NM, MatrixFormat.NMB):\n return np.concatenate(xs, axis=0)\n else:\n return np.concatenate(xs, axis=1)\n\n supported_format = (MatrixFormat.NBM, MatrixFormat.BNM, MatrixFormat.NMB,\n MatrixFormat.NM)\n if src_format not in supported_format:\n raise ValueError(\n f\"Supported Jacobian format is {supported_format}, but got {src_format}\"\n )\n if not isinstance(src, typing.Sequence):\n return src\n if not isinstance(src[0], typing.Sequence):\n src = [src]\n return concat_row(tuple(concat_col(xs) for xs in src))\n",
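# A minimal numpy-only sketch (not part of the file above) of the
# central-difference scheme implemented by _compute_numerical_jacobian,
# reduced to a single flat input. `func` maps a 1-D array to a 1-D array;
# the names here are illustrative only.
import numpy as np

def numerical_jacobian(func, x, delta=1e-4):
    x = np.asarray(x, dtype=np.float64)
    y = np.asarray(func(x))
    jac = np.zeros((y.size, x.size))
    for q in range(x.size):
        x_pos, x_neg = x.copy(), x.copy()
        x_pos[q] += delta
        x_neg[q] -= delta
        # Same stencil as above: (f(x + delta) - f(x - delta)) / (2 * delta)
        jac[:, q] = (np.asarray(func(x_pos)) - np.asarray(func(x_neg))) / (2. * delta)
    return jac

# The Jacobian of x -> x * x (elementwise) is diag(2x):
assert np.allclose(
    numerical_jacobian(lambda x: x * x, np.array([1., 2.])),
    np.diag([2., 4.]), atol=1e-6)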
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport math\nfrom . import framework\nfrom . import core\nfrom .framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph, default_main_program, _current_expected_place\nimport numpy as np\nfrom .core import VarDesc\nfrom . import unique_name\nfrom .data_feeder import check_variable_and_dtype, check_type, check_dtype\nfrom paddle import _C_ops\n\n__all__ = [\n 'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',\n 'MSRA', 'ConstantInitializer', 'UniformInitializer', 'NormalInitializer',\n 'TruncatedNormalInitializer', 'XavierInitializer', 'BilinearInitializer',\n 'MSRAInitializer', 'NumpyArrayInitializer', 'set_global_initializer'\n]\n\n_global_weight_initializer_ = None\n_global_bias_initializer_ = None\n\n\nclass Initializer(object):\n \"\"\"Base class for variable initializers\n\n Defines the common interface of variable initializers.\n They add operations to the init program that are used\n to initialize variables. Users should not use this class\n directly, but need to use one of its implementations.\n \"\"\"\n\n def __init__(self):\n pass\n\n def __call__(self, param, block=None):\n \"\"\"Add corresponding initialization operations to the network\n \"\"\"\n raise NotImplementedError()\n\n def _check_block(self, block):\n if block is None:\n block = default_main_program().global_block()\n\n return block\n\n def _compute_fans(self, var):\n \"\"\"Compute the fan_in and the fan_out for layers\n\n This method computes the fan_in and the fan_out\n for neural network layers, if not specified. It is\n not possible to perfectly estimate fan_in and fan_out.\n This method will estimate it correctly for matrix multiply and\n convolutions.\n\n Args:\n var: variable for which fan_in and fan_out have to be computed\n\n Returns:\n tuple of two integers (fan_in, fan_out)\n \"\"\"\n shape = var.shape\n if not shape or len(shape) == 0:\n fan_in = fan_out = 1\n elif len(shape) == 1:\n fan_in = fan_out = shape[0]\n elif len(shape) == 2:\n # This is the case for simple matrix multiply\n fan_in = shape[0]\n fan_out = shape[1]\n else:\n # Assume this to be a convolutional kernel\n # In PaddlePaddle, the shape of the kernel is like:\n # [num_filters, num_filter_channels, ...] where the remaining\n # dimensions are the filter_size\n receptive_field_size = np.prod(shape[2:])\n fan_in = shape[1] * receptive_field_size\n fan_out = shape[0] * receptive_field_size\n\n return (fan_in, fan_out)\n\n\nclass ConstantInitializer(Initializer):\n \"\"\"Implements the constant initializer\n\n Args:\n value (float32): constant value to initialize the variable \n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n paddle.enable_static()\n x = fluid.data(name=\"data\", shape=[8, 32, 32], dtype=\"float32\")\n fc = fluid.layers.fc(\n input=x,\n size=10,\n param_attr=fluid.initializer.Constant(value=2.0))\n\n \"\"\"\n\n def __init__(self, value=0.0, force_cpu=False):\n assert value is not None\n super(ConstantInitializer, self).__init__()\n self._value = value\n self._force_cpu = force_cpu\n\n def __call__(self, var, block=None):\n \"\"\"Initialize the input tensor with constant.\n\n Args:\n var(Tensor): Tensor that needs to be initialized.\n block(Block, optional): The block in which initialization ops\n should be added. Used in static graph only, default None.\n\n Returns:\n The initialization op\n \"\"\"\n block = self._check_block(block)\n\n assert (isinstance(var, framework.Variable) or\n isinstance(var, framework.EagerParamBase))\n assert isinstance(block, framework.Block)\n\n if framework._non_static_mode():\n _C_ops.fill_constant(var, 'value',\n float(self._value), 'force_cpu',\n self._force_cpu, 'dtype',\n int(var.dtype), 'str_value',\n str(float(self._value)), 'shape', var.shape)\n return None\n else:\n # fill constant should set the \"str_value\" to preserve precision\n op = block.append_op(\n type=\"fill_constant\",\n outputs={\"Out\": var},\n attrs={\n \"shape\": var.shape,\n \"dtype\": int(var.dtype),\n \"value\": float(self._value),\n 'str_value': str(float(self._value)),\n 'force_cpu': self._force_cpu\n },\n stop_gradient=True)\n\n var.op = op\n return op\n\n\nclass UniformInitializer(Initializer):\n \"\"\"Implements the random uniform distribution initializer\n\n Args:\n low (float): lower boundary of the uniform distribution\n high (float): upper boundary of the uniform distribution\n seed (int): random seed\n diag_num (int): the number of diagonal elements to initialize.\n If set to 0, diagonal initialization will be not performed.\n diag_step (int): Step size between two diagonal elements,\n which is generally the width of the square matrix.\n diag_val (float): the value of the diagonal element to be initialized,\n default 1.0. It takes effect only if the diag_num is greater than 0.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n x = fluid.data(name='x', shape=[None, 1], dtype='float32')\n fc = fluid.layers.fc(input=x, size=10,\n \t\tparam_attr=fluid.initializer.Uniform(low=-0.5, high=0.5))\n \"\"\"\n\n def __init__(self,\n low=-1.0,\n high=1.0,\n seed=0,\n diag_num=0,\n diag_step=0,\n diag_val=1.0):\n assert low is not None\n assert high is not None\n assert high >= low\n assert seed is not None\n assert diag_num is not None\n assert diag_step is not None\n assert diag_val is not None\n if diag_num > 0 or diag_step > 0:\n assert (diag_num > 0 and diag_step > 0)\n super(UniformInitializer, self).__init__()\n self._low = low\n self._high = high\n self._seed = seed\n self._diag_num = diag_num\n self._diag_step = diag_step\n self._diag_val = diag_val\n\n def __call__(self, var, block=None):\n \"\"\"Initialize the input tensor with Uniform distribution.\n\n Args:\n var(Tensor): Tensor that needs to be initialized.\n block(Block, optional): The block in which initialization ops\n should be added. 
Used in static graph only, default None.\n\n Returns:\n The initialization op\n \"\"\"\n block = self._check_block(block)\n\n assert isinstance(block, framework.Block)\n check_variable_and_dtype(var, \"Out\",\n [\"uint16\", \"float16\", \"float32\", \"float64\"],\n \"uniform_random\")\n\n if self._seed == 0:\n self._seed = block.program.random_seed\n\n # to be compatible of fp16 initializers\n if var.dtype == VarDesc.VarType.FP16:\n out_dtype = VarDesc.VarType.FP32\n out_var = block.create_var(\n name=unique_name.generate(\".\".join(\n ['uniform_random', var.name, 'tmp'])),\n shape=var.shape,\n dtype=out_dtype,\n type=VarDesc.VarType.LOD_TENSOR,\n persistable=False)\n else:\n out_dtype = var.dtype\n out_var = var\n\n if framework._non_static_mode():\n out_var = _C_ops.uniform_random(\n 'shape', var.shape, 'min', self._low, 'max', self._high, 'seed',\n self._seed, 'dtype', out_dtype, 'diag_num', self._diag_num,\n 'diag_step', self._diag_step, 'diag_val', self._diag_val)\n if var.dtype == VarDesc.VarType.FP16:\n var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,\n 'out_dtype', var.dtype)\n var_tmp._share_underline_tensor_to(var)\n else:\n out_var._share_underline_tensor_to(var)\n return None\n else:\n op = block.append_op(\n type=\"uniform_random\",\n inputs={},\n outputs={\"Out\": out_var},\n attrs={\n \"shape\": var.shape,\n \"dtype\": out_dtype,\n \"min\": self._low,\n \"max\": self._high,\n \"seed\": self._seed,\n \"diag_num\": self._diag_num,\n \"diag_step\": self._diag_step,\n \"diag_val\": self._diag_val\n },\n stop_gradient=True)\n\n if var.dtype == VarDesc.VarType.FP16:\n block.append_op(\n type=\"cast\",\n inputs={\"X\": out_var},\n outputs={\"Out\": var},\n attrs={\"in_dtype\": out_var.dtype,\n \"out_dtype\": var.dtype})\n\n var.op = op\n return op\n\n\nclass NormalInitializer(Initializer):\n \"\"\"Implements the Random Normal(Gaussian) distribution initializer\n\n Args:\n loc (float): mean of the normal distribution\n scale (float): standard deviation of the normal distribution\n seed (int): random seed\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n x = fluid.data(name=\"data\", shape=[None, 32, 32], dtype=\"float32\")\n fc = fluid.layers.fc(input=x, size=10,\n param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0))\n\n \"\"\"\n\n def __init__(self, loc=0.0, scale=1.0, seed=0):\n assert loc is not None\n assert scale is not None\n assert seed is not None\n super(NormalInitializer, self).__init__()\n self._mean = loc\n self._std_dev = scale\n self._seed = seed\n\n def __call__(self, var, block=None):\n \"\"\"Initialize the input tensor with Normal distribution.\n\n Args:\n var(Tensor): Tensor that needs to be initialized.\n block(Block, optional): The block in which initialization ops\n should be added. 
Used in static graph only, default None.\n\n Returns:\n The initialization op\n \"\"\"\n block = self._check_block(block)\n\n assert isinstance(block, framework.Block)\n\n check_variable_and_dtype(var, \"Out\",\n [\"uint16\", \"float16\", \"float32\", \"float64\"],\n \"guassian_random\")\n\n # to be compatible of fp16 initalizers\n if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:\n out_dtype = VarDesc.VarType.FP32\n out_var = block.create_var(\n name=unique_name.generate(\".\".join(\n ['normal_init', var.name, 'tmp'])),\n shape=var.shape,\n dtype=out_dtype,\n type=VarDesc.VarType.LOD_TENSOR,\n persistable=False)\n else:\n out_dtype = var.dtype\n out_var = var\n\n if self._seed == 0:\n self._seed = block.program.random_seed\n\n if in_dygraph_mode():\n place = _current_expected_place()\n out_var = _C_ops.final_state_gaussian_random(\n var.shape, self._mean, self._std_dev, self._seed, out_dtype,\n place)\n\n if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:\n var_tmp = _C_ops.final_state_cast(out_var, var.dtype)\n var_tmp._share_underline_tensor_to(var)\n else:\n out_var._share_underline_tensor_to(var)\n return None\n\n if _in_legacy_dygraph():\n out_var = _C_ops.gaussian_random(\n 'shape', var.shape, 'dtype', out_dtype, 'mean', self._mean,\n 'std', self._std_dev, 'seed', self._seed, 'use_mkldnn', False)\n\n if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:\n var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,\n 'out_dtype', var.dtype)\n var_tmp._share_underline_tensor_to(var)\n else:\n out_var._share_underline_tensor_to(var)\n return None\n else:\n op = block.append_op(\n type=\"gaussian_random\",\n outputs={\"Out\": out_var},\n attrs={\n \"shape\": var.shape,\n \"dtype\": out_dtype,\n \"mean\": self._mean,\n \"std\": self._std_dev,\n \"seed\": self._seed,\n \"use_mkldnn\": False\n },\n stop_gradient=True)\n\n if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:\n block.append_op(\n type=\"cast\",\n inputs={\"X\": out_var},\n outputs={\"Out\": var},\n attrs={\"in_dtype\": out_var.dtype,\n \"out_dtype\": var.dtype})\n var.op = op\n return op\n\n\nclass TruncatedNormalInitializer(Initializer):\n \"\"\"Implements the Random TruncatedNormal(Gaussian) distribution initializer\n\n Args:\n loc (float): mean of the normal distribution\n scale (float): standard deviation of the normal distribution\n seed (int): random seed\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n x = fluid.data(name='x', shape=[None, 1], dtype='float32')\n fc = fluid.layers.fc(input=x, size=10,\n param_attr=fluid.initializer.TruncatedNormal(loc=0.0, scale=2.0))\n \"\"\"\n\n def __init__(self, loc=0.0, scale=1.0, seed=0):\n assert loc is not None\n assert scale is not None\n assert seed is not None\n super(TruncatedNormalInitializer, self).__init__()\n self._mean = loc\n self._std_dev = scale\n self._seed = seed\n\n def __call__(self, var, block=None):\n \"\"\"Initialize the input tensor with TruncatedNormal distribution.\n\n Args:\n var(Tensor): Tensor that needs to be initialized.\n block(Block, optional): The block in which initialization ops\n should be added. 
Used in static graph only, default None.\n\n Returns:\n The initialization op\n \"\"\"\n block = self._check_block(block)\n\n assert isinstance(var, framework.Variable)\n assert isinstance(block, framework.Block)\n\n if self._seed == 0:\n self._seed = block.program.random_seed\n\n # to be compatible of fp16 initalizers\n if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:\n out_dtype = VarDesc.VarType.FP32\n out_var = block.create_var(\n name=unique_name.generate(\".\".join(\n ['truncated_gaussian_random', var.name, 'tmp'])),\n shape=var.shape,\n dtype=out_dtype,\n type=VarDesc.VarType.LOD_TENSOR,\n persistable=False)\n else:\n out_dtype = var.dtype\n out_var = var\n\n if in_dygraph_mode():\n out_var = _C_ops.final_state_truncated_gaussian_random(\n var.shape, self._mean, self._std_dev, self._seed, out_dtype,\n _current_expected_place())\n if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:\n var_tmp = _C_ops.final_state_cast(out_var, var.dtype)\n var_tmp._share_underline_tensor_to(var)\n else:\n out_var._share_underline_tensor_to(var)\n return None\n\n if _in_legacy_dygraph():\n out_var = _C_ops.truncated_gaussian_random(\n 'shape', var.shape, 'dtype', out_dtype, 'mean', self._mean,\n 'std', self._std_dev, 'seed', self._seed)\n if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:\n var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,\n 'out_dtype', var.dtype)\n var_tmp._share_underline_tensor_to(var)\n else:\n out_var._share_underline_tensor_to(var)\n return None\n else:\n op = block.append_op(\n type=\"truncated_gaussian_random\",\n outputs={\"Out\": out_var},\n attrs={\n \"shape\": var.shape,\n \"dtype\": out_dtype,\n \"mean\": self._mean,\n \"std\": self._std_dev,\n \"seed\": self._seed\n },\n stop_gradient=True)\n\n if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:\n block.append_op(\n type=\"cast\",\n inputs={\"X\": out_var},\n outputs={\"Out\": var},\n attrs={\"in_dtype\": out_var.dtype,\n \"out_dtype\": var.dtype})\n var.op = op\n return op\n\n\nclass XavierInitializer(Initializer):\n r\"\"\"\n This class implements the Xavier weight initializer from the paper\n `Understanding the difficulty of training deep feedforward neural\n networks <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_\n by Xavier Glorot and Yoshua Bengio.\n\n This initializer is designed to keep the scale of the gradients\n approximately same in all the layers. In case of Uniform distribution,\n the range is [-x, x], where\n\n .. math::\n\n x = \\sqrt{\\\\frac{6.0}{fan\\_in + fan\\_out}}\n\n In case of Normal distribution, the mean is 0 and the standard deviation\n is\n\n .. math::\n\n \\sqrt{\\\\frac{2.0}{fan\\_in + fan\\_out}}\n\n\n Args:\n uniform (bool,default True): whether to use uniform ,if False use normal distribution\n fan_in (float,default None): fan_in for Xavier initialization. If None, it is\n inferred from the variable.\n fan_out (float,default None): fan_out for Xavier initialization. If None, it is\n inferred from the variable.\n seed (int): random seed\n\n Note:\n It is recommended to set fan_in and fan_out to None for most cases.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n queries = fluid.data(name='x', shape=[None,1], dtype='float32')\n fc = fluid.layers.fc(\n input=queries, size=10,\n param_attr=fluid.initializer.Xavier(uniform=False))\n\n \"\"\"\n\n def __init__(self, uniform=True, fan_in=None, fan_out=None, seed=0):\n assert uniform is not None\n assert seed is not None\n super(XavierInitializer, self).__init__()\n self._uniform = uniform\n self._fan_in = fan_in\n self._fan_out = fan_out\n self._seed = seed\n\n def __call__(self, var, block=None):\n \"\"\"Initialize the input tensor with Xavier initialization.\n\n Args:\n var(Tensor): Tensor that needs to be initialized.\n block(Block, optional): The block in which initialization ops\n should be added. Used in static graph only, default None.\n\n Returns:\n The initialization op\n \"\"\"\n block = self._check_block(block)\n\n assert isinstance(block, framework.Block)\n check_variable_and_dtype(var, \"Out\",\n [\"uint16\", \"float16\", \"float32\", \"float64\"],\n \"xavier_init\")\n\n f_in, f_out = self._compute_fans(var)\n\n # If fan_in and fan_out are passed, use them\n fan_in = f_in if self._fan_in is None else self._fan_in\n fan_out = f_out if self._fan_out is None else self._fan_out\n\n if self._seed == 0:\n self._seed = block.program.random_seed\n\n # to be compatible of fp16 initalizers\n if var.dtype == VarDesc.VarType.FP16 or (\n var.dtype == VarDesc.VarType.BF16 and not self._uniform):\n out_dtype = VarDesc.VarType.FP32\n out_var = block.create_var(\n name=unique_name.generate(\".\".join(\n ['xavier_init', var.name, 'tmp'])),\n shape=var.shape,\n dtype=out_dtype,\n type=VarDesc.VarType.LOD_TENSOR,\n persistable=False)\n else:\n out_dtype = var.dtype\n out_var = var\n\n if framework._non_static_mode():\n if self._uniform:\n limit = math.sqrt(6.0 / float(fan_in + fan_out))\n out_var = _C_ops.uniform_random('shape', out_var.shape, 'min',\n -limit, 'max', limit, 'seed',\n self._seed, 'dtype', out_dtype)\n else:\n std = math.sqrt(2.0 / float(fan_in + fan_out))\n\n if in_dygraph_mode():\n place = _current_expected_place()\n out_var = _C_ops.final_state_gaussian_random(\n out_var.shape, 0.0, std, self._seed, out_dtype, place)\n else:\n out_var = _C_ops.gaussian_random(\n 'shape', out_var.shape, 'dtype', out_dtype, 'mean', 0.0,\n 'std', std, 'seed', self._seed)\n\n if var.dtype == VarDesc.VarType.FP16 or (\n var.dtype == VarDesc.VarType.BF16 and not self._uniform):\n var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,\n 'out_dtype', var.dtype)\n var_tmp._share_underline_tensor_to(var)\n else:\n out_var._share_underline_tensor_to(var)\n return None\n else:\n if self._uniform:\n limit = math.sqrt(6.0 / float(fan_in + fan_out))\n op = block.append_op(\n type=\"uniform_random\",\n inputs={},\n outputs={\"Out\": out_var},\n attrs={\n \"shape\": out_var.shape,\n \"dtype\": out_dtype,\n \"min\": -limit,\n \"max\": limit,\n \"seed\": self._seed\n },\n stop_gradient=True)\n else:\n std = math.sqrt(2.0 / float(fan_in + fan_out))\n op = block.append_op(\n type=\"gaussian_random\",\n outputs={\"Out\": out_var},\n attrs={\n \"shape\": out_var.shape,\n \"dtype\": out_dtype,\n \"mean\": 0.0,\n \"std\": std,\n \"seed\": self._seed\n },\n stop_gradient=True)\n\n if var.dtype == VarDesc.VarType.FP16 or (\n var.dtype == VarDesc.VarType.BF16 and not self._uniform):\n block.append_op(\n type=\"cast\",\n inputs={\"X\": out_var},\n outputs={\"Out\": var},\n attrs={\"in_dtype\": out_var.dtype,\n \"out_dtype\": var.dtype})\n\n var.op = op\n return op\n\n\nclass 
MSRAInitializer(Initializer):\n r\"\"\"Implements the MSRA initializer a.k.a. Kaiming Initializer\n\n This class implements the weight initialization from the paper\n `Delving Deep into Rectifiers: Surpassing Human-Level Performance on\n ImageNet Classification <https://arxiv.org/abs/1502.01852>`_\n by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a\n robust initialization method that particularly considers the rectifier\n nonlinearities. In case of Uniform distribution, the range is [-x, x], where\n\n .. math::\n\n x = \\sqrt{\\\\frac{6.0}{fan\\_in}}\n\n In case of Normal distribution, the mean is 0 and the standard deviation\n is\n\n .. math::\n\n \\sqrt{\\\\frac{2.0}{fan\\_in}}\n\n Args:\n uniform (bool): whether to use uniform or normal distribution\n fan_in (float32|None): fan_in for MSRAInitializer. If None, it is\\\n inferred from the variable. default is None.\n seed (int32): random seed\n\n Note:\n It is recommended to set fan_in to None for most cases.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n paddle.enable_static()\n x = fluid.data(name=\"data\", shape=[8, 32, 32], dtype=\"float32\")\n fc = fluid.layers.fc(input=x, size=10,\n param_attr=fluid.initializer.MSRA(uniform=False))\n\n \"\"\"\n\n def __init__(self, uniform=True, fan_in=None, seed=0):\n \"\"\"Constructor for MSRAInitializer\n \"\"\"\n assert uniform is not None\n assert seed is not None\n super(MSRAInitializer, self).__init__()\n self._uniform = uniform\n self._fan_in = fan_in\n self._seed = seed\n\n def __call__(self, var, block=None):\n \"\"\"Initialize the input tensor with MSRA initialization.\n\n Args:\n var(Tensor): Tensor that needs to be initialized.\n block(Block, optional): The block in which initialization ops\n should be added. 
Used in static graph only, default None.\n\n Returns:\n The initialization op\n \"\"\"\n block = self._check_block(block)\n\n assert isinstance(var, framework.Variable)\n assert isinstance(block, framework.Block)\n f_in, f_out = self._compute_fans(var)\n\n # If fan_in is passed, use it\n fan_in = f_in if self._fan_in is None else self._fan_in\n\n if self._seed == 0:\n self._seed = block.program.random_seed\n\n # to be compatible of fp16 initalizers\n if var.dtype == VarDesc.VarType.FP16 or (\n var.dtype == VarDesc.VarType.BF16 and not self._uniform):\n out_dtype = VarDesc.VarType.FP32\n out_var = block.create_var(\n name=unique_name.generate(\".\".join(\n ['masra_init', var.name, 'tmp'])),\n shape=var.shape,\n dtype=out_dtype,\n type=VarDesc.VarType.LOD_TENSOR,\n persistable=False)\n else:\n out_dtype = var.dtype\n out_var = var\n\n if framework._non_static_mode():\n if self._uniform:\n limit = math.sqrt(6.0 / float(fan_in))\n out_var = _C_ops.uniform_random('shape', out_var.shape, 'min',\n -limit, 'max', limit, 'seed',\n self._seed, 'dtype',\n int(out_dtype))\n else:\n std = math.sqrt(2.0 / float(fan_in))\n if in_dygraph_mode():\n place = _current_expected_place()\n out_var = _C_ops.final_state_gaussian_random(\n out_var.shape, 0.0, std, self._seed, out_dtype, place)\n else:\n out_var = _C_ops.gaussian_random(\n 'shape', out_var.shape, 'dtype',\n int(out_dtype), 'mean', 0.0, 'std', std, 'seed',\n self._seed)\n\n if var.dtype == VarDesc.VarType.FP16 or (\n var.dtype == VarDesc.VarType.BF16 and not self._uniform):\n var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,\n 'out_dtype', var.dtype)\n var_tmp._share_underline_tensor_to(var)\n else:\n out_var._share_underline_tensor_to(var)\n return None\n else:\n if self._uniform:\n limit = math.sqrt(6.0 / float(fan_in))\n op = block.append_op(\n type=\"uniform_random\",\n inputs={},\n outputs={\"Out\": out_var},\n attrs={\n \"shape\": out_var.shape,\n \"dtype\": int(out_dtype),\n \"min\": -limit,\n \"max\": limit,\n \"seed\": self._seed\n },\n stop_gradient=True)\n\n else:\n std = math.sqrt(2.0 / float(fan_in))\n op = block.append_op(\n type=\"gaussian_random\",\n outputs={\"Out\": out_var},\n attrs={\n \"shape\": out_var.shape,\n \"dtype\": int(out_dtype),\n \"mean\": 0.0,\n \"std\": std,\n \"seed\": self._seed\n },\n stop_gradient=True)\n\n if var.dtype == VarDesc.VarType.FP16 or (\n var.dtype == VarDesc.VarType.BF16 and not self._uniform):\n block.append_op(\n type=\"cast\",\n inputs={\"X\": out_var},\n outputs={\"Out\": var},\n attrs={\"in_dtype\": out_var.dtype,\n \"out_dtype\": var.dtype})\n\n var.op = op\n return op\n\n\nclass BilinearInitializer(Initializer):\n \"\"\"\n This initializer can be used in transposed convolution operator to\n act as upsampling. Users can upsample a feature map with shape of\n (B, C, H, W) by any integer factor. The usage is:\n\n Examples:\n\n .. code-block:: python\n\n import math\n\n import paddle\n import paddle.nn as nn\n from paddle.regularizer import L2Decay\n\n factor = 2\n C = 2\n B = 8\n H = W = 32\n w_attr = paddle.ParamAttr(learning_rate=0.,\n regularizer=L2Decay(0.),\n initializer=nn.initializer.Bilinear())\n data = paddle.rand([B, 3, H, W], dtype='float32')\n conv_up = nn.Conv2DTranspose(3,\n out_channels=C,\n kernel_size=2 * factor - factor % 2,\n padding=int(\n math.ceil((factor - 1) / 2.)),\n stride=factor,\n weight_attr=w_attr,\n bias_attr=False)\n x = conv_up(data)\n\n Where, `out_channels=C` and `groups=C` means this is channel-wise transposed\n convolution. 
The filter shape will be (C, 1, K, K) where K is `kernel_size`,\n This initializer will set a (K, K) interpolation kernel for every channel\n of the filter identically. The resulting shape of the output feature map\n will be (B, C, factor * H, factor * W). Note that the learning rate and the\n weight decay are set to 0 in order to keep coefficient values of bilinear\n interpolation unchanged during training.\n\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructor for BilinearInitializer.\n \"\"\"\n super(BilinearInitializer, self).__init__()\n\n def __call__(self, var, block=None):\n \"\"\"Initialize the input tensor with Bilinear initialization.\n\n Args:\n var(Tensor): Tensor that needs to be initialized.\n block(Block, optional): The block in which initialization ops\n should be added. Used in static graph only, default None.\n\n Returns:\n The initialization op\n \"\"\"\n block = self._check_block(block)\n\n if not isinstance(var, framework.Variable):\n raise ValueError(\"var must be framework.Variable.\")\n\n if not isinstance(block, framework.Block):\n raise ValueError(\"block must be framework.Block.\")\n\n shape = var.shape\n if len(shape) != 4:\n raise ValueError(\"the length of shape must be 4.\")\n if shape[2] != shape[3]:\n raise ValueError(\"shape[2] must be equal to shape[3].\")\n\n weight = np.zeros(np.prod(var.shape), dtype='float32')\n size = shape[3]\n # factor\n f = np.ceil(size / 2.)\n # center\n c = (2 * f - 1 - f % 2) / (2. * f)\n for i in range(np.prod(shape)):\n x = i % size\n y = (i / size) % size\n weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))\n weight = np.reshape(weight, shape)\n\n # to be compatible of fp16 initalizers\n if var.dtype in [\n VarDesc.VarType.FP16, VarDesc.VarType.BF16, VarDesc.VarType.FP64\n ]:\n out_dtype = VarDesc.VarType.FP32\n out_var = block.create_var(\n name=unique_name.generate(\".\".join(\n ['bilinear_init', var.name, 'tmp'])),\n shape=var.shape,\n dtype=out_dtype,\n type=VarDesc.VarType.LOD_TENSOR,\n persistable=False)\n else:\n out_dtype = var.dtype\n out_var = var\n\n if out_dtype == VarDesc.VarType.FP32:\n value_name = \"fp32_values\"\n values = [float(v) for v in weight.flat]\n else:\n raise TypeError(\"Unsupported dtype %s\", var.dtype)\n\n if np.prod(shape) > 1024 * 1024:\n raise ValueError(\"The size of input is too big. \")\n\n if framework._non_static_mode():\n _C_ops.assign_value(out_var, 'shape',\n list(shape), 'dtype', out_dtype, value_name,\n values)\n if var.dtype in [\n VarDesc.VarType.FP16, VarDesc.VarType.BF16,\n VarDesc.VarType.FP64\n ]:\n var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,\n 'out_dtype', var.dtype)\n var_tmp._share_underline_tensor_to(var)\n else:\n out_var._share_underline_tensor_to(var)\n return None\n else:\n op = block.append_op(\n type='assign_value',\n outputs={'Out': [out_var]},\n attrs={\n 'dtype': out_dtype,\n 'shape': list(shape),\n value_name: values\n })\n\n if var.dtype in [\n VarDesc.VarType.FP16, VarDesc.VarType.BF16,\n VarDesc.VarType.FP64\n ]:\n block.append_op(\n type=\"cast\",\n inputs={\"X\": out_var},\n outputs={\"Out\": var},\n attrs={\"in_dtype\": out_var.dtype,\n \"out_dtype\": var.dtype})\n\n var.op = op\n return op\n\n\nclass NumpyArrayInitializer(Initializer):\n \"\"\"Init an parameter with an numpy array\n This op initialize the variable by numpy array.\n\n Args:\n value (numpy): numpy array to initialize the variable\n\n Returns:\n A Tensor variable initialized by numpy.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n x = fluid.data(name=\"x\", shape=[2, 1], dtype='float32')\n fc = fluid.layers.fc(input=x, size=10,\n param_attr=fluid.initializer.NumpyArrayInitializer(numpy.array([1,2])))\n \"\"\"\n\n def __init__(self, value):\n import numpy\n assert isinstance(value, numpy.ndarray)\n super(NumpyArrayInitializer, self).__init__()\n self._value = value\n\n def __call__(self, var, block=None):\n \"\"\"Initialize the input tensor with Numpy array.\n\n Args:\n var(Tensor): Tensor that needs to be initialized.\n block(Block, optional): The block in which initialization ops\n should be added. Used in static graph only, default None.\n\n Returns:\n The initialization op\n \"\"\"\n block = self._check_block(block)\n\n assert isinstance(var, framework.Variable)\n assert isinstance(block, framework.Block)\n\n # to be compatible of fp16 initalizers\n if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:\n out_dtype = VarDesc.VarType.FP32\n np_value = self._value.astype(\"float32\")\n out_var = block.create_var(\n name=unique_name.generate(\".\".join(\n ['numpy_array_init', var.name, 'tmp'])),\n shape=var.shape,\n dtype=out_dtype,\n type=VarDesc.VarType.LOD_TENSOR,\n persistable=False)\n else:\n out_var = var\n out_dtype = var.dtype\n np_value = self._value\n\n if out_dtype == VarDesc.VarType.FP32:\n value_name = \"fp32_values\"\n values = [float(v) for v in np_value.flat]\n elif out_dtype == VarDesc.VarType.INT32:\n value_name = \"int32_values\"\n values = [int(v) for v in np_value.flat]\n else:\n raise ValueError(\"Unsupported dtype %s\", self._value.dtype)\n if self._value.size > 1024 * 1024 * 1024:\n raise ValueError(\"The size of input is too big. Please consider \"\n \"saving it to file and 'load_op' to load it\")\n\n if framework._non_static_mode():\n _C_ops.assign_value(out_var, 'shape',\n list(self._value.shape), 'dtype', out_dtype,\n value_name, values)\n if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:\n var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,\n 'out_dtype', var.dtype)\n var_tmp._share_underline_tensor_to(var)\n else:\n out_var._share_underline_tensor_to(var)\n return None\n else:\n op = block.append_op(\n type='assign_value',\n outputs={'Out': out_var},\n attrs={\n 'dtype': out_dtype,\n 'shape': list(self._value.shape),\n value_name: values\n },\n stop_gradient=True)\n\n if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:\n block.append_op(\n type=\"cast\",\n inputs={\"X\": out_var},\n outputs={\"Out\": var},\n attrs={\"in_dtype\": out_var.dtype,\n \"out_dtype\": var.dtype})\n\n var.op = op\n return op\n\n\ndef set_global_initializer(weight_init, bias_init=None):\n \"\"\"\n This API is used to set up global model parameter initializer in framework.\n\n After this API is invoked, the global initializer will takes effect in subsequent code.\n\n The model parameters include ``weight`` and ``bias`` . 
In the framework, they correspond\n to ``paddle.ParamAttr`` , which is inherited from ``paddle.Tensor`` , and is a persistable Variable.\n This API only takes effect for model parameters, not for variables created through APIs such as\n :ref:`api_fluid_layers_create_global_var` , :ref:`api_fluid_layers_create_tensor`.\n\n If the initializer is also set up by ``param_attr`` or ``bias_attr`` when creating a network layer,\n the global initializer setting here will not take effect because it has a lower priority.\n\n If you want to cancel the global initializer in the framework, please set the global initializer to ``None`` .\n\n Args:\n weight_init (Initializer): set the global initializer for ``weight`` of model parameters.\n bias_init (Initializer, optional): set the global initializer for ``bias`` of model parameters.\n Default: None.\n\n Returns:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.nn as nn\n\n nn.initializer.set_global_initializer(nn.initializer.Uniform(), nn.initializer.Constant())\n x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)\n\n # The weight of conv1 is initialized by Uniform\n # The bias of conv1 is initialized by Constant\n conv1 = nn.Conv2D(4, 6, (3, 3))\n y_var1 = conv1(x_var)\n\n # If param_attr/bias_attr is set too, the global initializer will not take effect\n # The weight of conv2 is initialized by Xavier\n # The bias of conv2 is initialized by Normal\n conv2 = nn.Conv2D(4, 6, (3, 3),\n weight_attr=nn.initializer.XavierUniform(),\n bias_attr=nn.initializer.Normal())\n y_var2 = conv2(x_var)\n\n # Cancel the global initializer in the framework; the change will take effect in subsequent code\n nn.initializer.set_global_initializer(None)\n \"\"\"\n\n check_type(weight_init, 'weight_init', (Initializer, type(None)),\n 'set_global_initializer')\n global _global_weight_initializer_\n _global_weight_initializer_ = weight_init\n\n check_type(bias_init, 'bias_init', (Initializer, type(None)),\n 'set_global_initializer')\n global _global_bias_initializer_\n _global_bias_initializer_ = bias_init\n\n\ndef _global_weight_initializer():\n \"\"\"\n Return the global weight initializer. The user doesn't need to use it.\n \"\"\"\n return _global_weight_initializer_\n\n\ndef _global_bias_initializer():\n \"\"\"\n Return the global bias initializer. The user doesn't need to use it.\n \"\"\"\n return _global_bias_initializer_\n\n\ndef calculate_gain(nonlinearity, param=None):\n \"\"\"\n Get the recommended ``gain`` value of some nonlinearity function. ``gain`` value can be used in some\n ``paddle.nn.initializer`` api to adjust the initialization value.\n\n Args:\n nonlinearity(str): name of nonlinearity activation function. If it is a linear function, such as:\n `linear/conv1d/conv2d/conv3d/conv1d_transpose/conv2d_transpose/conv3d_transpose` , 1.0 will be returned.\n param(bool|int|float, optional): optional parameter for some nonlinearity function. Now, it only applies to\n 'leaky_relu'. Default: None, it will be calculated as 0.01 in the formula.\n\n Returns:\n A float value, which is the recommended gain for this nonlinearity function.\n\n Examples:\n ..
code-block:: python\n\n import paddle\n gain = paddle.nn.initializer.calculate_gain('tanh') # 5.0 / 3\n gain = paddle.nn.initializer.calculate_gain('leaky_relu', param=1.0) # 1.0 = math.sqrt(2.0 / (1+param^2))\n\n \"\"\"\n if param is None:\n param = 0.01\n else:\n assert isinstance(param, (bool, int, float))\n param = float(param)\n recommended_gain = {\n 'sigmoid': 1,\n 'linear': 1,\n 'conv1d': 1,\n 'conv2d': 1,\n 'conv3d': 1,\n 'conv1d_transpose': 1,\n 'conv2d_transpose': 1,\n 'conv3d_transpose': 1,\n 'tanh': 5.0 / 3,\n 'relu': math.sqrt(2.0),\n 'leaky_relu': math.sqrt(2.0 / (1 + param**2)),\n 'selu': 3.0 / 4\n }\n if nonlinearity in recommended_gain.keys():\n return recommended_gain[nonlinearity]\n else:\n raise ValueError(\"nonlinearity function {} is not supported now.\".\n format(nonlinearity))\n\n\n# We shorten the class names, since users will use the initializer with the\n# package name. The sample code:\n#\n# import paddle.fluid as fluid\n#\n# hidden = fluid.layers.fc(...,\n# param_attr=ParamAttr(fluid.initializer.Xavier()))\n#\n# There is no need to add an `Initializer` suffix to the class name\nConstant = ConstantInitializer\nUniform = UniformInitializer\nNormal = NormalInitializer\nTruncatedNormal = TruncatedNormalInitializer\nXavier = XavierInitializer\nMSRA = MSRAInitializer\nBilinear = BilinearInitializer\n",
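# A minimal numpy-only sketch (not part of initializer.py above) of the fan
# computation and the uniform bounds used by XavierInitializer and
# MSRAInitializer. `compute_fans` mirrors Initializer._compute_fans; the names
# here are illustrative only.
import math
import numpy as np

def compute_fans(shape):
    if len(shape) == 0:
        return 1, 1
    if len(shape) == 1:
        return shape[0], shape[0]
    if len(shape) == 2:
        return shape[0], shape[1]
    # Convolution kernel: [num_filters, num_filter_channels, *filter_size]
    receptive_field_size = int(np.prod(shape[2:]))
    return shape[1] * receptive_field_size, shape[0] * receptive_field_size

fan_in, fan_out = compute_fans((64, 32, 3, 3))
assert (fan_in, fan_out) == (32 * 9, 64 * 9)
xavier_limit = math.sqrt(6.0 / (fan_in + fan_out))  # Xavier Uniform[-x, x]
msra_limit = math.sqrt(6.0 / fan_in)                # MSRA Uniform[-x, x]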
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# TODO: define the distribution functions\n# __all__ = ['Categorical',\n# 'MultivariateNormalDiag',\n# 'Normal',\n# 'sampling_id',\n# 'Uniform']\n\nfrom __future__ import print_function\n\nimport math\nimport warnings\n\nimport numpy as np\nimport paddle\nfrom paddle import _C_ops\nfrom paddle.fluid import core\nfrom paddle.fluid.data_feeder import (check_dtype, check_type,\n check_variable_and_dtype, convert_dtype)\nfrom paddle.fluid.framework import _non_static_mode, in_dygraph_mode\nfrom paddle.fluid.layers import (control_flow, elementwise_add, elementwise_div,\n elementwise_mul, elementwise_sub, nn, ops,\n tensor)\nfrom paddle.tensor import arange, concat, gather_nd, multinomial\n\n\nclass Distribution(object):\n \"\"\"\n The abstract base class for probability distributions. Functions are \n implemented in specific distributions.\n\n Args:\n batch_shape(Sequence[int], optional): independent, not identically \n distributed draws, aka a \"collection\" or \"bunch\" of distributions.\n event_shape(Sequence[int], optional): the shape of a single \n draw from the distribution; it may be dependent across dimensions. \n For scalar distributions, the event shape is []. For n-dimension \n multivariate distribution, the event shape is [n].\n \"\"\"\n\n def __init__(self, batch_shape=(), event_shape=()):\n\n self._batch_shape = batch_shape if isinstance(\n batch_shape, tuple) else tuple(batch_shape)\n self._event_shape = event_shape if isinstance(\n event_shape, tuple) else tuple(event_shape)\n\n super(Distribution, self).__init__()\n\n @property\n def batch_shape(self):\n \"\"\"Returns batch shape of distribution\n\n Returns:\n Sequence[int]: batch shape\n \"\"\"\n return self._batch_shape\n\n @property\n def event_shape(self):\n \"\"\"Returns event shape of distribution\n\n Returns:\n Sequence[int]: event shape\n \"\"\"\n return self._event_shape\n\n @property\n def mean(self):\n \"\"\"Mean of distribution\"\"\"\n raise NotImplementedError\n\n @property\n def variance(self):\n \"\"\"Variance of distribution\"\"\"\n raise NotImplementedError\n\n def sample(self, shape=()):\n \"\"\"Sampling from the distribution.\"\"\"\n raise NotImplementedError\n\n def rsample(self, shape=()):\n \"\"\"reparameterized sample\"\"\"\n raise NotImplementedError\n\n def entropy(self):\n \"\"\"The entropy of the distribution.\"\"\"\n raise NotImplementedError\n\n def kl_divergence(self, other):\n \"\"\"The KL-divergence between self distributions and other.\"\"\"\n raise NotImplementedError\n\n def prob(self, value):\n \"\"\"Probability density/mass function evaluated at value.\n\n Args:\n value (Tensor): value which will be evaluated\n \"\"\"\n return self.log_prob(value).exp()\n\n def log_prob(self, value):\n \"\"\"Log probability density/mass function.\"\"\"\n raise NotImplementedError\n\n def probs(self, value):\n \"\"\"Probability density/mass function.\n \n .. 
note:: \n \n This method will be deprecated in the future, please use `prob` \n instead.\n \"\"\"\n raise NotImplementedError\n\n def _extend_shape(self, sample_shape):\n \"\"\"Compute the shape of a sample.\n\n Args:\n sample_shape (Tensor): sample shape\n\n Returns:\n Tensor: generated sample data shape\n \"\"\"\n return sample_shape + self._batch_shape + self._event_shape\n\n def _validate_args(self, *args):\n \"\"\"\n Argument validation for distribution args\n Args:\n value (float, list, numpy.ndarray, Tensor)\n Raises:\n ValueError: if one argument is Tensor, all arguments should be Tensor\n \"\"\"\n is_variable = False\n is_number = False\n for arg in args:\n if isinstance(arg, tensor.Variable):\n is_variable = True\n else:\n is_number = True\n\n if is_variable and is_number:\n raise ValueError(\n 'if one argument is Tensor, all arguments should be Tensor')\n\n return is_variable\n\n def _to_tensor(self, *args):\n \"\"\"\n Convert args to Tensor.\n\n Args:\n value (float, list, numpy.ndarray, Tensor)\n Returns:\n Tensor of args.\n \"\"\"\n numpy_args = []\n variable_args = []\n tmp = 0.\n\n for arg in args:\n if isinstance(arg, float):\n arg = [arg]\n if not isinstance(arg, (list, tuple, np.ndarray, tensor.Variable)):\n raise TypeError(\n \"Type of input args must be float, list, numpy.ndarray or Tensor, but received type {}\".\n format(type(arg)))\n\n arg_np = np.array(arg)\n arg_dtype = arg_np.dtype\n if str(arg_dtype) != 'float32':\n if str(arg_dtype) != 'float64':\n # \"assign\" op doesn't support float64. if dtype is float64, float32 variable will be generated\n # and converted to float64 later using \"cast\".\n warnings.warn(\n \"data type of argument only supports float32 and float64, your argument will be converted to float32.\"\n )\n arg_np = arg_np.astype('float32')\n # tmp is used to support broadcast; it summarizes the shapes of all the args and gets the mixed shape.\n tmp = tmp + arg_np\n numpy_args.append(arg_np)\n\n dtype = tmp.dtype\n for arg in numpy_args:\n arg_broadcasted, _ = np.broadcast_arrays(arg, tmp)\n arg_variable = tensor.create_tensor(dtype=dtype)\n tensor.assign(arg_broadcasted, arg_variable)\n variable_args.append(arg_variable)\n\n return tuple(variable_args)\n\n def _check_values_dtype_in_probs(self, param, value):\n \"\"\"\n Log_prob and probs methods have input ``value``; if value's dtype is different from param's,\n convert value's dtype to be consistent with param's dtype.\n\n Args:\n param (Tensor): low and high in Uniform class, loc and scale in Normal class.\n value (Tensor): The input tensor.\n\n Returns:\n value (Tensor): Change value's dtype if value's dtype is different from param.\n \"\"\"\n if _non_static_mode():\n if value.dtype != param.dtype and convert_dtype(\n value.dtype) in ['float32', 'float64']:\n warnings.warn(\n \"dtype of input 'value' needs to be the same as parameters of distribution class. dtype of 'value' will be converted.\"\n )\n return _C_ops.cast(value, 'in_dtype', value.dtype, 'out_dtype',\n param.dtype)\n return value\n\n check_variable_and_dtype(value, 'value', ['float32', 'float64'],\n 'log_prob')\n if value.dtype != param.dtype:\n warnings.warn(\n \"dtype of input 'value' needs to be the same as parameters of distribution class. dtype of 'value' will be converted.\"\n )\n return tensor.cast(value, dtype=param.dtype)\n return value\n\n def _probs_to_logits(self, probs, is_binary=False):\n r\"\"\"\n Converts probabilities into logits. 
For the binary, probs denotes the \n probability of occurrence of the event indexed by `1`. For the \n multi-dimensional, values of last axis denote the probabilities of \n occurrence of each of the events.\n \"\"\"\n return (paddle.log(probs) - paddle.log1p(-probs)) \\\n if is_binary else paddle.log(probs)\n\n def _logits_to_probs(self, logits, is_binary=False):\n r\"\"\"\n Converts logits into probabilities. For the binary, each value denotes \n log odds, whereas for the multi-dimensional case, the values along the \n last dimension denote the log probabilities of the events.\n \"\"\"\n return paddle.nn.functional.sigmoid(logits) \\\n if is_binary else paddle.nn.functional.softmax(logits, axis=-1)\n"
] | [
[
"numpy.random.rand"
],
[
"numpy.random.get_state",
"numpy.nditer",
"numpy.random.seed",
"numpy.abs",
"numpy.asarray",
"numpy.reshape",
"numpy.allclose",
"numpy.array_equal",
"numpy.logical_and",
"numpy.uint32",
"numpy.dtype",
"numpy.max",
"numpy.random.set_state",
"numpy.argmax",
"numpy.transpose",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
],
[
"numpy.random.uniform",
"numpy.array"
],
[
"numpy.concatenate",
"numpy.ceil",
"numpy.array",
"numpy.prod"
],
[
"numpy.array",
"numpy.random.random",
"numpy.allclose",
"numpy.random.seed"
],
[
"numpy.random.random",
"numpy.unique",
"numpy.zeros_like",
"numpy.array",
"numpy.sum",
"numpy.random.randint"
],
[
"numpy.prod"
],
[
"numpy.logspace",
"numpy.arange",
"numpy.array",
"numpy.array_equal"
],
[
"numpy.product",
"numpy.reshape",
"numpy.stack",
"numpy.concatenate",
"numpy.transpose",
"numpy.zeros"
],
[
"numpy.reshape",
"numpy.ceil",
"numpy.prod"
],
[
"numpy.array",
"numpy.broadcast_arrays"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
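The `Distribution._probs_to_logits` and `_logits_to_probs` helpers in the paddle row above convert between probabilities and logits: log-odds and sigmoid for the binary case, plain log and softmax over the last axis otherwise. Below is a minimal NumPy sketch of just those two formulas; the standalone function names are hypothetical and only the expressions are taken from the source.

import numpy as np

def probs_to_logits(probs, is_binary=False):
    # Binary: log-odds log(p) - log1p(-p); multi-class: plain log-probabilities.
    return np.log(probs) - np.log1p(-probs) if is_binary else np.log(probs)

def logits_to_probs(logits, is_binary=False):
    # Binary: sigmoid; multi-class: softmax over the last axis.
    if is_binary:
        return 1.0 / (1.0 + np.exp(-logits))
    e = np.exp(logits - logits.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

# Round trip: sigmoid inverts the log-odds transform.
p = np.array([0.1, 0.5, 0.9])
assert np.allclose(logits_to_probs(probs_to_logits(p, True), True), p)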
luckmoon/nmt | [
"4f6a4acf8d8e086f9d894444a2877ac1f0856ad0",
"4f6a4acf8d8e086f9d894444a2877ac1f0856ad0",
"4f6a4acf8d8e086f9d894444a2877ac1f0856ad0"
] | [
"nmt/utils/iterator_utils_test.py",
"nmt/model_helper.py",
"nmt/attention_model.py"
] | [
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for iterator_utils.py\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import lookup_ops\n\nfrom ..utils import iterator_utils\n\n\nclass IteratorUtilsTest(tf.test.TestCase):\n\n def testGetIterator(self):\n tf.set_random_seed(1)\n tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(\n tf.constant([\"a\", \"b\", \"c\", \"eos\", \"sos\"]))\n src_dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant([\"f e a g\", \"c c a\", \"d\", \"c a\"]))\n tgt_dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant([\"c c\", \"a b\", \"\", \"b c\"]))\n hparams = tf.contrib.training.HParams(\n random_seed=3,\n num_buckets=5,\n eos=\"eos\",\n sos=\"sos\")\n batch_size = 2\n src_max_len = 3\n iterator = iterator_utils.get_iterator(\n src_dataset=src_dataset,\n tgt_dataset=tgt_dataset,\n src_vocab_table=src_vocab_table,\n tgt_vocab_table=tgt_vocab_table,\n batch_size=batch_size,\n sos=hparams.sos,\n eos=hparams.eos,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets,\n src_max_len=src_max_len,\n reshuffle_each_iteration=False)\n table_initializer = tf.tables_initializer()\n source = iterator.source\n target_input = iterator.target_input\n target_output = iterator.target_output\n src_seq_len = iterator.source_sequence_length\n tgt_seq_len = iterator.target_sequence_length\n self.assertEqual([None, None], source.shape.as_list())\n self.assertEqual([None, None], target_input.shape.as_list())\n self.assertEqual([None, None], target_output.shape.as_list())\n self.assertEqual([None], src_seq_len.shape.as_list())\n self.assertEqual([None], tgt_seq_len.shape.as_list())\n with self.test_session() as sess:\n sess.run(table_initializer)\n sess.run(iterator.initializer)\n\n (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (\n sess.run((source, src_seq_len, target_input, target_output,\n tgt_seq_len)))\n self.assertAllEqual(\n [[-1, -1, 0], # \"f\" == unknown, \"e\" == unknown, a\n [2, 0, 3]], # c a eos -- eos is padding\n source_v)\n self.assertAllEqual([3, 2], src_len_v)\n self.assertAllEqual(\n [[4, 2, 2], # sos c c\n [4, 1, 2]], # sos b c\n target_input_v)\n self.assertAllEqual(\n [[2, 2, 3], # c c eos\n [1, 2, 3]], # b c eos\n target_output_v)\n self.assertAllEqual([3, 3], tgt_len_v)\n\n (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (\n sess.run((source, src_seq_len, target_input, target_output,\n tgt_seq_len)))\n self.assertAllEqual(\n [[2, 2, 0]], # c c a\n source_v)\n self.assertAllEqual([3], src_len_v)\n self.assertAllEqual(\n [[4, 0, 1]], # sos a b\n target_input_v)\n self.assertAllEqual(\n [[0, 1, 3]], # a b eos\n target_output_v)\n self.assertAllEqual([3], tgt_len_v)\n\n with 
self.assertRaisesOpError(\"End of sequence\"):\n sess.run(source)\n\n def testGetIteratorWithShard(self):\n tf.set_random_seed(1)\n tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(\n tf.constant([\"a\", \"b\", \"c\", \"eos\", \"sos\"]))\n src_dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant([\"c c a\", \"f e a g\", \"d\", \"c a\"]))\n tgt_dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant([\"a b\", \"c c\", \"\", \"b c\"]))\n hparams = tf.contrib.training.HParams(\n random_seed=3,\n num_buckets=5,\n eos=\"eos\",\n sos=\"sos\")\n batch_size = 2\n src_max_len = 3\n iterator = iterator_utils.get_iterator(\n src_dataset=src_dataset,\n tgt_dataset=tgt_dataset,\n src_vocab_table=src_vocab_table,\n tgt_vocab_table=tgt_vocab_table,\n batch_size=batch_size,\n sos=hparams.sos,\n eos=hparams.eos,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets,\n src_max_len=src_max_len,\n num_shards=2,\n shard_index=1,\n reshuffle_each_iteration=False)\n table_initializer = tf.tables_initializer()\n source = iterator.source\n target_input = iterator.target_input\n target_output = iterator.target_output\n src_seq_len = iterator.source_sequence_length\n tgt_seq_len = iterator.target_sequence_length\n self.assertEqual([None, None], source.shape.as_list())\n self.assertEqual([None, None], target_input.shape.as_list())\n self.assertEqual([None, None], target_output.shape.as_list())\n self.assertEqual([None], src_seq_len.shape.as_list())\n self.assertEqual([None], tgt_seq_len.shape.as_list())\n with self.test_session() as sess:\n sess.run(table_initializer)\n sess.run(iterator.initializer)\n\n (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (\n sess.run((source, src_seq_len, target_input, target_output,\n tgt_seq_len)))\n self.assertAllEqual(\n [[-1, -1, 0], # \"f\" == unknown, \"e\" == unknown, a\n [2, 0, 3]], # c a eos -- eos is padding\n source_v)\n self.assertAllEqual([3, 2], src_len_v)\n self.assertAllEqual(\n [[4, 2, 2], # sos c c\n [4, 1, 2]], # sos b c\n target_input_v)\n self.assertAllEqual(\n [[2, 2, 3], # c c eos\n [1, 2, 3]], # b c eos\n target_output_v)\n self.assertAllEqual([3, 3], tgt_len_v)\n\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run(source)\n\n def testGetIteratorWithSkipCount(self):\n tf.set_random_seed(1)\n tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(\n tf.constant([\"a\", \"b\", \"c\", \"eos\", \"sos\"]))\n src_dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant([\"c a\", \"c c a\", \"d\", \"f e a g\"]))\n tgt_dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant([\"b c\", \"a b\", \"\", \"c c\"]))\n hparams = tf.contrib.training.HParams(\n random_seed=3,\n num_buckets=5,\n eos=\"eos\",\n sos=\"sos\")\n batch_size = 2\n src_max_len = 3\n skip_count = tf.placeholder(shape=(), dtype=tf.int64)\n iterator = iterator_utils.get_iterator(\n src_dataset=src_dataset,\n tgt_dataset=tgt_dataset,\n src_vocab_table=src_vocab_table,\n tgt_vocab_table=tgt_vocab_table,\n batch_size=batch_size,\n sos=hparams.sos,\n eos=hparams.eos,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets,\n src_max_len=src_max_len,\n skip_count=skip_count,\n reshuffle_each_iteration=False)\n table_initializer = tf.tables_initializer()\n source = iterator.source\n target_input = iterator.target_input\n target_output = iterator.target_output\n src_seq_len = iterator.source_sequence_length\n tgt_seq_len = iterator.target_sequence_length\n self.assertEqual([None, None], 
source.shape.as_list())\n self.assertEqual([None, None], target_input.shape.as_list())\n self.assertEqual([None, None], target_output.shape.as_list())\n self.assertEqual([None], src_seq_len.shape.as_list())\n self.assertEqual([None], tgt_seq_len.shape.as_list())\n with self.test_session() as sess:\n sess.run(table_initializer)\n sess.run(iterator.initializer, feed_dict={skip_count: 3})\n\n (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (\n sess.run((source, src_seq_len, target_input, target_output,\n tgt_seq_len)))\n self.assertAllEqual(\n [[-1, -1, 0]], # \"f\" == unknown, \"e\" == unknown, a\n source_v)\n self.assertAllEqual([3], src_len_v)\n self.assertAllEqual(\n [[4, 2, 2]], # sos c c\n target_input_v)\n self.assertAllEqual(\n [[2, 2, 3]], # c c eos\n target_output_v)\n self.assertAllEqual([3], tgt_len_v)\n\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run(source)\n\n # Re-init iterator with skip_count=0.\n sess.run(iterator.initializer, feed_dict={skip_count: 0})\n\n (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (\n sess.run((source, src_seq_len, target_input, target_output,\n tgt_seq_len)))\n self.assertAllEqual(\n [[2, 0, 3], # c a eos -- eos is padding\n [-1, -1, 0]], # \"f\" == unknown, \"e\" == unknown, a\n source_v)\n self.assertAllEqual([2, 3], src_len_v)\n self.assertAllEqual(\n [[4, 1, 2], # sos b c\n [4, 2, 2]], # sos c c\n target_input_v)\n self.assertAllEqual(\n [[1, 2, 3], # b c eos\n [2, 2, 3]], # c c eos\n target_output_v)\n self.assertAllEqual([3, 3], tgt_len_v)\n\n (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (\n sess.run((source, src_seq_len, target_input, target_output,\n tgt_seq_len)))\n self.assertAllEqual(\n [[2, 2, 0]], # c c a\n source_v)\n self.assertAllEqual([3], src_len_v)\n self.assertAllEqual(\n [[4, 0, 1]], # sos a b\n target_input_v)\n self.assertAllEqual(\n [[0, 1, 3]], # a b eos\n target_output_v)\n self.assertAllEqual([3], tgt_len_v)\n\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run(source)\n\n def testGetInferIterator(self):\n src_vocab_table = lookup_ops.index_table_from_tensor(\n tf.constant([\"a\", \"b\", \"c\", \"eos\", \"sos\"]))\n src_dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant([\"c c a\", \"c a\", \"d\", \"f e a g\"]))\n hparams = tf.contrib.training.HParams(\n random_seed=3,\n eos=\"eos\",\n sos=\"sos\")\n batch_size = 2\n src_max_len = 3\n iterator = iterator_utils.get_infer_iterator(\n src_dataset=src_dataset,\n src_vocab_table=src_vocab_table,\n batch_size=batch_size,\n eos=hparams.eos,\n src_max_len=src_max_len)\n table_initializer = tf.tables_initializer()\n source = iterator.source\n seq_len = iterator.source_sequence_length\n self.assertEqual([None, None], source.shape.as_list())\n self.assertEqual([None], seq_len.shape.as_list())\n with self.test_session() as sess:\n sess.run(table_initializer)\n sess.run(iterator.initializer)\n\n (source_v, seq_len_v) = sess.run((source, seq_len))\n self.assertAllEqual(\n [[2, 2, 0], # c c a\n [2, 0, 3]], # c a eos\n source_v)\n self.assertAllEqual([3, 2], seq_len_v)\n\n (source_v, seq_len_v) = sess.run((source, seq_len))\n self.assertAllEqual(\n [[-1, 3, 3], # \"d\" == unknown, eos eos\n [-1, -1, 0]], # \"f\" == unknown, \"e\" == unknown, a\n source_v)\n self.assertAllEqual([1, 3], seq_len_v)\n\n with self.assertRaisesOpError(\"End of sequence\"):\n sess.run((source, seq_len))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utility functions for building models.\"\"\"\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport time\nimport numpy as np\nimport six\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import lookup_ops\nfrom .utils import iterator_utils\nfrom .utils import misc_utils as utils\nfrom .utils import vocab_utils\n\n__all__ = [\n \"get_initializer\", \"get_device_str\", \"create_train_model\",\n \"create_eval_model\", \"create_infer_model\",\n \"create_emb_for_encoder_and_decoder\", \"create_rnn_cell\", \"gradient_clip\",\n \"create_or_load_model\", \"load_model\", \"avg_checkpoints\",\n \"compute_perplexity\"\n]\n\n# If a vocab size is greater than this value, put the embedding on cpu instead\nVOCAB_SIZE_THRESHOLD_CPU = 50000\n\n\ndef get_initializer(init_op, seed=None, init_weight=None):\n \"\"\"Create an initializer. init_weight is only for uniform.\"\"\"\n if init_op == \"uniform\":\n assert init_weight\n return tf.random_uniform_initializer(\n -init_weight, init_weight, seed=seed)\n elif init_op == \"glorot_normal\":\n return tf.keras.initializers.glorot_normal(\n seed=seed)\n elif init_op == \"glorot_uniform\":\n return tf.keras.initializers.glorot_uniform(\n seed=seed)\n else:\n raise ValueError(\"Unknown init_op %s\" % init_op)\n\n\ndef get_device_str(device_id, num_gpus):\n \"\"\"Return a device string for multi-GPU setup.\"\"\"\n if num_gpus == 0:\n return \"/cpu:0\"\n device_str_output = \"/gpu:%d\" % (device_id % num_gpus)\n return device_str_output\n\n\nclass ExtraArgs(collections.namedtuple(\n \"ExtraArgs\", (\"single_cell_fn\", \"model_device_fn\",\n \"attention_mechanism_fn\", \"encoder_emb_lookup_fn\"))):\n pass\n\n\nclass TrainModel(\n collections.namedtuple(\"TrainModel\", (\"graph\", \"model\", \"iterator\",\n \"skip_count_placeholder\"))):\n pass\n\n\ndef create_train_model(\n model_creator, hparams, scope=None, num_workers=1, jobid=0,\n extra_args=None):\n \"\"\"Create train graph, model, and iterator.\"\"\"\n src_file = \"%s.%s\" % (hparams.train_prefix, hparams.src)\n tgt_file = \"%s.%s\" % (hparams.train_prefix, hparams.tgt)\n src_vocab_file = hparams.src_vocab_file\n tgt_vocab_file = hparams.tgt_vocab_file\n\n graph = tf.Graph()\n\n with graph.as_default(), tf.container(scope or \"train\"):\n src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(\n src_vocab_file, tgt_vocab_file, hparams.share_vocab)\n\n src_dataset = tf.data.TextLineDataset(tf.gfile.Glob(src_file))\n tgt_dataset = tf.data.TextLineDataset(tf.gfile.Glob(tgt_file))\n skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)\n\n iterator = iterator_utils.get_iterator(\n src_dataset,\n tgt_dataset,\n src_vocab_table,\n tgt_vocab_table,\n batch_size=hparams.batch_size,\n sos=hparams.sos,\n eos=hparams.eos,\n random_seed=hparams.random_seed,\n 
num_buckets=hparams.num_buckets,\n src_max_len=hparams.src_max_len,\n tgt_max_len=hparams.tgt_max_len,\n skip_count=skip_count_placeholder,\n num_shards=num_workers,\n shard_index=jobid,\n use_char_encode=hparams.use_char_encode)\n\n # Note: One can set model_device_fn to\n # `tf.train.replica_device_setter(ps_tasks)` for distributed training.\n model_device_fn = None\n if extra_args: model_device_fn = extra_args.model_device_fn\n with tf.device(model_device_fn):\n model = model_creator(\n hparams,\n iterator=iterator,\n mode=tf.contrib.learn.ModeKeys.TRAIN,\n source_vocab_table=src_vocab_table,\n target_vocab_table=tgt_vocab_table,\n scope=scope,\n extra_args=extra_args)\n\n return TrainModel(\n graph=graph,\n model=model,\n iterator=iterator,\n skip_count_placeholder=skip_count_placeholder)\n\n\nclass EvalModel(\n collections.namedtuple(\"EvalModel\",\n (\"graph\", \"model\", \"src_file_placeholder\",\n \"tgt_file_placeholder\", \"iterator\"))):\n pass\n\n\ndef create_eval_model(model_creator, hparams, scope=None, extra_args=None):\n \"\"\"Create train graph, model, src/tgt file holders, and iterator.\"\"\"\n src_vocab_file = hparams.src_vocab_file\n tgt_vocab_file = hparams.tgt_vocab_file\n graph = tf.Graph()\n\n with graph.as_default(), tf.container(scope or \"eval\"):\n src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(\n src_vocab_file, tgt_vocab_file, hparams.share_vocab)\n reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(\n tgt_vocab_file, default_value=vocab_utils.UNK)\n\n src_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)\n tgt_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)\n src_dataset = tf.data.TextLineDataset(src_file_placeholder)\n tgt_dataset = tf.data.TextLineDataset(tgt_file_placeholder)\n iterator = iterator_utils.get_iterator(\n src_dataset,\n tgt_dataset,\n src_vocab_table,\n tgt_vocab_table,\n hparams.batch_size,\n sos=hparams.sos,\n eos=hparams.eos,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets,\n src_max_len=hparams.src_max_len_infer,\n tgt_max_len=hparams.tgt_max_len_infer,\n use_char_encode=hparams.use_char_encode)\n model = model_creator(\n hparams,\n iterator=iterator,\n mode=tf.contrib.learn.ModeKeys.EVAL,\n source_vocab_table=src_vocab_table,\n target_vocab_table=tgt_vocab_table,\n reverse_target_vocab_table=reverse_tgt_vocab_table,\n scope=scope,\n extra_args=extra_args)\n return EvalModel(\n graph=graph,\n model=model,\n src_file_placeholder=src_file_placeholder,\n tgt_file_placeholder=tgt_file_placeholder,\n iterator=iterator)\n\n\nclass InferModel(\n collections.namedtuple(\"InferModel\",\n (\"graph\", \"model\", \"src_placeholder\",\n \"batch_size_placeholder\", \"iterator\"))):\n pass\n\n\ndef create_infer_model(model_creator, hparams, scope=None, extra_args=None):\n \"\"\"Create inference model.\"\"\"\n graph = tf.Graph()\n src_vocab_file = hparams.src_vocab_file\n tgt_vocab_file = hparams.tgt_vocab_file\n\n with graph.as_default(), tf.container(scope or \"infer\"):\n src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(\n src_vocab_file, tgt_vocab_file, hparams.share_vocab)\n reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(\n tgt_vocab_file, default_value=vocab_utils.UNK)\n\n src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)\n batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)\n\n src_dataset = tf.data.Dataset.from_tensor_slices(\n src_placeholder)\n iterator = iterator_utils.get_infer_iterator(\n 
src_dataset,\n src_vocab_table,\n batch_size=batch_size_placeholder,\n eos=hparams.eos,\n src_max_len=hparams.src_max_len_infer,\n use_char_encode=hparams.use_char_encode)\n model = model_creator(\n hparams,\n iterator=iterator,\n mode=tf.contrib.learn.ModeKeys.INFER,\n source_vocab_table=src_vocab_table,\n target_vocab_table=tgt_vocab_table,\n reverse_target_vocab_table=reverse_tgt_vocab_table,\n scope=scope,\n extra_args=extra_args)\n return InferModel(\n graph=graph,\n model=model,\n src_placeholder=src_placeholder,\n batch_size_placeholder=batch_size_placeholder,\n iterator=iterator)\n\n\ndef _get_embed_device(vocab_size):\n \"\"\"Decide on which device to place an embed matrix given its vocab size.\"\"\"\n if vocab_size > VOCAB_SIZE_THRESHOLD_CPU:\n return \"/cpu:0\"\n else:\n return \"/gpu:0\"\n\n\ndef _create_pretrained_emb_from_txt(\n vocab_file, embed_file, num_trainable_tokens=3, dtype=tf.float32,\n scope=None):\n \"\"\"Load a pretrained embedding from embed_file, and return an embedding matrix.\n\n Args:\n embed_file: Path to a GloVe-formatted embedding txt file.\n num_trainable_tokens: Make the first n tokens in the vocab file trainable\n variables. Default is 3, which is \"<unk>\", \"<s>\" and \"</s>\".\n \"\"\"\n vocab, _ = vocab_utils.load_vocab(vocab_file)\n trainable_tokens = vocab[:num_trainable_tokens]\n\n utils.print_out(\"# Using pretrained embedding: %s.\" % embed_file)\n utils.print_out(\" with trainable tokens: \")\n\n emb_dict, emb_size = vocab_utils.load_embed_txt(embed_file)\n for token in trainable_tokens:\n utils.print_out(\" %s\" % token)\n if token not in emb_dict:\n emb_dict[token] = [0.0] * emb_size\n\n emb_mat = np.array(\n [emb_dict[token] for token in vocab], dtype=dtype.as_numpy_dtype())\n emb_mat = tf.constant(emb_mat)\n emb_mat_const = tf.slice(emb_mat, [num_trainable_tokens, 0], [-1, -1])\n with tf.variable_scope(scope or \"pretrain_embeddings\", dtype=dtype) as scope:\n with tf.device(_get_embed_device(num_trainable_tokens)):\n emb_mat_var = tf.get_variable(\n \"emb_mat_var\", [num_trainable_tokens, emb_size])\n return tf.concat([emb_mat_var, emb_mat_const], 0)\n\n\ndef _create_or_load_embed(embed_name, vocab_file, embed_file,\n vocab_size, embed_size, dtype):\n \"\"\"Create a new or load an existing embedding matrix.\"\"\"\n if vocab_file and embed_file:\n embedding = _create_pretrained_emb_from_txt(vocab_file, embed_file)\n else:\n with tf.device(_get_embed_device(vocab_size)):\n embedding = tf.get_variable(\n embed_name, [vocab_size, embed_size], dtype)\n return embedding\n\n\ndef create_emb_for_encoder_and_decoder(share_vocab,\n src_vocab_size,\n tgt_vocab_size,\n src_embed_size,\n tgt_embed_size,\n dtype=tf.float32,\n num_enc_partitions=0,\n num_dec_partitions=0,\n src_vocab_file=None,\n tgt_vocab_file=None,\n src_embed_file=None,\n tgt_embed_file=None,\n use_char_encode=False,\n scope=None):\n \"\"\"Create embedding matrix for both encoder and decoder.\n\n Args:\n share_vocab: A boolean. Whether to share embedding matrix for both\n encoder and decoder.\n src_vocab_size: An integer. The source vocab size.\n tgt_vocab_size: An integer. The target vocab size.\n src_embed_size: An integer. The embedding dimension for the encoder's\n embedding.\n tgt_embed_size: An integer. The embedding dimension for the decoder's\n embedding.\n dtype: dtype of the embedding matrix. 
Default to float32.\n num_enc_partitions: number of partitions used for the encoder's embedding\n vars.\n num_dec_partitions: number of partitions used for the decoder's embedding\n vars.\n scope: VariableScope for the created subgraph. Default to \"embedding\".\n\n Returns:\n embedding_encoder: Encoder's embedding matrix.\n embedding_decoder: Decoder's embedding matrix.\n\n Raises:\n ValueError: if share_vocab is set but source and target have different vocab\n sizes.\n \"\"\"\n if num_enc_partitions <= 1:\n enc_partitioner = None\n else:\n # Note: num_partitions > 1 is required for distributed training because\n # embedding_lookup tries to colocate a single-partition embedding variable\n # with its lookup ops, which may cause embedding variables to be placed on\n # worker jobs.\n enc_partitioner = tf.fixed_size_partitioner(num_enc_partitions)\n\n if num_dec_partitions <= 1:\n dec_partitioner = None\n else:\n # Note: num_partitions > 1 is required for distributed training because\n # embedding_lookup tries to colocate a single-partition embedding variable\n # with its lookup ops, which may cause embedding variables to be placed on\n # worker jobs.\n dec_partitioner = tf.fixed_size_partitioner(num_dec_partitions)\n\n if src_embed_file and enc_partitioner:\n raise ValueError(\n \"Can't set num_enc_partitions > 1 when using pretrained encoder \"\n \"embedding\")\n\n if tgt_embed_file and dec_partitioner:\n raise ValueError(\n \"Can't set num_dec_partitions > 1 when using pretrained decoder \"\n \"embedding\")\n\n with tf.variable_scope(\n scope or \"embeddings\", dtype=dtype, partitioner=enc_partitioner) as scope:\n # Share embedding\n if share_vocab:\n if src_vocab_size != tgt_vocab_size:\n raise ValueError(\"Share embedding but different src/tgt vocab sizes\"\n \" %d vs. 
%d\" % (src_vocab_size, tgt_vocab_size))\n assert src_embed_size == tgt_embed_size\n utils.print_out(\"# Use the same embedding for source and target\")\n vocab_file = src_vocab_file or tgt_vocab_file\n embed_file = src_embed_file or tgt_embed_file\n\n embedding_encoder = _create_or_load_embed(\n \"embedding_share\", vocab_file, embed_file,\n src_vocab_size, src_embed_size, dtype)\n embedding_decoder = embedding_encoder\n else:\n if not use_char_encode:\n with tf.variable_scope(\"encoder\", partitioner=enc_partitioner):\n embedding_encoder = _create_or_load_embed(\n \"embedding_encoder\", src_vocab_file, src_embed_file,\n src_vocab_size, src_embed_size, dtype)\n else:\n embedding_encoder = None\n\n with tf.variable_scope(\"decoder\", partitioner=dec_partitioner):\n embedding_decoder = _create_or_load_embed(\n \"embedding_decoder\", tgt_vocab_file, tgt_embed_file,\n tgt_vocab_size, tgt_embed_size, dtype)\n\n return embedding_encoder, embedding_decoder\n\n\ndef _single_cell(unit_type, num_units, forget_bias, dropout, mode,\n residual_connection=False, device_str=None, residual_fn=None):\n \"\"\"Create an instance of a single RNN cell.\"\"\"\n # dropout (= 1 - keep_prob) is set to 0 during eval and infer\n dropout = dropout if mode == tf.contrib.learn.ModeKeys.TRAIN else 0.0\n\n # Cell Type\n if unit_type == \"lstm\":\n utils.print_out(\" LSTM, forget_bias=%g\" % forget_bias, new_line=False)\n single_cell = tf.contrib.rnn.BasicLSTMCell(\n num_units,\n forget_bias=forget_bias)\n elif unit_type == \"gru\":\n utils.print_out(\" GRU\", new_line=False)\n single_cell = tf.contrib.rnn.GRUCell(num_units)\n elif unit_type == \"layer_norm_lstm\":\n utils.print_out(\" Layer Normalized LSTM, forget_bias=%g\" % forget_bias,\n new_line=False)\n single_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(\n num_units,\n forget_bias=forget_bias,\n layer_norm=True)\n elif unit_type == \"nas\":\n utils.print_out(\" NASCell\", new_line=False)\n single_cell = tf.contrib.rnn.NASCell(num_units)\n else:\n raise ValueError(\"Unknown unit type %s!\" % unit_type)\n\n # Dropout (= 1 - keep_prob)\n if dropout > 0.0:\n single_cell = tf.contrib.rnn.DropoutWrapper(\n cell=single_cell, input_keep_prob=(1.0 - dropout))\n utils.print_out(\" %s, dropout=%g \" % (type(single_cell).__name__, dropout),\n new_line=False)\n\n # Residual\n if residual_connection:\n single_cell = tf.contrib.rnn.ResidualWrapper(\n single_cell, residual_fn=residual_fn)\n utils.print_out(\" %s\" % type(single_cell).__name__, new_line=False)\n\n # Device Wrapper\n if device_str:\n single_cell = tf.contrib.rnn.DeviceWrapper(single_cell, device_str)\n utils.print_out(\" %s, device=%s\" %\n (type(single_cell).__name__, device_str), new_line=False)\n\n return single_cell\n\n\ndef _cell_list(unit_type, num_units, num_layers, num_residual_layers,\n forget_bias, dropout, mode, num_gpus, base_gpu=0,\n single_cell_fn=None, residual_fn=None):\n \"\"\"Create a list of RNN cells.\"\"\"\n if not single_cell_fn:\n single_cell_fn = _single_cell\n\n # Multi-GPU\n cell_list = []\n for i in range(num_layers):\n utils.print_out(\" cell %d\" % i, new_line=False)\n single_cell = single_cell_fn(\n unit_type=unit_type,\n num_units=num_units,\n forget_bias=forget_bias,\n dropout=dropout,\n mode=mode,\n residual_connection=(i >= num_layers - num_residual_layers),\n device_str=get_device_str(i + base_gpu, num_gpus),\n residual_fn=residual_fn\n )\n utils.print_out(\"\")\n cell_list.append(single_cell)\n\n return cell_list\n\n\ndef create_rnn_cell(unit_type, num_units, num_layers, 
num_residual_layers,\n forget_bias, dropout, mode, num_gpus, base_gpu=0,\n single_cell_fn=None):\n \"\"\"Create multi-layer RNN cell.\n\n Args:\n unit_type: string representing the unit type, i.e. \"lstm\".\n num_units: the depth of each unit.\n num_layers: number of cells.\n num_residual_layers: Number of residual layers from top to bottom. For\n example, if `num_layers=4` and `num_residual_layers=2`, the last 2 RNN\n cells in the returned list will be wrapped with `ResidualWrapper`.\n forget_bias: the initial forget bias of the RNNCell(s).\n dropout: floating point value between 0.0 and 1.0:\n the probability of dropout. This is ignored if `mode != TRAIN`.\n mode: either tf.contrib.learn.TRAIN/EVAL/INFER\n num_gpus: The number of gpus to use when performing round-robin\n placement of layers.\n base_gpu: The gpu device id to use for the first RNN cell in the\n returned list. The i-th RNN cell will use `(base_gpu + i) % num_gpus`\n as its device id.\n single_cell_fn: allows for adding a customized cell.\n When not specified, we default to model_helper._single_cell\n Returns:\n An `RNNCell` instance.\n \"\"\"\n cell_list = _cell_list(unit_type=unit_type,\n num_units=num_units,\n num_layers=num_layers,\n num_residual_layers=num_residual_layers,\n forget_bias=forget_bias,\n dropout=dropout,\n mode=mode,\n num_gpus=num_gpus,\n base_gpu=base_gpu,\n single_cell_fn=single_cell_fn)\n\n if len(cell_list) == 1: # Single layer.\n return cell_list[0]\n else: # Multi layers\n return tf.contrib.rnn.MultiRNNCell(cell_list)\n\n\ndef gradient_clip(gradients, max_gradient_norm):\n \"\"\"Clipping gradients of a model.\"\"\"\n clipped_gradients, gradient_norm = tf.clip_by_global_norm(\n gradients, max_gradient_norm)\n gradient_norm_summary = [tf.summary.scalar(\"grad_norm\", gradient_norm)]\n gradient_norm_summary.append(\n tf.summary.scalar(\"clipped_gradient\", tf.global_norm(clipped_gradients)))\n\n return clipped_gradients, gradient_norm_summary, gradient_norm\n\n\ndef print_variables_in_ckpt(ckpt_path):\n \"\"\"Print a list of variables in a checkpoint together with their shapes.\"\"\"\n utils.print_out(\"# Variables in ckpt %s\" % ckpt_path)\n reader = tf.train.NewCheckpointReader(ckpt_path)\n variable_map = reader.get_variable_to_shape_map()\n for key in sorted(variable_map.keys()):\n utils.print_out(\" %s: %s\" % (key, variable_map[key]))\n\n\ndef load_model(model, ckpt_path, session, name):\n \"\"\"Load model from a checkpoint.\"\"\"\n start_time = time.time()\n try:\n model.saver.restore(session, ckpt_path)\n except tf.errors.NotFoundError as e:\n utils.print_out(\"Can't load checkpoint\")\n print_variables_in_ckpt(ckpt_path)\n utils.print_out(\"%s\" % str(e))\n\n session.run(tf.tables_initializer())\n utils.print_out(\n \" loaded %s model parameters from %s, time %.2fs\" %\n (name, ckpt_path, time.time() - start_time))\n return model\n\n\ndef avg_checkpoints(model_dir, num_last_checkpoints, global_step,\n global_step_name):\n \"\"\"Average the last N checkpoints in the model_dir.\"\"\"\n checkpoint_state = tf.train.get_checkpoint_state(model_dir)\n if not checkpoint_state:\n utils.print_out(\"# No checkpoint file found in directory: %s\" % model_dir)\n return None\n\n # Checkpoints are ordered from oldest to newest.\n checkpoints = (\n checkpoint_state.all_model_checkpoint_paths[-num_last_checkpoints:])\n\n if len(checkpoints) < num_last_checkpoints:\n utils.print_out(\n \"# Skipping averaging checkpoints because not enough checkpoints are \"\n \"available.\"\n )\n return None\n\n avg_model_dir = 
os.path.join(model_dir, \"avg_checkpoints\")\n if not tf.gfile.Exists(avg_model_dir):\n utils.print_out(\n \"# Creating new directory %s for saving averaged checkpoints.\" %\n avg_model_dir)\n tf.gfile.MakeDirs(avg_model_dir)\n\n utils.print_out(\"# Reading and averaging variables in checkpoints:\")\n var_list = tf.contrib.framework.list_variables(checkpoints[0])\n var_values, var_dtypes = {}, {}\n for (name, shape) in var_list:\n if name != global_step_name:\n var_values[name] = np.zeros(shape)\n\n for checkpoint in checkpoints:\n utils.print_out(\" %s\" % checkpoint)\n reader = tf.contrib.framework.load_checkpoint(checkpoint)\n for name in var_values:\n tensor = reader.get_tensor(name)\n var_dtypes[name] = tensor.dtype\n var_values[name] += tensor\n\n for name in var_values:\n var_values[name] /= len(checkpoints)\n\n # Build a graph with same variables in the checkpoints, and save the averaged\n # variables into the avg_model_dir.\n with tf.Graph().as_default():\n tf_vars = [\n tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[name])\n for v in var_values\n ]\n\n placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]\n assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]\n global_step_var = tf.Variable(\n global_step, name=global_step_name, trainable=False)\n saver = tf.train.Saver(tf.all_variables())\n\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for p, assign_op, (name, value) in zip(placeholders, assign_ops,\n six.iteritems(var_values)):\n sess.run(assign_op, {p: value})\n\n # Use the built saver to save the averaged checkpoint. Only keep 1\n # checkpoint and the best checkpoint will be moved to avg_best_metric_dir.\n saver.save(\n sess,\n os.path.join(avg_model_dir, \"translate.ckpt\"))\n\n return avg_model_dir\n\n\ndef create_or_load_model(model, model_dir, session, name):\n \"\"\"Create translation model and initialize or load parameters in session.\"\"\"\n latest_ckpt = tf.train.latest_checkpoint(model_dir)\n if latest_ckpt:\n model = load_model(model, latest_ckpt, session, name)\n else:\n start_time = time.time()\n session.run(tf.global_variables_initializer())\n session.run(tf.tables_initializer())\n utils.print_out(\" created %s model with fresh parameters, time %.2fs\" %\n (name, time.time() - start_time))\n\n global_step = model.global_step.eval(session=session)\n return model, global_step\n\n\ndef compute_perplexity(model, sess, name):\n \"\"\"Compute perplexity of the output of the model.\n\n Args:\n model: model for compute perplexity.\n sess: tensorflow session to use.\n name: name of the batch.\n\n Returns:\n The perplexity of the eval outputs.\n \"\"\"\n total_loss = 0\n total_predict_count = 0\n start_time = time.time()\n\n while True:\n try:\n output_tuple = model.eval(sess)\n total_loss += output_tuple.eval_loss * output_tuple.batch_size\n total_predict_count += output_tuple.predict_count\n except tf.errors.OutOfRangeError:\n break\n\n perplexity = utils.safe_exp(total_loss / total_predict_count)\n utils.print_time(\" eval %s: perplexity %.2f\" % (name, perplexity),\n start_time)\n return perplexity\n",
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Attention-based sequence-to-sequence model with dynamic RNN support.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom . import model\nfrom . import model_helper\n\n__all__ = [\"AttentionModel\"]\n\n\nclass AttentionModel(model.Model):\n \"\"\"Sequence-to-sequence dynamic model with attention.\n\n This class implements a multi-layer recurrent neural network as encoder,\n and an attention-based decoder. This is the same as the model described in\n (Luong et al., EMNLP'2015) paper: https://arxiv.org/pdf/1508.04025v5.pdf.\n This class also allows to use GRU cells in addition to LSTM cells with\n support for dropout.\n \"\"\"\n\n def __init__(self,\n hparams,\n mode,\n iterator,\n source_vocab_table,\n target_vocab_table,\n reverse_target_vocab_table=None,\n scope=None,\n extra_args=None):\n self.has_attention = hparams.attention_architecture and hparams.attention\n\n # Set attention_mechanism_fn\n if self.has_attention:\n if extra_args and extra_args.attention_mechanism_fn:\n self.attention_mechanism_fn = extra_args.attention_mechanism_fn\n else:\n self.attention_mechanism_fn = create_attention_mechanism\n\n super(AttentionModel, self).__init__(\n hparams=hparams,\n mode=mode,\n iterator=iterator,\n source_vocab_table=source_vocab_table,\n target_vocab_table=target_vocab_table,\n reverse_target_vocab_table=reverse_target_vocab_table,\n scope=scope,\n extra_args=extra_args)\n\n def _prepare_beam_search_decoder_inputs(\n self, beam_width, memory, source_sequence_length, encoder_state):\n memory = tf.contrib.seq2seq.tile_batch(\n memory, multiplier=beam_width)\n source_sequence_length = tf.contrib.seq2seq.tile_batch(\n source_sequence_length, multiplier=beam_width)\n encoder_state = tf.contrib.seq2seq.tile_batch(\n encoder_state, multiplier=beam_width)\n batch_size = self.batch_size * beam_width\n return memory, source_sequence_length, encoder_state, batch_size\n\n def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state,\n source_sequence_length):\n \"\"\"Build a RNN cell with attention mechanism that can be used by decoder.\"\"\"\n # No Attention\n if not self.has_attention:\n return super(AttentionModel, self)._build_decoder_cell(\n hparams, encoder_outputs, encoder_state, source_sequence_length)\n elif hparams.attention_architecture != \"standard\":\n raise ValueError(\n \"Unknown attention architecture %s\" % hparams.attention_architecture)\n\n num_units = hparams.num_units\n num_layers = self.num_decoder_layers\n num_residual_layers = self.num_decoder_residual_layers\n infer_mode = hparams.infer_mode\n\n dtype = tf.float32\n\n # Ensure memory is batch-major\n if self.time_major:\n memory = tf.transpose(encoder_outputs, [1, 0, 2])\n else:\n memory = encoder_outputs\n\n if (self.mode == 
tf.contrib.learn.ModeKeys.INFER and\n infer_mode == \"beam_search\"):\n memory, source_sequence_length, encoder_state, batch_size = (\n self._prepare_beam_search_decoder_inputs(\n hparams.beam_width, memory, source_sequence_length,\n encoder_state))\n else:\n batch_size = self.batch_size\n\n # Attention\n attention_mechanism = self.attention_mechanism_fn(\n hparams.attention, num_units, memory, source_sequence_length, self.mode)\n\n cell = model_helper.create_rnn_cell(\n unit_type=hparams.unit_type,\n num_units=num_units,\n num_layers=num_layers,\n num_residual_layers=num_residual_layers,\n forget_bias=hparams.forget_bias,\n dropout=hparams.dropout,\n num_gpus=self.num_gpus,\n mode=self.mode,\n single_cell_fn=self.single_cell_fn)\n\n # Only generate alignment in greedy INFER mode.\n alignment_history = (self.mode == tf.contrib.learn.ModeKeys.INFER and\n infer_mode != \"beam_search\")\n cell = tf.contrib.seq2seq.AttentionWrapper(\n cell,\n attention_mechanism,\n attention_layer_size=num_units,\n alignment_history=alignment_history,\n output_attention=hparams.output_attention,\n name=\"attention\")\n\n # TODO(thangluong): do we need num_layers, num_gpus?\n cell = tf.contrib.rnn.DeviceWrapper(cell,\n model_helper.get_device_str(\n num_layers - 1, self.num_gpus))\n\n if hparams.pass_hidden_state:\n decoder_initial_state = cell.zero_state(batch_size, dtype).clone(\n cell_state=encoder_state)\n else:\n decoder_initial_state = cell.zero_state(batch_size, dtype)\n\n return cell, decoder_initial_state\n\n def _get_infer_summary(self, hparams):\n if not self.has_attention or hparams.infer_mode == \"beam_search\":\n return tf.no_op()\n return _create_attention_images_summary(self.final_context_state)\n\n\ndef create_attention_mechanism(attention_option, num_units, memory,\n source_sequence_length, mode):\n \"\"\"Create attention mechanism based on the attention_option.\"\"\"\n del mode # unused\n\n # Mechanism\n if attention_option == \"luong\":\n attention_mechanism = tf.contrib.seq2seq.LuongAttention(\n num_units, memory, memory_sequence_length=source_sequence_length)\n elif attention_option == \"scaled_luong\":\n attention_mechanism = tf.contrib.seq2seq.LuongAttention(\n num_units,\n memory,\n memory_sequence_length=source_sequence_length,\n scale=True)\n elif attention_option == \"bahdanau\":\n attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(\n num_units, memory, memory_sequence_length=source_sequence_length)\n elif attention_option == \"normed_bahdanau\":\n attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(\n num_units,\n memory,\n memory_sequence_length=source_sequence_length,\n normalize=True)\n else:\n raise ValueError(\"Unknown attention option %s\" % attention_option)\n\n return attention_mechanism\n\n\ndef _create_attention_images_summary(final_context_state):\n \"\"\"create attention image and attention summary.\"\"\"\n attention_images = (final_context_state.alignment_history.stack())\n # Reshape to (batch, src_seq_len, tgt_seq_len,1)\n attention_images = tf.expand_dims(\n tf.transpose(attention_images, [1, 2, 0]), -1)\n # Scale to range [0, 255]\n attention_images *= 255\n attention_summary = tf.summary.image(\"attention_images\", attention_images)\n return attention_summary\n"
] | [
[
"tensorflow.constant",
"tensorflow.test.main",
"tensorflow.placeholder",
"tensorflow.set_random_seed",
"tensorflow.tables_initializer",
"tensorflow.contrib.training.HParams"
],
[
"tensorflow.device",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.contrib.rnn.GRUCell",
"tensorflow.keras.initializers.glorot_normal",
"tensorflow.gfile.Exists",
"tensorflow.gfile.MakeDirs",
"tensorflow.data.TextLineDataset",
"tensorflow.summary.scalar",
"tensorflow.contrib.framework.list_variables",
"tensorflow.Graph",
"tensorflow.all_variables",
"tensorflow.contrib.rnn.LayerNormBasicLSTMCell",
"tensorflow.Variable",
"tensorflow.random_uniform_initializer",
"tensorflow.contrib.rnn.MultiRNNCell",
"tensorflow.initialize_all_variables",
"tensorflow.Session",
"tensorflow.contrib.rnn.NASCell",
"numpy.zeros",
"tensorflow.contrib.rnn.DeviceWrapper",
"tensorflow.contrib.framework.load_checkpoint",
"tensorflow.placeholder",
"tensorflow.python.ops.lookup_ops.index_to_string_table_from_file",
"tensorflow.global_variables_initializer",
"tensorflow.train.NewCheckpointReader",
"tensorflow.contrib.rnn.ResidualWrapper",
"tensorflow.gfile.Glob",
"tensorflow.keras.initializers.glorot_uniform",
"tensorflow.global_norm",
"tensorflow.container",
"tensorflow.train.get_checkpoint_state",
"tensorflow.constant",
"tensorflow.train.latest_checkpoint",
"tensorflow.slice",
"tensorflow.fixed_size_partitioner",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.contrib.rnn.DropoutWrapper",
"tensorflow.assign",
"tensorflow.clip_by_global_norm",
"tensorflow.variable_scope",
"tensorflow.tables_initializer"
],
[
"tensorflow.transpose",
"tensorflow.contrib.seq2seq.LuongAttention",
"tensorflow.summary.image",
"tensorflow.contrib.seq2seq.BahdanauAttention",
"tensorflow.contrib.seq2seq.tile_batch",
"tensorflow.no_op",
"tensorflow.contrib.seq2seq.AttentionWrapper"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
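`avg_checkpoints` in the model_helper.py source above sums each variable over the last N checkpoints and divides by N before saving the result. Below is a small self-contained sketch of just that averaging step, with checkpoint reading stubbed out as plain dicts of name -> array (in the source the tensors come from tf.contrib.framework.load_checkpoint); this is an illustration under those assumptions, not the repo's code.

import numpy as np

def average_variables(checkpoints):
    # checkpoints: list of {var_name: np.ndarray} with identical keys and shapes.
    var_values = {name: np.zeros_like(v) for name, v in checkpoints[0].items()}
    for ckpt in checkpoints:
        for name in var_values:
            var_values[name] += ckpt[name]
    for name in var_values:
        var_values[name] /= len(checkpoints)
    return var_values

ckpts = [{"w": np.array([1.0, 2.0])}, {"w": np.array([3.0, 4.0])}]
print(average_variables(ckpts))  # {'w': array([2., 3.])}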
OlegSomov/light-motion-analysis | [
"4f510250aaa32929a6ccff3c796b53151addb9e9"
] | [
"misc/graph.py"
] | [
"import os\nimport matplotlib\nimport json\nfrom datetime import datetime\nfrom matplotlib import pyplot\n\n\ndef show_results_graph(timer, name=None):\n with (open('light_plot.json', 'r')) as f:\n data = json.load(f)\n\n with (open('light_plot_imporved.json', 'r')) as f:\n data_improved = json.load(f)\n\n os.remove('light_plot.json')\n os.remove('light_plot_imporved.json')\n x = []\n y = []\n x_improved = []\n y_improved = []\n\n for item in data:\n date = datetime.strptime(item['x'], \"%Y-%m-%d %H:%M:%S\")\n x.append(date)\n if item['y'] == 1:\n y.append(item['y'] + 0.1) # to distinct normal light and improved light states\n else:\n y.append(item['y'])\n\n for item in data_improved:\n date = datetime.strptime(item['x'], \"%Y-%m-%d %H:%M:%S\")\n x_improved.append(date)\n y_improved.append(item['y'])\n\n dates_normal = matplotlib.dates.date2num(x)\n dates_improved = matplotlib.dates.date2num(x_improved)\n\n matplotlib.pyplot.plot_date(dates_normal, y, 'b-', label=\"Regular data\", linewidth=2)\n matplotlib.pyplot.plot_date(dates_improved, y_improved, 'b-', color=\"red\", label=\"Possible improvement\", linewidth=2)\n pyplot.title(\"Compare actual data and possible improvement ({} minutes)\".format(timer))\n pyplot.legend()\n if name:\n pyplot.savefig(\"result.png\")\n pyplot.show()\n"
] | [
[
"matplotlib.pyplot.plot_date",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"matplotlib.dates.date2num",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
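`show_results_graph` above overlays two date-indexed step series, nudging the regular series from 1 to 1.1 so the two lines stay distinguishable where they coincide. Below is a hypothetical, self-contained sketch of the same comparison plot with synthetic data standing in for the light_plot*.json files; modern matplotlib accepts datetime x-values directly, so the deprecated plot_date/date2num pair is replaced with plain plot here.

from datetime import datetime, timedelta
from matplotlib import pyplot as plt

start = datetime(2020, 1, 1)
times = [start + timedelta(minutes=10 * i) for i in range(6)]
y_regular = [0, 1.1, 1.1, 0, 1.1, 0]   # 1 shifted to 1.1, as in the source
y_improved = [0, 1, 0, 0, 1, 0]

plt.plot(times, y_regular, "b-", label="Regular data", linewidth=2)
plt.plot(times, y_improved, "r-", label="Possible improvement", linewidth=2)
plt.title("Compare actual data and possible improvement (10 minutes)")
plt.legend()
plt.show()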
wanghongsheng01/framework_enflame | [
"debf613e05e3f5ea8084c3e79b60d0dd9e349526",
"debf613e05e3f5ea8084c3e79b60d0dd9e349526",
"debf613e05e3f5ea8084c3e79b60d0dd9e349526",
"debf613e05e3f5ea8084c3e79b60d0dd9e349526",
"debf613e05e3f5ea8084c3e79b60d0dd9e349526",
"debf613e05e3f5ea8084c3e79b60d0dd9e349526",
"debf613e05e3f5ea8084c3e79b60d0dd9e349526",
"debf613e05e3f5ea8084c3e79b60d0dd9e349526",
"d2096ae14cf847509394a3b717021e2bd1d72f62",
"d2096ae14cf847509394a3b717021e2bd1d72f62",
"debf613e05e3f5ea8084c3e79b60d0dd9e349526",
"debf613e05e3f5ea8084c3e79b60d0dd9e349526"
] | [
"oneflow/python/test/ops/test_function_input_output.py",
"oneflow/python/test/ops/test_binary_elementwise_ops.py",
"oneflow/python/test/ops/test_all_reduce_group.py",
"oneflow/python/test/ops/test_gelu.py",
"oneflow/python/test/ops/test_gather_nd.py",
"oneflow/python/test/ops/test_nn_conv2d_padding_dynamic.py",
"oneflow/python/test/ops/test_sort.py",
"oneflow/python/test/ops/test_partial_fc.py",
"oneflow/python/test/ops/test_constant_like.py",
"oneflow/python/test/ops/test_sqrt.py",
"oneflow/python/test/ops/test_dropout.py",
"oneflow/python/test/ops/test_demo_matmul.py"
] | [
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport numpy as np\nimport oneflow as flow\nimport oneflow.typing as oft\nimport oneflow._oneflow_internal\nfrom typing import Tuple\n\n\[email protected]_unless_1n4d()\nclass TestFunctionInputOutput(flow.unittest.TestCase):\n def test_FixedTensorDef(test_case):\n @flow.global_function()\n def Foo(x: oft.Numpy.Placeholder((2, 5))):\n return x\n\n data = np.ones((2, 5), dtype=np.float32)\n of_ret = Foo(data).get()\n test_case.assertEqual(of_ret.numpy().max(), 1)\n test_case.assertEqual(of_ret.numpy().min(), 1)\n test_case.assertTrue(np.allclose(of_ret.numpy(), data))\n\n def test_FixedTensorDef_2_device(test_case):\n flow.config.gpu_device_num(2)\n\n @flow.global_function()\n def Foo(x: oft.Numpy.Placeholder((2, 5))):\n return x\n\n data = np.ones((2, 5), dtype=np.float32)\n of_ret = Foo(data).get()\n test_case.assertEqual(of_ret.numpy().max(), 1)\n test_case.assertEqual(of_ret.numpy().min(), 1)\n test_case.assertTrue(np.allclose(of_ret.numpy(), data))\n\n def test_MirroredTensorDef(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_logical_view(flow.scope.mirrored_view())\n\n @flow.global_function(function_config=func_config)\n def Foo(x: oft.ListNumpy.Placeholder((2, 5))):\n return x\n\n data = np.ones((1, 5), dtype=np.float32)\n ndarray_list = Foo([data]).get().numpy_list()\n test_case.assertEqual(len(ndarray_list), 1)\n test_case.assertTrue(np.allclose(ndarray_list[0], data))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport oneflow as flow\nfrom collections import OrderedDict\nimport oneflow.typing as oft\n\nimport test_global_storage\nfrom test_util import (\n GenArgDict,\n GenArgList,\n type_name_to_flow_type,\n type_name_to_np_type,\n)\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n\ndef RunOneflowBinaryOp(device_type, flow_op, x, y, data_type):\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n\n flow_type = type_name_to_flow_type[data_type]\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def FlowJob(\n x: oft.Numpy.Placeholder(x.shape, dtype=flow_type),\n y: oft.Numpy.Placeholder(y.shape, dtype=flow_type),\n ):\n with flow.scope.placement(device_type, \"0:0\"):\n x += flow.get_variable(\n name=\"x\",\n shape=x.shape,\n dtype=flow_type,\n initializer=flow.zeros_initializer(),\n trainable=True,\n )\n y += flow.get_variable(\n name=\"y\",\n shape=y.shape,\n dtype=flow_type,\n initializer=flow.zeros_initializer(),\n trainable=True,\n )\n loss = flow_op(x, y)\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0\n ).minimize(loss)\n flow.watch_diff(x, test_global_storage.Setter(\"x_diff\"))\n flow.watch_diff(y, test_global_storage.Setter(\"y_diff\"))\n\n return loss\n\n # Oneflow\n out = FlowJob(x, y).get().numpy()\n x_diff = test_global_storage.Get(\"x_diff\")\n y_diff = test_global_storage.Get(\"y_diff\")\n return out, x_diff, y_diff\n\n\ndef RunTensorFlowBinaryOp(tf_op, x, y):\n # TensorFlow\n with tf.GradientTape(persistent=True) as tape:\n x = tf.Variable(x)\n y = tf.Variable(y)\n out = tf_op(x, y)\n x_diff = tape.gradient(out, x)\n y_diff = tape.gradient(out, y)\n return out.numpy(), x_diff, y_diff\n\n\ndef compare_with_tensorflow(\n test_case,\n device_type,\n flow_op,\n tf_op,\n x_shape,\n y_shape,\n data_type,\n x_minval=-10,\n x_maxval=10,\n y_minval=-10,\n y_maxval=10,\n compare_grad=True,\n out_rtol=1e-5,\n out_atol=1e-5,\n diff_rtol=1e-5,\n diff_atol=1e-5,\n):\n test_case.assertTrue(device_type in [\"gpu\", \"cpu\"])\n\n np_type = type_name_to_np_type[data_type]\n x = np.random.uniform(low=x_minval, high=x_maxval, size=x_shape).astype(np_type)\n y = np.random.uniform(low=y_minval, high=y_maxval, size=y_shape).astype(np_type)\n\n of_out, of_x_diff, of_y_diff, = RunOneflowBinaryOp(\n device_type, flow_op, x, y, data_type\n )\n tf_out, tf_x_diff, tf_y_diff = RunTensorFlowBinaryOp(tf_op, x, y)\n\n test_case.assertTrue(\n np.allclose(of_out, tf_out, rtol=out_rtol, atol=out_atol, equal_nan=True)\n )\n if compare_grad:\n test_case.assertTrue(\n np.allclose(\n of_x_diff,\n tf_x_diff.numpy(),\n rtol=diff_rtol,\n atol=diff_atol,\n equal_nan=True,\n )\n )\n test_case.assertTrue(\n np.allclose(\n of_y_diff,\n 
tf_y_diff.numpy(),\n rtol=diff_rtol,\n atol=diff_atol,\n equal_nan=True,\n )\n )\n flow.clear_default_session()\n\n\[email protected]_unless_1n1d()\nclass TestBinaryElementwiseOps(flow.unittest.TestCase):\n def test_floordiv(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_case\"] = [test_case]\n arg_dict[\"device_type\"] = [\"gpu\", \"cpu\"]\n arg_dict[\"flow_op\"] = [flow.math.floordiv]\n arg_dict[\"tf_op\"] = [tf.math.floordiv]\n arg_dict[\"x_shape\"] = [(5, 5,)]\n arg_dict[\"y_shape\"] = [(5, 5,)]\n arg_dict[\"data_type\"] = [\"float32\", \"double\"]\n arg_dict[\"x_minval\"] = [-10]\n arg_dict[\"x_maxval\"] = [10]\n arg_dict[\"y_minval\"] = [1]\n arg_dict[\"y_maxval\"] = [10]\n arg_dict[\"compare_grad\"] = [False]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n def test_pow(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_case\"] = [test_case]\n arg_dict[\"device_type\"] = [\"gpu\", \"cpu\"]\n arg_dict[\"flow_op\"] = [flow.math.pow]\n arg_dict[\"tf_op\"] = [tf.math.pow]\n arg_dict[\"x_shape\"] = [(5, 5,)]\n arg_dict[\"y_shape\"] = [(5, 5,)]\n arg_dict[\"data_type\"] = [\"float32\", \"double\"]\n arg_dict[\"x_minval\"] = [1]\n arg_dict[\"x_maxval\"] = [5]\n arg_dict[\"y_minval\"] = [1]\n arg_dict[\"y_maxval\"] = [5]\n arg_dict[\"compare_grad\"] = [True]\n\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n def test_xdivy(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_case\"] = [test_case]\n arg_dict[\"device_type\"] = [\"gpu\", \"cpu\"]\n arg_dict[\"flow_op\"] = [flow.math.xdivy]\n arg_dict[\"tf_op\"] = [tf.math.xdivy]\n arg_dict[\"x_shape\"] = [(5, 5,)]\n arg_dict[\"y_shape\"] = [(5, 5,)]\n arg_dict[\"data_type\"] = [\"float32\", \"double\"]\n arg_dict[\"x_minval\"] = [1]\n arg_dict[\"x_maxval\"] = [100]\n arg_dict[\"y_minval\"] = [1]\n arg_dict[\"y_maxval\"] = [10]\n arg_dict[\"compare_grad\"] = [True]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n def test_xlogy(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_case\"] = [test_case]\n arg_dict[\"device_type\"] = [\"gpu\", \"cpu\"]\n arg_dict[\"flow_op\"] = [flow.math.xlogy]\n arg_dict[\"tf_op\"] = [tf.math.xlogy]\n arg_dict[\"x_shape\"] = [(5, 5,)]\n arg_dict[\"y_shape\"] = [(5, 5,)]\n arg_dict[\"data_type\"] = [\"float32\", \"double\"]\n arg_dict[\"x_minval\"] = [1]\n arg_dict[\"x_maxval\"] = [5]\n arg_dict[\"y_minval\"] = [1]\n arg_dict[\"y_maxval\"] = [5]\n arg_dict[\"compare_grad\"] = [True]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n def test_atan2(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_case\"] = [test_case]\n arg_dict[\"device_type\"] = [\"gpu\", \"cpu\"]\n arg_dict[\"flow_op\"] = [flow.math.atan2]\n arg_dict[\"tf_op\"] = [tf.math.atan2]\n arg_dict[\"x_shape\"] = [(5, 5,)]\n arg_dict[\"y_shape\"] = [(5, 5,)]\n arg_dict[\"data_type\"] = [\"float32\", \"double\"]\n arg_dict[\"x_minval\"] = [1]\n arg_dict[\"x_maxval\"] = [5]\n arg_dict[\"y_minval\"] = [1]\n arg_dict[\"y_maxval\"] = [5]\n arg_dict[\"compare_grad\"] = [True]\n\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom collections import OrderedDict\n\nimport numpy as np\nimport oneflow as flow\nfrom test_util import GenArgList\nimport unittest\nimport os\n\n\ndef do_test(test_case, mirrored):\n flow.clear_default_session()\n flow.config.gpu_device_num(2)\n func_config = flow.FunctionConfig()\n\n if mirrored:\n func_config.default_logical_view(flow.scope.mirrored_view())\n else:\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def Foo():\n w = flow.get_variable(\"w\", (10,), initializer=flow.constant_initializer(1))\n lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [5])\n flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(w)\n return w\n\n r1 = Foo().get().numpy()\n test_case.assertTrue(np.all(r1 == 1.0))\n r2 = Foo().get().numpy()\n test_case.assertTrue(np.all(r2 == 0.5))\n\n\[email protected]_unless_1n2d()\nclass TestAllReduceGroup(flow.unittest.TestCase):\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_variable_as_loss_on_two_device(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"mirrored\"] = [True, False]\n for arg in GenArgList(arg_dict):\n do_test(test_case, *arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport math\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\nimport oneflow as flow\nimport tensorflow as tf\nfrom test_util import GenArgDict, RunOneflowOp\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n\ndef tf_gelu(x):\n inv_sqrt2 = math.sqrt(0.5)\n with tf.GradientTape(persistent=True) as tape:\n x = tf.Variable(x)\n y = 0.5 * x * (1 + tf.math.erf(inv_sqrt2 * x))\n x_diff = tape.gradient(y, x)\n return y.numpy(), x_diff.numpy()\n\n\[email protected]_unless_1n1d()\nclass TestGelu(flow.unittest.TestCase):\n def test_gelu(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"flow_op\"] = [flow.math.gelu]\n arg_dict[\"flow_args\"] = [[]]\n arg_dict[\"x\"] = [\n np.random.uniform(low=-100, high=100, size=(10, 20, 30, 40)).astype(\n np.float32\n )\n ]\n for arg in GenArgDict(arg_dict):\n of_y, of_x_diff = RunOneflowOp(**arg)\n tf_y, tf_x_diff = tf_gelu(arg[\"x\"])\n\n assert np.allclose(of_y, tf_y, rtol=1e-5, atol=1e-5)\n assert np.allclose(of_x_diff, tf_x_diff, rtol=1e-5, atol=1e-5)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nimport numpy as np\nimport unittest\nfrom collections import OrderedDict\nimport oneflow as flow\nfrom test_util import GenArgDict, type_name_to_flow_type, type_name_to_np_type\n\n\ndef _random_inputs(x_shape, x_dtype, index_shape, index_dtype):\n assert isinstance(x_shape, (tuple, list))\n assert isinstance(index_shape, (tuple, list))\n assert index_dtype == np.int32 or index_dtype == np.int64\n\n if x_dtype == np.float32 or x_dtype == np.double:\n x = np.random.rand(*x_shape).astype(x_dtype)\n elif x_dtype == np.int32 or x_dtype == np.int64 or x_dtype == np.int8:\n x = np.random.randint(low=0, high=100, size=x_shape).astype(x_dtype)\n else:\n raise NotImplementedError(\"{}\".format(x_dtype))\n\n index = []\n index_rows = np.prod(index_shape[:-1])\n index_cols = index_shape[-1]\n for col in range(index_cols):\n index_col = np.random.randint(\n low=0, high=x_shape[col], size=(index_rows,), dtype=index_dtype\n ).reshape(index_shape[:-1])\n index.append(index_col)\n index = np.stack(index, axis=len(index_shape) - 1)\n return x, index\n\n\ndef _make_gather_nd_fn(\n x_shape,\n index_shape,\n x_dtype,\n index_type,\n device_type,\n device_num,\n dynamic,\n need_grad,\n comp_diff_fn,\n):\n assert device_num >= 1\n fn_type = \"train\" if need_grad else \"predict\"\n\n if device_type == \"gpu\":\n flow.config.gpu_device_num(device_num)\n elif device_type == \"cpu\":\n flow.config.cpu_device_num(device_num)\n else:\n raise ValueError\n\n func_config = flow.FunctionConfig()\n func_config.default_data_type(x_dtype)\n func_config.default_placement_scope(\n flow.scope.placement(device_type, \"0:0-{}\".format(device_num - 1))\n )\n if dynamic:\n func_config.default_logical_view(flow.scope.mirrored_view())\n else:\n func_config.default_logical_view(flow.scope.consistent_view())\n\n def do_gather_nd(x, index):\n x_var = flow.get_variable(\n \"params\",\n shape=(1,),\n dtype=x_dtype,\n initializer=flow.constant_initializer(0, x_dtype),\n )\n x = x + flow.cast_to_current_logical_view(x_var)\n y = flow.gather_nd(x, index)\n if need_grad:\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0\n ).minimize(y)\n if callable(comp_diff_fn):\n flow.watch_diff(x, comp_diff_fn)\n return y\n\n if dynamic:\n\n @flow.global_function(type=fn_type, function_config=func_config)\n def gather_nd_fn(\n x: flow.typing.ListNumpy.Placeholder(x_shape, dtype=x_dtype),\n index: flow.typing.ListNumpy.Placeholder(index_shape, dtype=index_type),\n ) -> flow.typing.ListNumpy:\n return do_gather_nd(x, index)\n\n else:\n\n @flow.global_function(type=fn_type, function_config=func_config)\n def gather_nd_fn(\n x: flow.typing.Numpy.Placeholder(x_shape, dtype=x_dtype),\n index: flow.typing.Numpy.Placeholder(index_shape, dtype=index_type),\n ) -> flow.typing.Numpy:\n return do_gather_nd(x, index)\n\n return gather_nd_fn\n\n\ndef _gather_nd_np(x, index, require_grad=False, 
init_grad_value=1.0):\n ndim = index.shape[-1]\n assert ndim <= x.ndim\n indices = []\n for dim in range(ndim):\n indices.append(index[..., dim])\n\n y = x[tuple(indices)]\n dy = None\n dx = None\n if require_grad:\n dy = np.zeros(shape=y.shape, dtype=np.float32)\n dy.fill(init_grad_value)\n dx = np.zeros(shape=x.shape, dtype=np.float32)\n flat_index = index.reshape(-1, ndim)\n flat_dy = dy.reshape(-1, *y.shape[(index.ndim - 1) :])\n for i, nd_index in enumerate(flat_index):\n if dx.ndim == ndim:\n ravel_index = np.ravel_multi_index(nd_index, dx.shape)\n dx_partial = np.zeros(shape=dx.shape, dtype=np.float32)\n np.put(dx_partial, ravel_index, flat_dy[i])\n dx += dx_partial\n else:\n dx[tuple(nd_index)] += flat_dy[i]\n\n return y, dx\n\n\ndef _is_floating_dtype(dtype):\n if dtype in (\"float32\", \"double\", \"float16\"):\n return True\n\n return False\n\n\ndef _compare_with_np(\n test_case,\n shape,\n index_shape,\n dynamic_shape=None,\n dynamic_index_shape=None,\n dtype=\"float32\",\n index_dtype=\"int32\",\n device_type=\"gpu\",\n device_num=1,\n dynamic=False,\n):\n x_is_floating = _is_floating_dtype(dtype)\n need_grad = True if x_is_floating else False\n x_of_dtype = type_name_to_flow_type[dtype]\n index_of_dtype = type_name_to_flow_type[index_dtype]\n x_dtype = type_name_to_np_type[dtype]\n index_dtype = type_name_to_np_type[index_dtype]\n\n if dynamic_shape is None:\n dynamic_shape = shape\n else:\n dynamic = True\n\n if dynamic_index_shape is None:\n dynamic_index_shape = index_shape\n else:\n dynamic = True\n\n if dynamic:\n x, index, y, dx = [], [], [], []\n for _ in range(device_num):\n x_, index_ = _random_inputs(\n dynamic_shape, x_dtype, dynamic_index_shape, index_dtype\n )\n y_, dx_ = _gather_nd_np(x_, index_, need_grad)\n x.append(x_)\n index.append(index_)\n y.append(y_)\n dx.append(dx_)\n\n def comp_diff(dx_blob: flow.typing.ListNumpy):\n for dx_blob_, dx_ in zip(dx_blob, dx):\n test_case.assertTrue(np.array_equal(dx_blob_, dx_))\n\n else:\n x, index = _random_inputs(\n dynamic_shape, x_dtype, dynamic_index_shape, index_dtype\n )\n y, dx = _gather_nd_np(x, index, need_grad)\n\n def comp_diff(dx_blob: flow.typing.Numpy):\n test_case.assertTrue(np.array_equal(dx_blob, dx))\n\n flow.clear_default_session()\n gather_nd_fn = _make_gather_nd_fn(\n shape,\n index_shape,\n x_of_dtype,\n index_of_dtype,\n device_type,\n device_num,\n dynamic,\n need_grad,\n comp_diff if device_num == 1 else None,\n )\n ret_y = gather_nd_fn(x, index)\n\n if dynamic:\n for ret_y_, y_ in zip(ret_y, y):\n test_case.assertTrue(np.array_equal(ret_y_, y_))\n else:\n test_case.assertTrue(np.array_equal(ret_y, y))\n\n\[email protected]_unless_1n1d()\nclass TestGatherNd(flow.unittest.TestCase):\n def test_gather_nd(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(10,)]\n arg_dict[\"index_shape\"] = [(5, 1)]\n arg_dict[\"dtype\"] = [\"float32\", \"int32\", \"double\"]\n arg_dict[\"index_dtype\"] = [\"int32\", \"int64\"]\n arg_dict[\"device_type\"] = [\"gpu\", \"cpu\"]\n arg_dict[\"dynamic\"] = [False, True]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_gather_nd_case_1(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(20, 10, 10, 3, 3)]\n arg_dict[\"index_shape\"] = [(2, 3, 3)]\n arg_dict[\"device_type\"] = [\"gpu\"]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n def test_gather_nd_case_2(test_case):\n arg_dict = OrderedDict()\n 
arg_dict[\"shape\"] = [(10, 8, 4)]\n arg_dict[\"index_shape\"] = [(2, 2)]\n arg_dict[\"dtype\"] = [\"float32\", \"int32\"]\n arg_dict[\"index_dtype\"] = [\"int32\", \"int64\"]\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"dynamic\"] = [True]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_gather_nd_case_3(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(32, 60, 80, 25)]\n arg_dict[\"index_shape\"] = [(128, 2)]\n arg_dict[\"device_type\"] = [\"gpu\"]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_gather_nd_case_4(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(128, 64, 2, 16, 7)]\n arg_dict[\"index_shape\"] = [(30, 10, 3)]\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"dynamic\"] = [True]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n def test_with_dynamic_x(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(32, 16)]\n arg_dict[\"dynamic_shape\"] = [(30, 15)]\n arg_dict[\"index_shape\"] = [(12, 1)]\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n def test_with_dynamic_index(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(25, 10)]\n arg_dict[\"index_shape\"] = [(15, 1)]\n arg_dict[\"dynamic_index_shape\"] = [(11, 1)]\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n def test_with_empty_index(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(12, 13, 7)]\n arg_dict[\"index_shape\"] = [(5, 10, 2)]\n arg_dict[\"dynamic_index_shape\"] = [(5, 0, 2)]\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n\n# @flow.unittest.skip_unless_1n4d()\n# TODO(zhangwenxiao, jiangxuefei): refine in multi-client\[email protected](True, \"skip for now because of single-client tensor_list removed\")\nclass TestGatherNdParallel(flow.unittest.TestCase):\n def test_case_1(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(12, 5)]\n arg_dict[\"index_shape\"] = [(4, 8, 2)]\n arg_dict[\"dtype\"] = [\"float32\", \"int32\", \"double\"]\n arg_dict[\"index_dtype\"] = [\"int32\", \"int64\"]\n arg_dict[\"device_type\"] = [\"gpu\", \"cpu\"]\n arg_dict[\"device_num\"] = [4]\n arg_dict[\"dynamic\"] = [True, False]\n for arg in GenArgDict(arg_dict):\n _compare_with_np(test_case, **arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\nimport oneflow as flow\nimport tensorflow as tf\nfrom test_util import GenArgList\nimport oneflow.typing as oft\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\nglobal_storage = {}\n\n\ndef global_storage_setter(name):\n global global_storage\n\n def _set(x):\n global_storage[name] = x\n\n return _set\n\n\ndef compare_with_tensorflow(\n device_type,\n x_shape,\n filters,\n kernel_size,\n groups,\n of_padding=\"SAME\",\n tf_padding=\"SAME\",\n stride=1,\n data_format=\"NCHW\",\n):\n assert device_type in [\"gpu\", \"cpu\"]\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.mirrored_view())\n\n if data_format == \"NCHW\":\n xy_data_transpose = (0, 2, 3, 1)\n weight_data_transpose = (2, 3, 1, 0)\n else:\n xy_data_transpose = (0, 1, 2, 3)\n weight_data_transpose = (1, 2, 3, 0)\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def DynamicConvJob(x: oft.ListNumpy.Placeholder((10, 3, 100, 100))):\n with flow.scope.placement(device_type, \"0:0\"):\n x_var = flow.get_variable(\n name=\"v1\",\n shape=(1,),\n dtype=flow.float,\n initializer=flow.zeros_initializer(),\n )\n x_var = flow.cast_to_current_logical_view(x_var)\n x += x_var\n if data_format == \"NCHW\":\n weight_shape = (filters, x_shape[1] // groups, kernel_size, kernel_size)\n else:\n weight_shape = (filters, kernel_size, kernel_size, x_shape[3] // groups)\n weight = flow.get_variable(\n \"conv-weight\",\n shape=weight_shape,\n dtype=flow.float,\n initializer=flow.random_uniform_initializer(minval=0, maxval=100),\n )\n weight = flow.cast_to_current_logical_view(weight)\n loss = flow.nn.conv2d(\n x,\n weight,\n strides=[stride, stride],\n padding=of_padding,\n data_format=data_format,\n dilations=[1, 1],\n groups=groups,\n )\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0\n ).minimize(loss)\n\n flow.watch(x, global_storage_setter(\"x\"))\n flow.watch_diff(x, global_storage_setter(\"x_diff\"))\n flow.watch(weight, global_storage_setter(\"weight\"))\n flow.watch_diff(weight, global_storage_setter(\"weight_diff\"))\n flow.watch(loss, global_storage_setter(\"loss\"))\n flow.watch_diff(loss, global_storage_setter(\"loss_diff\"))\n\n return loss\n\n # OneFlow\n data = [np.random.rand(*x_shape).astype(np.float32)]\n of_out = DynamicConvJob(data).get().numpy_list()[0]\n # TensorFlow\n with tf.GradientTape(persistent=True) as tape:\n x = tf.Variable(data[0].transpose(xy_data_transpose))\n assert groups > 0\n assert x_shape[1] % groups == 0\n assert filters % groups == 0\n weight = tf.Variable(\n global_storage[\"weight\"].numpy().transpose(weight_data_transpose)\n )\n\n tf_out = tf.nn.conv2d(\n x,\n 
weight,\n strides=[1, stride, stride, 1],\n padding=tf_padding,\n data_format=\"NHWC\",\n )\n\n idx = np.where(np.abs(of_out.transpose(xy_data_transpose) - tf_out.numpy()) > 5e-4)\n assert np.allclose(\n of_out.transpose(xy_data_transpose), tf_out.numpy(), rtol=1e-3, atol=1e-3,\n )\n\n loss_diff = global_storage[\"loss_diff\"].numpy_list()[0].transpose(xy_data_transpose)\n tf_x_diff = tape.gradient(tf_out, x, loss_diff)\n tf_weight_diff = tape.gradient(tf_out, weight, loss_diff)\n rtol = 1e-4\n atol = 1e-4\n if device_type == \"cpu\":\n rtol *= 100\n atol *= 100\n assert np.allclose(\n global_storage[\"x_diff\"].numpy_list()[0].transpose(xy_data_transpose),\n tf_x_diff.numpy(),\n rtol=rtol,\n atol=atol,\n ), (\n global_storage[\"x_diff\"].numpy_list()[0].transpose(xy_data_transpose)\n - tf_x_diff.numpy()\n )\n assert np.allclose(\n global_storage[\"weight_diff\"].numpy().transpose(weight_data_transpose),\n tf_weight_diff.numpy(),\n rtol=5e-3,\n atol=5e-3,\n )\n\n\[email protected]_unless_1n1d()\[email protected](\"skip_for_ci\")\nclass TestNnConv2dPaddingDynamic(flow.unittest.TestCase):\n def test_padding_valid(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"x_shape\"] = [(10, 3, 10, 10), (10, 3, 11, 11)]\n arg_dict[\"filters\"] = [64]\n arg_dict[\"kernel_size\"] = [3, 2]\n arg_dict[\"groups\"] = [1]\n arg_dict[\"of_padding\"] = [\"VALID\"]\n arg_dict[\"tf_padding\"] = [\"VALID\"]\n arg_dict[\"stride\"] = [1, 2]\n arg_dict[\"data_format\"] = [\"NCHW\"]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n def test_padding_same(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"x_shape\"] = [(10, 3, 10, 10), (10, 3, 11, 11)]\n arg_dict[\"filters\"] = [64]\n arg_dict[\"kernel_size\"] = [3, 2]\n arg_dict[\"groups\"] = [1]\n arg_dict[\"of_padding\"] = [\"SAME_UPPER\"]\n arg_dict[\"tf_padding\"] = [\"SAME\"]\n arg_dict[\"stride\"] = [1, 2]\n arg_dict[\"data_format\"] = [\"NCHW\"]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n def test_pad_list1(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"x_shape\"] = [(10, 3, 10, 10), (10, 3, 11, 11)]\n arg_dict[\"filters\"] = [64]\n arg_dict[\"kernel_size\"] = [3, 2]\n arg_dict[\"groups\"] = [1]\n arg_dict[\"of_padding\"] = [[[0, 0], [0, 0], [0, 1], [1, 0]]]\n arg_dict[\"tf_padding\"] = [[[0, 0], [0, 1], [1, 0], [0, 0]]]\n arg_dict[\"stride\"] = [1, 2]\n arg_dict[\"data_format\"] = [\"NCHW\"]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n def test_pad_list2(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"x_shape\"] = [(10, 3, 10, 10), (10, 3, 11, 11)]\n arg_dict[\"filters\"] = [64]\n arg_dict[\"kernel_size\"] = [3, 2]\n arg_dict[\"groups\"] = [1]\n arg_dict[\"of_padding\"] = [[[0, 0], [0, 0], [1, 1], [1, 1]]]\n arg_dict[\"tf_padding\"] = [[[0, 0], [1, 1], [1, 1], [0, 0]]]\n arg_dict[\"stride\"] = [1, 2]\n arg_dict[\"data_format\"] = [\"NCHW\"]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n def test_pad_list3(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"x_shape\"] = [(10, 3, 10, 10), (10, 3, 11, 11)]\n arg_dict[\"filters\"] = [64]\n arg_dict[\"kernel_size\"] = [3, 2]\n arg_dict[\"groups\"] = [1]\n arg_dict[\"of_padding\"] = [[[0, 0], [0, 0], [1, 0], [1, 0]]]\n arg_dict[\"tf_padding\"] = [[[0, 0], [1, 0], [1, 0], [0, 0]]]\n arg_dict[\"stride\"] = [1, 2]\n 
arg_dict[\"data_format\"] = [\"NCHW\"]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n def test_pad_list4(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"x_shape\"] = [(10, 3, 10, 10), (10, 3, 11, 11)]\n arg_dict[\"filters\"] = [64]\n arg_dict[\"kernel_size\"] = [3, 2]\n arg_dict[\"groups\"] = [1]\n arg_dict[\"of_padding\"] = [[[0, 0], [0, 0], [10, 2], [10, 2]]]\n arg_dict[\"tf_padding\"] = [[[0, 0], [10, 2], [10, 2], [0, 0]]]\n arg_dict[\"stride\"] = [1, 2]\n arg_dict[\"data_format\"] = [\"NCHW\"]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nfrom collections import OrderedDict\n\nimport os\nimport numpy as np\nimport oneflow as flow\nimport tensorflow as tf\nfrom test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\nimport oneflow.typing as oft\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n\ndef compare_with_tensorflow(device_type, in_shape, axis, direction, data_type):\n assert device_type in [\"gpu\", \"cpu\"]\n assert data_type in [\"float32\", \"double\", \"int8\", \"int32\", \"int64\"]\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_logical_view(flow.scope.mirrored_view())\n func_config.default_data_type(flow.float)\n\n @flow.global_function(function_config=func_config)\n def SortJob(\n input: oft.ListNumpy.Placeholder(\n tuple([dim + 10 for dim in in_shape]),\n dtype=type_name_to_flow_type[data_type],\n )\n ):\n with flow.scope.placement(device_type, \"0:0\"):\n return flow.sort(input, axis, direction)\n\n input = (np.random.random(in_shape) * 100).astype(type_name_to_np_type[data_type])\n # OneFlow\n of_out = SortJob([input]).get().numpy_list()[0]\n # TensorFlow\n tf_out = tf.sort(input, axis, direction)\n\n assert np.array_equal(of_out, tf_out.numpy())\n\n\ndef gen_arg_list():\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"in_shape\"] = [(10,), (10, 10, 20)]\n arg_dict[\"axis\"] = [-1]\n arg_dict[\"direction\"] = [\"ASCENDING\", \"DESCENDING\"]\n arg_dict[\"data_type\"] = [\"float32\", \"double\"]\n\n return GenArgList(arg_dict)\n\n\ndef gen_arg_list_for_test_axis():\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"in_shape\"] = [(10, 10, 20)]\n arg_dict[\"axis\"] = [-2, 0, 2]\n arg_dict[\"direction\"] = [\"ASCENDING\", \"DESCENDING\"]\n arg_dict[\"data_type\"] = [\"int32\", \"int64\"]\n\n return GenArgList(arg_dict)\n\n\[email protected]_unless_1n1d()\nclass TestSort(flow.unittest.TestCase):\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_sort(test_case):\n for arg in gen_arg_list():\n compare_with_tensorflow(*arg)\n for arg in gen_arg_list_for_test_axis():\n compare_with_tensorflow(*arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport os\nimport numpy as np\nimport oneflow as flow\nimport oneflow.typing as oft\nfrom collections import OrderedDict\n\nfrom test_util import GenArgList\nimport test_global_storage\nfrom test_util import type_name_to_flow_type\nfrom test_util import type_name_to_np_type\n\n\ndef compare_with_np(device_type, label_type, num_classes, num_sample, batch_size):\n assert device_type in [\"gpu\", \"cpu\"]\n flow.clear_default_session()\n if device_type == \"cpu\":\n flow.config.gpu_device_num(0)\n flow.config.cpu_device_num(4)\n else:\n flow.config.gpu_device_num(4)\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.indexed_slices_optimizer_conf(dict(include_op_names=dict(op_name=[])))\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def PartialFcJob(\n labels: oft.Numpy.Placeholder(\n (batch_size,), dtype=type_name_to_flow_type[label_type]\n )\n ):\n with flow.scope.placement(device_type, \"0:0\"):\n x = flow.get_variable(\n \"x-weight\",\n shape=(num_classes, 128),\n dtype=flow.float,\n initializer=flow.random_uniform_initializer(minval=-10, maxval=10),\n trainable=True,\n )\n with flow.scope.placement(device_type, \"0:0-3\"):\n lebels_distribute = flow.distribute.broadcast()\n weight_distribute = flow.distribute.split(0)\n (\n maped_label,\n sampled_label,\n sampled_weight,\n ) = flow.distributed_partial_fc_sample(\n weight=x.with_distribute(weight_distribute),\n label=labels.with_distribute(lebels_distribute),\n num_sample=num_sample,\n )\n with flow.scope.placement(device_type, \"0:0\"):\n sampled_weight = flow.identity(sampled_weight)\n loss = flow.math.square(sampled_weight)\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0\n ).minimize(loss)\n\n flow.watch(x, test_global_storage.Setter(\"x\"))\n flow.watch_diff(x, test_global_storage.Setter(\"x_diff\"))\n flow.watch_diff(\n sampled_weight, test_global_storage.Setter(\"sampled_weight_diff\")\n )\n return x, maped_label, sampled_label, sampled_weight\n\n # fake labels\n labels = np.random.randint(0, num_classes, size=(batch_size,)).astype(\n type_name_to_np_type[label_type]\n )\n\n # OneFlow\n weight, maped_label, sampled_label, sampled_weight = PartialFcJob(labels).get()\n\n gpu_num = 4\n device_class_num = num_classes // gpu_num\n device_num_sample = num_sample // gpu_num\n global_sample_labels_list = []\n np_mapped_label = []\n label_map = {}\n for i in range(gpu_num):\n lower = i * device_class_num\n upper = (i + 1) * device_class_num\n condition = (labels >= lower) & (labels < upper)\n local_label = labels[condition]\n local_label = np.unique(local_label).astype(np.int32)\n\n idx_start = int(i * device_num_sample)\n idx_end = int((i + 1) * device_num_sample)\n local_sample_labels = sampled_label[idx_start:idx_end]\n global_sample_labels = local_sample_labels\n 
global_sample_labels_list.append(global_sample_labels)\n\n assert (\n np.all((local_sample_labels >= lower) & (local_sample_labels < upper))\n == True\n )\n assert len(local_sample_labels) == len(np.unique(local_sample_labels))\n assert (\n np.array_equal(local_label, global_sample_labels[0 : len(local_label)])\n == True\n )\n for j in range(len(global_sample_labels)):\n label_map[global_sample_labels[j]] = j + idx_start\n\n for i in range(len(labels)):\n np_mapped_label.append(label_map[labels[i]])\n assert np.array_equal(np.array(np_mapped_label), maped_label.numpy()) == True\n\n global_sample_label = np.array(global_sample_labels_list).flatten().astype(np.int32)\n np_sample_weight = weight[global_sample_label]\n assert np.array_equal(sampled_weight.numpy(), np_sample_weight) == True\n\n sampled_weight_diff = test_global_storage.Get(\"sampled_weight_diff\")\n\n np_weight_diff = np.zeros(weight.shape)\n for i in range(len(global_sample_label)):\n np_weight_diff[global_sample_label[i]] = sampled_weight_diff[i]\n\n x_diff = test_global_storage.Get(\"x_diff\")\n\n assert np.array_equal(test_global_storage.Get(\"x_diff\"), np_weight_diff) == True\n\n\nflow.clear_default_session()\n\n\[email protected]_unless_1n4d()\nclass TestPartialFc(flow.unittest.TestCase):\n def test_partial_fc1(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"label_type\"] = [\"int32\"]\n arg_dict[\"num_classes\"] = [85744]\n arg_dict[\"num_sample\"] = [8600]\n arg_dict[\"batch_size\"] = [512]\n for arg in GenArgList(arg_dict):\n compare_with_np(*arg)\n\n def test_partial_fc2(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"label_type\"] = [\"int32\"]\n arg_dict[\"num_classes\"] = [200]\n arg_dict[\"num_sample\"] = [64]\n arg_dict[\"batch_size\"] = [32]\n for arg in GenArgList(arg_dict):\n compare_with_np(*arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\nimport oneflow as flow\nimport oneflow.typing as oft\nimport unittest\nimport os\n\n\ndef _check(test_case, x, y, value, dtype=None):\n np_constant_like = np.full(x.shape, value)\n test_case.assertTrue(np.array_equal(np_constant_like, y))\n\n\ndef _run_test(test_case, x, value, dtype=None, device=\"gpu\"):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(function_config=func_config)\n def ConstantLikeJob(x: oft.Numpy.Placeholder(x.shape)):\n return flow.constant_like(x, value=value, dtype=dtype)\n\n y = ConstantLikeJob(x).get()\n _check(test_case, x, y.numpy(), value, dtype=dtype)\n\n\[email protected]_unless_1n1d()\nclass TestConstantLike(flow.unittest.TestCase):\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_constant_like_gpu_float(test_case):\n x = np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 1.0, flow.float, \"gpu\")\n\n def test_constant_like_cpu_float(test_case):\n x = np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 2.0, flow.float, \"cpu\")\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_constant_like_gpu_double(test_case):\n x = np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 3.0, flow.double, \"gpu\")\n\n def test_constant_like_cpu_double(test_case):\n x = np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 4.0, flow.double, \"cpu\")\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_constant_like_gpu_int8(test_case):\n x = np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 5.0, flow.int8, \"gpu\")\n\n def test_constant_like_cpu_int8(test_case):\n x = np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 6.0, flow.int8, \"cpu\")\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_constant_like_gpu_int32(test_case):\n x = np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 7.0, flow.int32, \"gpu\")\n\n def test_constant_like_cpu_int32(test_case):\n x = np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 8.0, flow.int32, \"cpu\")\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_constant_like_gpu_int64(test_case):\n x = np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 9.0, flow.int64, \"gpu\")\n\n def test_constant_like_cpu_int64(test_case):\n x = np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 10.0, flow.int64, \"cpu\")\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_constant_like_gpu(test_case):\n x = 
np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 11.0, device=\"gpu\")\n\n def test_constant_like_cpu(test_case):\n x = np.random.rand(10, 3, 32, 1024).astype(np.float32)\n _run_test(test_case, x, 12.0, device=\"cpu\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\nimport oneflow as flow\nimport tensorflow as tf\nfrom test_util import CompareOpWithTensorFlow, GenArgDict\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n\[email protected]_unless_1n1d()\nclass TestSqrt(flow.unittest.TestCase):\n def test_sqrt(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"flow_op\"] = [flow.math.sqrt]\n arg_dict[\"tf_op\"] = [tf.math.sqrt]\n arg_dict[\"input_shape\"] = [(10, 20, 30)]\n arg_dict[\"input_minval\"] = [0]\n arg_dict[\"input_maxval\"] = [100]\n for arg in GenArgDict(arg_dict):\n CompareOpWithTensorFlow(**arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport os\nimport shutil\nfrom collections import OrderedDict\n\nimport numpy as np\nimport oneflow as flow\nimport test_global_storage\nfrom test_util import GenArgList, type_name_to_flow_type\n\n\ndef of_run(device_type, x_shape, data_type, rate, seed):\n assert device_type in [\"gpu\", \"cpu\"]\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n\n if data_type == \"float16\":\n dtype = flow.float\n else:\n dtype = type_name_to_flow_type[data_type]\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def DropoutJob():\n with flow.scope.placement(device_type, \"0:0\"):\n x = flow.get_variable(\n \"x\",\n shape=x_shape,\n dtype=dtype,\n initializer=flow.random_uniform_initializer(minval=-1, maxval=1),\n trainable=True,\n )\n if data_type == \"float16\":\n x = flow.cast(flow.cast(x, flow.float16), dtype)\n of_out = flow.cast(\n flow.nn.dropout(\n flow.cast(x, flow.float16), rate=rate, seed=seed, name=\"dropout\"\n ),\n dtype,\n )\n else:\n of_out = flow.nn.dropout(x, rate=rate, seed=seed, name=\"dropout\")\n loss = flow.math.square(of_out)\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0\n ).minimize(loss)\n\n flow.watch(x, test_global_storage.Setter(\"x\"))\n flow.watch_diff(x, test_global_storage.Setter(\"x_diff\"))\n flow.watch(of_out, test_global_storage.Setter(\"out\"))\n flow.watch_diff(of_out, test_global_storage.Setter(\"out_diff\"))\n\n return loss\n\n # OneFlow\n of_out = DropoutJob().get()\n\n of_out = test_global_storage.Get(\"out\")\n out_diff = test_global_storage.Get(\"out_diff\")\n assert np.allclose(\n [1 - np.count_nonzero(of_out) / of_out.size], [rate], atol=rate / 5\n )\n x = test_global_storage.Get(\"x\")\n x_diff = test_global_storage.Get(\"x_diff\")\n out_scale = of_out[np.where(of_out != 0)] / x[np.where(of_out != 0)]\n diff_scale = x_diff[np.where(of_out != 0)] / out_diff[np.where(of_out != 0)]\n assert np.allclose(out_scale, 1.0 / (1.0 - rate), atol=1e-5)\n assert np.allclose(diff_scale, 1.0 / (1.0 - rate), atol=1e-5)\n\n\ndef of_run_module(device_type, x_shape, data_type, rate, seed):\n assert device_type in [\"gpu\", \"cpu\"]\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n dtype = type_name_to_flow_type[data_type]\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def DropoutJob() -> flow.typing.Numpy:\n with flow.scope.placement(device_type, \"0:0\"):\n x = flow.get_variable(\n \"x\",\n shape=x_shape,\n dtype=dtype,\n initializer=flow.ones_initializer(),\n trainable=True,\n )\n of_out = flow.nn.dropout(x, rate=rate, seed=seed, name=\"dropout\")\n loss = flow.math.square(of_out)\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0\n ).minimize(loss)\n return of_out\n\n of_out = DropoutJob()\n of_out2 = DropoutJob()\n\n return of_out, of_out2\n\n\[email protected]_unless_1n1d()\nclass 
TestDropout(flow.unittest.TestCase):\n def test_dropout(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"x_shape\"] = [(100, 100, 10, 20)]\n arg_dict[\"data_type\"] = [\"float32\", \"double\", \"float16\"]\n arg_dict[\"rate\"] = [0.75]\n arg_dict[\"seed\"] = [12345, None]\n for arg in GenArgList(arg_dict):\n if arg[0] == \"cpu\" and arg[2] == \"float16\":\n continue\n of_run(*arg)\n\n def test_dropout_module(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"x_shape\"] = [(2, 2, 2, 2)]\n arg_dict[\"data_type\"] = [\"float32\"]\n arg_dict[\"rate\"] = [0.75]\n arg_dict[\"seed\"] = [12345]\n\n literals = {\n \"cpu\": [\n np.array(\n [\n 4.0,\n 4.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 4.0,\n 0.0,\n 0.0,\n 0.0,\n 4.0,\n 4.0,\n 0.0,\n 0.0,\n 4.0,\n ]\n ),\n np.array(\n [\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 4.0,\n 4.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 4.0,\n 0.0,\n 0.0,\n ]\n ),\n ],\n \"gpu\": [\n np.array(\n [\n 4.0,\n 4.0,\n 0.0,\n 4.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 4.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ]\n ),\n np.array(\n [\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 4.0,\n 4.0,\n 0.0,\n ]\n ),\n ],\n }\n\n for arg in GenArgList(arg_dict):\n of_out_a, of_out_b = of_run_module(*arg)\n test_case.assertEqual(\n (np.abs(literals[arg[0]][0] - of_out_a.flatten()) < 10e-7).all(), True\n )\n test_case.assertEqual(\n (np.abs(literals[arg[0]][1] - of_out_b.flatten()) < 10e-7).all(), True\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport oneflow as flow\nimport oneflow.typing as tp\nimport numpy as np\n\n\[email protected]_unless_1n2d()\nclass TestDemoMatmul(flow.unittest.TestCase):\n def test_watch(test_case):\n flow.config.gpu_device_num(2)\n flow.config.enable_debug_mode(True)\n\n expected = np.array(\n [[30, 30, 30, 30], [30, 30, 30, 30], [30, 30, 30, 30], [30, 30, 30, 30],]\n ).astype(np.float32)\n\n def Watch(x: tp.Numpy):\n test_case.assertTrue(np.allclose(x, expected))\n\n @flow.global_function()\n def Matmul(\n x: tp.Numpy.Placeholder((4, 4), dtype=flow.float32),\n y: tp.Numpy.Placeholder((4, 4), dtype=flow.float32),\n ) -> tp.Numpy:\n s = flow.matmul(x, y) # model parallel\n flow.watch(s, Watch)\n z = flow.matmul(s, x) # data parallel\n return z\n\n x = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4],]).astype(\n np.float32\n )\n\n y = np.array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4],]).astype(\n np.float32\n )\n Matmul(x, y)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.allclose",
"numpy.ones"
],
[
"numpy.allclose",
"tensorflow.Variable",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.config.experimental.list_physical_devices",
"numpy.random.uniform",
"tensorflow.GradientTape"
],
[
"numpy.all"
],
[
"tensorflow.math.erf",
"numpy.allclose",
"tensorflow.Variable",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.config.experimental.list_physical_devices",
"numpy.random.uniform",
"tensorflow.GradientTape"
],
[
"numpy.array_equal",
"numpy.put",
"numpy.random.rand",
"numpy.prod",
"numpy.ravel_multi_index",
"numpy.zeros",
"numpy.random.randint"
],
[
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.GradientTape",
"numpy.random.rand",
"tensorflow.nn.conv2d"
],
[
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.sort",
"numpy.random.random",
"tensorflow.config.experimental.set_memory_growth"
],
[
"numpy.unique",
"numpy.all",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.random.rand",
"numpy.array_equal",
"numpy.full"
],
[
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.config.experimental.set_memory_growth"
],
[
"numpy.array",
"numpy.where",
"numpy.allclose",
"numpy.count_nonzero"
],
[
"numpy.array",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ConsultingMD/covid-data-public | [
"2b7091f7cc3877df45a7887709e999b0ebdf30ec"
] | [
"scripts/update_forecast_hub.py"
] | [
"import enum\nfrom typing import Any\n\nimport click\nimport pandas as pd\nimport numpy as np\nimport structlog\nimport pathlib\nimport pydantic\nimport datetime\n\nimport zoltpy.util\n\nfrom covidactnow.datapublic import common_init, common_df\nfrom scripts import helpers\n\n\nfrom covidactnow.datapublic.common_fields import (\n GetByValueMixin,\n CommonFields,\n FieldNameAndCommonField,\n)\n\nDATA_ROOT = pathlib.Path(__file__).parent.parent / \"data\"\n\n_logger = structlog.get_logger(__name__)\n\n\nclass ForecastModel(enum.Enum):\n \"\"\"\"\"\"\n\n ENSEMBLE = \"COVIDhub-ensemble\"\n BASELINE = \"COVIDhub-baseline\"\n GOOGLE = \"Google_Harvard-CPF\"\n\n\nclass Fields(GetByValueMixin, FieldNameAndCommonField, enum.Enum):\n MODEL_ABBR = \"model_abbr\", CommonFields.MODEL_ABBR\n REGION = \"unit\", CommonFields.FIPS\n FORECAST_DATE = \"forecast_date\", CommonFields.FORECAST_DATE\n TARGET_DATE = \"target_date\", CommonFields.DATE\n QUANTILE = \"quantile\", CommonFields.QUANTILE\n WEEKLY_NEW_CASES = \"case\", CommonFields.WEEKLY_NEW_CASES\n WEEKLY_NEW_DEATHS = \"death\", CommonFields.WEEKLY_NEW_DEATHS\n\n\nclass ForecastHubUpdater(pydantic.BaseModel):\n \"\"\"Updates Forecast Lab Data Set with the Latest Available Forecast\n \"\"\"\n\n FORECAST_PROJECT_NAME = \"COVID-19 Forecasts\"\n RAW_CSV_FILENAME = \"raw.csv\"\n\n conn: Any # A valid zoltpy connection\n\n model: ForecastModel # The model to cache from Zoltar\n\n raw_data_root: pathlib.Path\n\n timeseries_output_path: pathlib.Path\n\n @classmethod\n def make_with_data_root(\n cls, model: ForecastModel, conn: Any, data_root: pathlib.Path,\n ) -> \"ForecastHubUpdater\":\n return cls(\n model=model,\n conn=conn,\n raw_data_root=data_root / \"forecast-hub\",\n timeseries_output_path=data_root / \"forecast-hub\" / \"timeseries-common.csv\",\n )\n\n @property\n def raw_path(self):\n return self.raw_data_root / self.RAW_CSV_FILENAME\n\n def write_version_file(self, forecast_date) -> None:\n stamp = datetime.datetime.utcnow().isoformat()\n version_path = self.raw_data_root / \"version.txt\"\n with version_path.open(\"w\") as vf:\n vf.write(f\"Updated on {stamp}\\n\")\n vf.write(f\"Using forecast from {forecast_date}\\n\")\n\n def update_source_data(self):\n \"\"\"\n See https://github.com/reichlab/zoltpy/tree/master for instructions.\n\n Note: Requires environment variables for Z_USERNAME and Z_PASSWORD with correct\n permissions.\n \"\"\"\n _logger.info(f\"Updating {self.model.name} from ForecastHub\")\n latest_forecast_date = get_latest_forecast_date(\n self.conn, self.FORECAST_PROJECT_NAME, self.model.value\n )\n # TODO: Save a call to the Forecast Hub by checking if latest_forecast_date is newer than\n # the current one saved in version.txt. 
We expect the cache to be invalidated only once a\n # week.\n ensemble = zoltpy.util.download_forecast(\n self.conn, self.FORECAST_PROJECT_NAME, self.model.value, latest_forecast_date\n )\n df = zoltpy.util.dataframe_from_json_io_dict(ensemble)\n df[\"forecast_date\"] = pd.to_datetime(latest_forecast_date)\n df[\"model_abbr\"] = self.model.value\n df.to_csv(self.raw_path, index=False)\n self.write_version_file(forecast_date=latest_forecast_date)\n\n def load_source_data(self) -> pd.DataFrame:\n _logger.info(\"Updating ForecastHub Ensemble dataset.\")\n data = pd.read_csv(\n self.raw_path, parse_dates=[\"forecast_date\"], dtype={\"unit\": str}, low_memory=False\n )\n return data\n\n @staticmethod\n def transform(df: pd.DataFrame) -> pd.DataFrame:\n df[\"target_date\"] = df.apply(\n lambda x: x.forecast_date + pd.Timedelta(weeks=int(x.target.split(\" \")[0])),\n axis=\"columns\",\n )\n # The targets have the form \"X wk inc/cum cases/deaths\"\n # Take the final split (death/cases) and use that as target type\n df[\"target_type\"] = df.target.str.split(\" \").str[-1]\n # Take the penultimate split (inc/cum) and use that as aggregation type\n df[\"target_summation\"] = df.target.str.split(\" \").str[-2]\n\n masks = [\n df[\"unit\"] != \"US\", # Drop the national forecast\n df[\"quantile\"].notna(), # Point forecasts are duplicate of quantile = 0.5\n df[\"target_summation\"] == \"inc\", # Only return incidence values\n # Some models return both incidence and cumulative values\n # Only keep incidence targets (drop cumulative targets)\n df[\"target_date\"] <= df[\"forecast_date\"] + pd.Timedelta(weeks=4)\n # Time Horizon - Only keep up to 4 week forecasts.\n # Almost all forecasts only provide 4 wks.\n ]\n mask = np.logical_and.reduce(masks)\n\n # The raw data is in long form and we need to pivot this to create a column for\n # WEEKLY_NEW_CASES and WEEKLY_NEW_DEATHS. \"target_type\" has either death or cases. \"value\"\n # has the predicted value. The rest of the columns create a unique index. For right now only\n # one model and one forecast_date are being served, but we need to maintain the option of\n # multiple values.\n COLUMNS = [\n Fields.MODEL_ABBR,\n Fields.REGION,\n Fields.FORECAST_DATE,\n Fields.TARGET_DATE,\n \"target_type\",\n Fields.QUANTILE,\n \"value\",\n ]\n df = df[mask][COLUMNS].copy()\n df = df.set_index(\n [\n Fields.MODEL_ABBR,\n Fields.REGION,\n Fields.FORECAST_DATE,\n Fields.TARGET_DATE,\n Fields.QUANTILE,\n ]\n )\n pivot = df.pivot(columns=\"target_type\")\n pivot = pivot.droplevel(level=0, axis=1).reset_index()\n # This cleans up a MultiIndex Column that is an artifact of the pivot in preparation for a\n # standard csv dump.\n\n # Rename and remove any columns without a CommonField\n data = helpers.rename_fields(pivot, Fields, set(), _logger)\n\n # Need to make the quantiles into a wide form for easier downstream processing\n # Mangling the column names into f\"weekly_new_{cases/deaths}_{quantile}\". 
This\n # would be a good candidate to handle in long/tidy-form and we could remove both pivots.\n # Using common_field because this is done after helpers.rename_fields\n\n # TODO(michael): Not sure why pylint is confused about the common_field member not existing.\n # pylint: disable=no-member\n wide_df = data.set_index(\n [\n Fields.REGION.common_field,\n Fields.TARGET_DATE.common_field,\n Fields.MODEL_ABBR.common_field,\n Fields.FORECAST_DATE.common_field,\n ]\n ).pivot(columns=Fields.QUANTILE.common_field)\n\n # TODO: Once requirements have settled, explicitly pass only the quantiles needed.\n wide_df.columns = [x[0] + \"_\" + str(x[1]) for x in wide_df.columns.to_flat_index()]\n wide_df = wide_df.reset_index()\n return wide_df\n\n\ndef get_latest_forecast_date(conn, project_name: str, model_abbr: str) -> str:\n \"\"\"\n Return the date string 'YYYY-MM-DD' of the latest submitted forecast for a given model in a\n given zoltar project\n\n https://github.com/reichlab/zoltpy/issues/42\n\n\n Return the str date representation of the latest forecast if available, else the empty string.\n \"\"\"\n\n project = [project for project in conn.projects if project.name == project_name][0]\n model = [model for model in project.models if model.abbreviation == model_abbr][0]\n latest_forecast_date = model.latest_forecast.timezero.timezero_date\n # Note: model.latest_forecast.timezero.timezero_date is of type datetime.datetime or None\n if latest_forecast_date:\n _logger.info(f\"Latest forecast for {model_abbr} is {latest_forecast_date}\")\n return str(latest_forecast_date)\n else:\n _logger.info(f\"No forecasts found for {model_abbr} in {project_name}\")\n return \"\"\n\n\[email protected]()\[email protected](\"--fetch/--no-fetch\", default=True)\ndef main(fetch: bool):\n common_init.configure_logging()\n connection = zoltpy.util.authenticate()\n transformer = ForecastHubUpdater.make_with_data_root(\n ForecastModel.ENSEMBLE, connection, DATA_ROOT\n )\n if fetch:\n _logger.info(\"Fetching new data.\")\n transformer.update_source_data()\n\n data = transformer.load_source_data()\n data = transformer.transform(data)\n common_df.write_csv(data, transformer.timeseries_output_path, _logger)\n\n\nif __name__ == \"__main__\":\n main() # pylint: disable=no-value-for-parameter\n"
] | [
[
"pandas.to_datetime",
"numpy.logical_and.reduce",
"pandas.read_csv",
"pandas.Timedelta"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FlingeR/pandas | [
"01f399854f9febefa9e97005f3720aa312409b98",
"01f399854f9febefa9e97005f3720aa312409b98"
] | [
"pandas/core/indexes/multi.py",
"pandas/io/stata.py"
] | [
"from sys import getsizeof\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Hashable,\n Iterable,\n List,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import algos as libalgos, index as libindex, lib\nfrom pandas._libs.hashtable import duplicated_int64\nfrom pandas._typing import AnyArrayLike, ArrayLike, Scalar\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import PerformanceWarning, UnsortedIndexError\nfrom pandas.util._decorators import Appender, cache_readonly\n\nfrom pandas.core.dtypes.cast import coerce_indexer_dtype\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_platform_int,\n is_categorical_dtype,\n is_hashable,\n is_integer,\n is_iterator,\n is_list_like,\n is_object_dtype,\n is_scalar,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import ABCDataFrame\nfrom pandas.core.dtypes.missing import array_equivalent, isna\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import Categorical\nfrom pandas.core.arrays.categorical import factorize_from_iterables\nimport pandas.core.common as com\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n Index,\n InvalidIndexError,\n _index_shared_docs,\n ensure_index,\n)\nfrom pandas.core.indexes.frozen import FrozenList\nimport pandas.core.missing as missing\nfrom pandas.core.sorting import (\n get_group_index,\n indexer_from_factorized,\n lexsort_indexer,\n)\n\nfrom pandas.io.formats.printing import (\n format_object_attrs,\n format_object_summary,\n pprint_thing,\n)\n\nif TYPE_CHECKING:\n from pandas import Series # noqa:F401\n\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n_index_doc_kwargs.update(\n dict(klass=\"MultiIndex\", target_klass=\"MultiIndex or list of tuples\")\n)\n\n\nclass MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):\n \"\"\"\n This class manages a MultiIndex by mapping label combinations to positive\n integers.\n \"\"\"\n\n _base = libindex.UInt64Engine\n\n def _codes_to_ints(self, codes):\n \"\"\"\n Transform combination(s) of uint64 in one uint64 (each), in a strictly\n monotonic way (i.e. respecting the lexicographic order of integer\n combinations): see BaseMultiIndexCodesEngine documentation.\n\n Parameters\n ----------\n codes : 1- or 2-dimensional array of dtype uint64\n Combinations of integers (one per row)\n\n Returns\n -------\n scalar or 1-dimensional array, of dtype uint64\n Integer(s) representing one combination (each).\n \"\"\"\n # Shift the representation of each level by the pre-calculated number\n # of bits:\n codes <<= self.offsets\n\n # Now sum and OR are in fact interchangeable. 
This is a simple\n # composition of the (disjunct) significant bits of each level (i.e.\n # each column in \"codes\") in a single positive integer:\n if codes.ndim == 1:\n # Single key\n return np.bitwise_or.reduce(codes)\n\n # Multiple keys\n return np.bitwise_or.reduce(codes, axis=1)\n\n\nclass MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):\n \"\"\"\n This class manages those (extreme) cases in which the number of possible\n label combinations overflows the 64 bits integers, and uses an ObjectEngine\n containing Python integers.\n \"\"\"\n\n _base = libindex.ObjectEngine\n\n def _codes_to_ints(self, codes):\n \"\"\"\n Transform combination(s) of uint64 in one Python integer (each), in a\n strictly monotonic way (i.e. respecting the lexicographic order of\n integer combinations): see BaseMultiIndexCodesEngine documentation.\n\n Parameters\n ----------\n codes : 1- or 2-dimensional array of dtype uint64\n Combinations of integers (one per row)\n\n Returns\n -------\n int, or 1-dimensional array of dtype object\n Integer(s) representing one combination (each).\n \"\"\"\n # Shift the representation of each level by the pre-calculated number\n # of bits. Since this can overflow uint64, first make sure we are\n # working with Python integers:\n codes = codes.astype(\"object\") << self.offsets\n\n # Now sum and OR are in fact interchangeable. This is a simple\n # composition of the (disjunct) significant bits of each level (i.e.\n # each column in \"codes\") in a single positive integer (per row):\n if codes.ndim == 1:\n # Single key\n return np.bitwise_or.reduce(codes)\n\n # Multiple keys\n return np.bitwise_or.reduce(codes, axis=1)\n\n\nclass MultiIndex(Index):\n \"\"\"\n A multi-level, or hierarchical, index object for pandas objects.\n\n Parameters\n ----------\n levels : sequence of arrays\n The unique labels for each level.\n codes : sequence of arrays\n Integers for each level designating which label at each location.\n\n .. versionadded:: 0.24.0\n sortorder : optional int\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : optional sequence of objects\n Names for each of the index levels. (name is accepted for compat).\n copy : bool, default False\n Copy the meta-data.\n verify_integrity : bool, default True\n Check that the levels/codes are consistent and valid.\n\n Attributes\n ----------\n names\n levels\n codes\n nlevels\n levshape\n\n Methods\n -------\n from_arrays\n from_tuples\n from_product\n from_frame\n set_levels\n set_codes\n to_frame\n to_flat_index\n is_lexsorted\n sortlevel\n droplevel\n swaplevel\n reorder_levels\n remove_unused_levels\n get_locs\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_product : Create a MultiIndex from the cartesian product\n of iterables.\n MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n Index : The base pandas Index type.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`_\n for more.\n\n Examples\n --------\n A new ``MultiIndex`` is typically constructed using one of the helper\n methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`\n and :meth:`MultiIndex.from_tuples`. 
For example (using ``.from_arrays``):\n\n >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]\n >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))\n MultiIndex([(1, 'red'),\n (1, 'blue'),\n (2, 'red'),\n (2, 'blue')],\n names=['number', 'color'])\n\n See further examples for how to construct a MultiIndex in the doc strings\n of the mentioned helper methods.\n \"\"\"\n\n _deprecations = Index._deprecations | frozenset()\n\n # initialize to zero-length tuples to make everything work\n _typ = \"multiindex\"\n _names = FrozenList()\n _levels = FrozenList()\n _codes = FrozenList()\n _comparables = [\"names\"]\n rename = Index.set_names\n\n _tuples = None\n sortorder: Optional[int]\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n levels=None,\n codes=None,\n sortorder=None,\n names=None,\n dtype=None,\n copy=False,\n name=None,\n verify_integrity: bool = True,\n _set_identity: bool = True,\n ):\n\n # compat with Index\n if name is not None:\n names = name\n if levels is None or codes is None:\n raise TypeError(\"Must pass both levels and codes\")\n if len(levels) != len(codes):\n raise ValueError(\"Length of levels and codes must be the same.\")\n if len(levels) == 0:\n raise ValueError(\"Must pass non-zero number of levels/codes\")\n\n result = object.__new__(MultiIndex)\n\n # we've already validated levels and codes, so shortcut here\n result._set_levels(levels, copy=copy, validate=False)\n result._set_codes(codes, copy=copy, validate=False)\n\n result._names = [None] * len(levels)\n if names is not None:\n # handles name validation\n result._set_names(names)\n\n if sortorder is not None:\n result.sortorder = int(sortorder)\n else:\n result.sortorder = sortorder\n\n if verify_integrity:\n new_codes = result._verify_integrity()\n result._codes = new_codes\n\n if _set_identity:\n result._reset_identity()\n\n return result\n\n def _validate_codes(self, level: List, code: List):\n \"\"\"\n Reassign code values as -1 if their corresponding levels are NaN.\n\n Parameters\n ----------\n code : list\n Code to reassign.\n level : list\n Level to check for missing values (NaN, NaT, None).\n\n Returns\n -------\n new code where code value = -1 if it corresponds\n to a level with missing values (NaN, NaT, None).\n \"\"\"\n null_mask = isna(level)\n if np.any(null_mask):\n code = np.where(null_mask[code], -1, code)\n return code\n\n def _verify_integrity(\n self, codes: Optional[List] = None, levels: Optional[List] = None\n ):\n \"\"\"\n Parameters\n ----------\n codes : optional list\n Codes to check for validity. Defaults to current codes.\n levels : optional list\n Levels to check for validity. Defaults to current levels.\n\n Raises\n ------\n ValueError\n If length of levels and codes don't match, if the codes for any\n level would exceed level bounds, or there are any duplicate levels.\n\n Returns\n -------\n new codes where code value = -1 if it corresponds to a\n NaN level.\n \"\"\"\n # NOTE: Currently does not check, among other things, that cached\n # nlevels matches nor that sortorder matches actually sortorder.\n codes = codes or self.codes\n levels = levels or self.levels\n\n if len(levels) != len(codes):\n raise ValueError(\n \"Length of levels and codes must match. 
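# --- Illustrative aside (not part of the original file): the integrity check
# above is what makes a direct MultiIndex(...) call fail loudly when a code
# points past the end of its level.
import pandas as pd

try:
    pd.MultiIndex(levels=[["a", "b"]], codes=[[0, 2]])   # code 2, but len(level) == 2
except ValueError as err:
    print(err)   # On level 0, code max (2) >= length of level (2). ...
# --- end of aside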
NOTE: \"\n \"this index is in an inconsistent state.\"\n )\n codes_length = len(codes[0])\n for i, (level, level_codes) in enumerate(zip(levels, codes)):\n if len(level_codes) != codes_length:\n raise ValueError(\n f\"Unequal code lengths: {[len(code_) for code_ in codes]}\"\n )\n if len(level_codes) and level_codes.max() >= len(level):\n raise ValueError(\n f\"On level {i}, code max ({level_codes.max()}) >= length of \"\n f\"level ({len(level)}). NOTE: this index is in an \"\n \"inconsistent state\"\n )\n if len(level_codes) and level_codes.min() < -1:\n raise ValueError(f\"On level {i}, code value ({level_codes.min()}) < -1\")\n if not level.is_unique:\n raise ValueError(\n f\"Level values must be unique: {list(level)} on level {i}\"\n )\n if self.sortorder is not None:\n if self.sortorder > self._lexsort_depth():\n raise ValueError(\n \"Value for sortorder must be inferior or equal to actual \"\n f\"lexsort_depth: sortorder {self.sortorder} \"\n f\"with lexsort_depth {self._lexsort_depth()}\"\n )\n\n codes = [\n self._validate_codes(level, code) for level, code in zip(levels, codes)\n ]\n new_codes = FrozenList(codes)\n return new_codes\n\n @classmethod\n def from_arrays(cls, arrays, sortorder=None, names=lib.no_default):\n \"\"\"\n Convert arrays to MultiIndex.\n\n Parameters\n ----------\n arrays : list / sequence of array-likes\n Each array-like gives one level's value for each data point.\n len(arrays) is the number of levels.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list / sequence of str, optional\n Names for the levels in the index.\n\n Returns\n -------\n MultiIndex\n\n See Also\n --------\n MultiIndex.from_tuples : Convert list of tuples to MultiIndex.\n MultiIndex.from_product : Make a MultiIndex from cartesian product\n of iterables.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n\n Examples\n --------\n >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]\n >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))\n MultiIndex([(1, 'red'),\n (1, 'blue'),\n (2, 'red'),\n (2, 'blue')],\n names=['number', 'color'])\n \"\"\"\n error_msg = \"Input must be a list / sequence of array-likes.\"\n if not is_list_like(arrays):\n raise TypeError(error_msg)\n elif is_iterator(arrays):\n arrays = list(arrays)\n\n # Check if elements of array are list-like\n for array in arrays:\n if not is_list_like(array):\n raise TypeError(error_msg)\n\n # Check if lengths of all arrays are equal or not,\n # raise ValueError, if not\n for i in range(1, len(arrays)):\n if len(arrays[i]) != len(arrays[i - 1]):\n raise ValueError(\"all arrays must be same length\")\n\n codes, levels = factorize_from_iterables(arrays)\n if names is lib.no_default:\n names = [getattr(arr, \"name\", None) for arr in arrays]\n\n return MultiIndex(\n levels=levels,\n codes=codes,\n sortorder=sortorder,\n names=names,\n verify_integrity=False,\n )\n\n @classmethod\n def from_tuples(cls, tuples, sortorder=None, names=None):\n \"\"\"\n Convert list of tuples to MultiIndex.\n\n Parameters\n ----------\n tuples : list / sequence of tuple-likes\n Each tuple is the index of one row/column.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list / sequence of str, optional\n Names for the levels in the index.\n\n Returns\n -------\n MultiIndex\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_product : Make a MultiIndex from 
cartesian product\n of iterables.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n\n Examples\n --------\n >>> tuples = [(1, 'red'), (1, 'blue'),\n ... (2, 'red'), (2, 'blue')]\n >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))\n MultiIndex([(1, 'red'),\n (1, 'blue'),\n (2, 'red'),\n (2, 'blue')],\n names=['number', 'color'])\n \"\"\"\n if not is_list_like(tuples):\n raise TypeError(\"Input must be a list / sequence of tuple-likes.\")\n elif is_iterator(tuples):\n tuples = list(tuples)\n\n if len(tuples) == 0:\n if names is None:\n raise TypeError(\"Cannot infer number of levels from empty list\")\n arrays = [[]] * len(names)\n elif isinstance(tuples, (np.ndarray, Index)):\n if isinstance(tuples, Index):\n tuples = tuples._values\n\n arrays = list(lib.tuples_to_object_array(tuples).T)\n elif isinstance(tuples, list):\n arrays = list(lib.to_object_array_tuples(tuples).T)\n else:\n arrays = zip(*tuples)\n\n return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)\n\n @classmethod\n def from_product(cls, iterables, sortorder=None, names=lib.no_default):\n \"\"\"\n Make a MultiIndex from the cartesian product of multiple iterables.\n\n Parameters\n ----------\n iterables : list / sequence of iterables\n Each iterable has unique labels for each level of the index.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list / sequence of str, optional\n Names for the levels in the index.\n\n .. versionchanged:: 1.0.0\n\n If not explicitly provided, names will be inferred from the\n elements of iterables if an element has a name attribute\n\n Returns\n -------\n MultiIndex\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_tuples : Convert list of tuples to MultiIndex.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n\n Examples\n --------\n >>> numbers = [0, 1, 2]\n >>> colors = ['green', 'purple']\n >>> pd.MultiIndex.from_product([numbers, colors],\n ... names=['number', 'color'])\n MultiIndex([(0, 'green'),\n (0, 'purple'),\n (1, 'green'),\n (1, 'purple'),\n (2, 'green'),\n (2, 'purple')],\n names=['number', 'color'])\n \"\"\"\n from pandas.core.reshape.util import cartesian_product\n\n if not is_list_like(iterables):\n raise TypeError(\"Input must be a list / sequence of iterables.\")\n elif is_iterator(iterables):\n iterables = list(iterables)\n\n codes, levels = factorize_from_iterables(iterables)\n if names is lib.no_default:\n names = [getattr(it, \"name\", None) for it in iterables]\n\n codes = cartesian_product(codes)\n return MultiIndex(levels, codes, sortorder=sortorder, names=names)\n\n @classmethod\n def from_frame(cls, df, sortorder=None, names=None):\n \"\"\"\n Make a MultiIndex from a DataFrame.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n df : DataFrame\n DataFrame to be converted to MultiIndex.\n sortorder : int, optional\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list-like, optional\n If no names are provided, use the column names, or tuple of column\n names if the columns is a MultiIndex. 
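# --- Illustrative aside (not part of the original file): what from_product
# assembles, sketched with public pieces. The real implementation factorizes
# each iterable and takes the cartesian product of the resulting codes; the
# repeat/tile pattern below reproduces that product for two levels.
import numpy as np
import pandas as pd

numbers, colors = [0, 1], ["green", "purple"]
levels = [pd.Index(numbers), pd.Index(colors)]
codes = [np.repeat(np.arange(2), 2),   # outer level varies slowly: 0 0 1 1
         np.tile(np.arange(2), 2)]     # inner level varies quickly: 0 1 0 1
mi = pd.MultiIndex(levels=levels, codes=codes, names=["number", "color"])
print(mi.equals(pd.MultiIndex.from_product([numbers, colors],
                                           names=["number", "color"])))  # True
# --- end of aside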
If a sequence, overwrite\n names with the given sequence.\n\n Returns\n -------\n MultiIndex\n The MultiIndex representation of the given DataFrame.\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_tuples : Convert list of tuples to MultiIndex.\n MultiIndex.from_product : Make a MultiIndex from cartesian product\n of iterables.\n\n Examples\n --------\n >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],\n ... ['NJ', 'Temp'], ['NJ', 'Precip']],\n ... columns=['a', 'b'])\n >>> df\n a b\n 0 HI Temp\n 1 HI Precip\n 2 NJ Temp\n 3 NJ Precip\n\n >>> pd.MultiIndex.from_frame(df)\n MultiIndex([('HI', 'Temp'),\n ('HI', 'Precip'),\n ('NJ', 'Temp'),\n ('NJ', 'Precip')],\n names=['a', 'b'])\n\n Using explicit names, instead of the column names\n\n >>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])\n MultiIndex([('HI', 'Temp'),\n ('HI', 'Precip'),\n ('NJ', 'Temp'),\n ('NJ', 'Precip')],\n names=['state', 'observation'])\n \"\"\"\n if not isinstance(df, ABCDataFrame):\n raise TypeError(\"Input must be a DataFrame\")\n\n column_names, columns = zip(*df.items())\n names = column_names if names is None else names\n return cls.from_arrays(columns, sortorder=sortorder, names=names)\n\n # --------------------------------------------------------------------\n\n @property\n def _values(self):\n # We override here, since our parent uses _data, which we don't use.\n return self.values\n\n @property\n def values(self):\n if self._tuples is not None:\n return self._tuples\n\n values = []\n\n for i in range(self.nlevels):\n vals = self._get_level_values(i)\n if is_categorical_dtype(vals):\n vals = vals._internal_get_values()\n if isinstance(vals.dtype, ExtensionDtype) or hasattr(vals, \"_box_values\"):\n vals = vals.astype(object)\n vals = np.array(vals, copy=False)\n values.append(vals)\n\n self._tuples = lib.fast_zip(values)\n return self._tuples\n\n @property\n def array(self):\n \"\"\"\n Raises a ValueError for `MultiIndex` because there's no single\n array backing a MultiIndex.\n\n Raises\n ------\n ValueError\n \"\"\"\n raise ValueError(\n \"MultiIndex has no single backing array. Use \"\n \"'MultiIndex.to_numpy()' to get a NumPy array of tuples.\"\n )\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple of the shape of the underlying data.\n \"\"\"\n # overriding the base Index.shape definition to avoid materializing\n # the values (GH-27384, GH-27775)\n return (len(self),)\n\n def __len__(self) -> int:\n return len(self.codes[0])\n\n # --------------------------------------------------------------------\n # Levels Methods\n\n @cache_readonly\n def levels(self):\n # Use cache_readonly to ensure that self.get_locs doesn't repeatedly\n # create new IndexEngine\n # https://github.com/pandas-dev/pandas/issues/31648\n result = [\n x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)\n ]\n for level in result:\n # disallow midx.levels[0].name = \"foo\"\n level._no_setting_name = True\n return FrozenList(result)\n\n def _set_levels(\n self, levels, level=None, copy=False, validate=True, verify_integrity=False\n ):\n # This is NOT part of the levels property because it should be\n # externally not allowed to set levels. 
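# --- Illustrative aside (not part of the original file): the shape/values
# split above in action. .shape is derived from the codes alone, while the
# tuple array is only materialized (and then cached in _tuples) when .values
# is first accessed.
import pandas as pd

mi = pd.MultiIndex.from_product([range(3), ["a", "b"]])
print(mi.shape)        # (6,) -- computed without building any tuples
print(mi.values[:2])   # [(0, 'a') (0, 'b')] -- tuples built on first access
# --- end of aside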
User beware if you change\n # _levels directly\n if validate:\n if len(levels) == 0:\n raise ValueError(\"Must set non-zero number of levels.\")\n if level is None and len(levels) != self.nlevels:\n raise ValueError(\"Length of levels must match number of levels.\")\n if level is not None and len(levels) != len(level):\n raise ValueError(\"Length of levels must match length of level.\")\n\n if level is None:\n new_levels = FrozenList(\n ensure_index(lev, copy=copy)._shallow_copy() for lev in levels\n )\n else:\n level_numbers = [self._get_level_number(lev) for lev in level]\n new_levels = list(self._levels)\n for lev_num, lev in zip(level_numbers, levels):\n new_levels[lev_num] = ensure_index(lev, copy=copy)._shallow_copy()\n new_levels = FrozenList(new_levels)\n\n if verify_integrity:\n new_codes = self._verify_integrity(levels=new_levels)\n self._codes = new_codes\n\n names = self.names\n self._levels = new_levels\n if any(names):\n self._set_names(names)\n\n self._tuples = None\n self._reset_cache()\n\n def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):\n \"\"\"\n Set new levels on MultiIndex. Defaults to returning new index.\n\n Parameters\n ----------\n levels : sequence or list of sequence\n New level(s) to apply.\n level : int, level name, or sequence of int/level names (default None)\n Level(s) to set (None for all levels).\n inplace : bool\n If True, mutates in place.\n verify_integrity : bool, default True\n If True, checks that levels and codes are compatible.\n\n Returns\n -------\n new index (of same type and class...etc)\n\n Examples\n --------\n >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),\n (2, 'one'), (2, 'two'),\n (3, 'one'), (3, 'two')],\n names=['foo', 'bar'])\n >>> idx.set_levels([['a', 'b', 'c'], [1, 2]])\n MultiIndex([('a', 1),\n ('a', 2),\n ('b', 1),\n ('b', 2),\n ('c', 1),\n ('c', 2)],\n names=['foo', 'bar'])\n >>> idx.set_levels(['a', 'b', 'c'], level=0)\n MultiIndex([('a', 'one'),\n ('a', 'two'),\n ('b', 'one'),\n ('b', 'two'),\n ('c', 'one'),\n ('c', 'two')],\n names=['foo', 'bar'])\n >>> idx.set_levels(['a', 'b'], level='bar')\n MultiIndex([(1, 'a'),\n (1, 'b'),\n (2, 'a'),\n (2, 'b'),\n (3, 'a'),\n (3, 'b')],\n names=['foo', 'bar'])\n\n If any of the levels passed to ``set_levels()`` exceeds the\n existing length, all of the values from that argument will\n be stored in the MultiIndex levels, though the values will\n be truncated in the MultiIndex output.\n\n >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])\n MultiIndex([('a', 1),\n ('a', 2),\n ('b', 1),\n ('b', 2)],\n names=['foo', 'bar'])\n >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels\n FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])\n \"\"\"\n if is_list_like(levels) and not isinstance(levels, Index):\n levels = list(levels)\n\n if level is not None and not is_list_like(level):\n if not is_list_like(levels):\n raise TypeError(\"Levels must be list-like\")\n if is_list_like(levels[0]):\n raise TypeError(\"Levels must be list-like\")\n level = [level]\n levels = [levels]\n elif level is None or is_list_like(level):\n if not is_list_like(levels) or not is_list_like(levels[0]):\n raise TypeError(\"Levels must be list of lists-like\")\n\n if inplace:\n idx = self\n else:\n idx = self._shallow_copy()\n idx._reset_identity()\n idx._set_levels(\n levels, level=level, validate=True, verify_integrity=verify_integrity\n )\n if not inplace:\n return idx\n\n @property\n def nlevels(self) -> int:\n \"\"\"\n Integer number of levels in 
this MultiIndex.\n \"\"\"\n return len(self._levels)\n\n @property\n def levshape(self):\n \"\"\"\n A tuple with the length of each level.\n \"\"\"\n return tuple(len(x) for x in self.levels)\n\n # --------------------------------------------------------------------\n # Codes Methods\n\n @property\n def codes(self):\n return self._codes\n\n def _set_codes(\n self, codes, level=None, copy=False, validate=True, verify_integrity=False\n ):\n if validate:\n if level is None and len(codes) != self.nlevels:\n raise ValueError(\"Length of codes must match number of levels\")\n if level is not None and len(codes) != len(level):\n raise ValueError(\"Length of codes must match length of levels.\")\n\n if level is None:\n new_codes = FrozenList(\n _coerce_indexer_frozen(level_codes, lev, copy=copy).view()\n for lev, level_codes in zip(self._levels, codes)\n )\n else:\n level_numbers = [self._get_level_number(lev) for lev in level]\n new_codes = list(self._codes)\n for lev_num, level_codes in zip(level_numbers, codes):\n lev = self.levels[lev_num]\n new_codes[lev_num] = _coerce_indexer_frozen(level_codes, lev, copy=copy)\n new_codes = FrozenList(new_codes)\n\n if verify_integrity:\n new_codes = self._verify_integrity(codes=new_codes)\n\n self._codes = new_codes\n\n self._tuples = None\n self._reset_cache()\n\n def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):\n \"\"\"\n Set new codes on MultiIndex. Defaults to returning\n new index.\n\n .. versionadded:: 0.24.0\n\n New name for deprecated method `set_labels`.\n\n Parameters\n ----------\n codes : sequence or list of sequence\n New codes to apply.\n level : int, level name, or sequence of int/level names (default None)\n Level(s) to set (None for all levels).\n inplace : bool\n If True, mutates in place.\n verify_integrity : bool (default True)\n If True, checks that levels and codes are compatible.\n\n Returns\n -------\n new index (of same type and class...etc)\n\n Examples\n --------\n >>> idx = pd.MultiIndex.from_tuples([(1, 'one'),\n (1, 'two'),\n (2, 'one'),\n (2, 'two')],\n names=['foo', 'bar'])\n >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])\n MultiIndex([(2, 'one'),\n (1, 'one'),\n (2, 'two'),\n (1, 'two')],\n names=['foo', 'bar'])\n >>> idx.set_codes([1, 0, 1, 0], level=0)\n MultiIndex([(2, 'one'),\n (1, 'two'),\n (2, 'one'),\n (1, 'two')],\n names=['foo', 'bar'])\n >>> idx.set_codes([0, 0, 1, 1], level='bar')\n MultiIndex([(1, 'one'),\n (1, 'one'),\n (2, 'two'),\n (2, 'two')],\n names=['foo', 'bar'])\n >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])\n MultiIndex([(2, 'one'),\n (1, 'one'),\n (2, 'two'),\n (1, 'two')],\n names=['foo', 'bar'])\n \"\"\"\n if level is not None and not is_list_like(level):\n if not is_list_like(codes):\n raise TypeError(\"Codes must be list-like\")\n if is_list_like(codes[0]):\n raise TypeError(\"Codes must be list-like\")\n level = [level]\n codes = [codes]\n elif level is None or is_list_like(level):\n if not is_list_like(codes) or not is_list_like(codes[0]):\n raise TypeError(\"Codes must be list of lists-like\")\n\n if inplace:\n idx = self\n else:\n idx = self._shallow_copy()\n idx._reset_identity()\n idx._set_codes(codes, level=level, verify_integrity=verify_integrity)\n if not inplace:\n return idx\n\n # --------------------------------------------------------------------\n # Index Internals\n\n @cache_readonly\n def _engine(self):\n # Calculate the number of bits needed to represent labels in each\n # level, as log2 of their sizes (including -1 for NaN):\n sizes 
= np.ceil(np.log2([len(l) + 1 for l in self.levels]))\n\n # Sum bit counts, starting from the _right_....\n lev_bits = np.cumsum(sizes[::-1])[::-1]\n\n # ... in order to obtain offsets such that sorting the combination of\n # shifted codes (one for each level, resulting in a unique integer) is\n # equivalent to sorting lexicographically the codes themselves. Notice\n # that each level needs to be shifted by the number of bits needed to\n # represent the _previous_ ones:\n offsets = np.concatenate([lev_bits[1:], [0]]).astype(\"uint64\")\n\n # Check the total number of bits needed for our representation:\n if lev_bits[0] > 64:\n # The levels would overflow a 64 bit uint - use Python integers:\n return MultiIndexPyIntEngine(self.levels, self.codes, offsets)\n return MultiIndexUIntEngine(self.levels, self.codes, offsets)\n\n @property\n def _constructor(self):\n return MultiIndex.from_tuples\n\n @Appender(Index._shallow_copy.__doc__)\n def _shallow_copy(self, values=None, **kwargs):\n if values is not None:\n names = kwargs.pop(\"names\", kwargs.pop(\"name\", self.names))\n # discards freq\n kwargs.pop(\"freq\", None)\n return MultiIndex.from_tuples(values, names=names, **kwargs)\n return self.copy(**kwargs)\n\n def _shallow_copy_with_infer(self, values, **kwargs):\n # On equal MultiIndexes the difference is empty.\n # Therefore, an empty MultiIndex is returned GH13490\n if len(values) == 0:\n return MultiIndex(\n levels=[[] for _ in range(self.nlevels)],\n codes=[[] for _ in range(self.nlevels)],\n **kwargs,\n )\n return self._shallow_copy(values, **kwargs)\n\n # --------------------------------------------------------------------\n\n def copy(\n self,\n names=None,\n dtype=None,\n levels=None,\n codes=None,\n deep=False,\n name=None,\n _set_identity=False,\n ):\n \"\"\"\n Make a copy of this object. Names, dtype, levels and codes can be\n passed and will be set on new copy.\n\n Parameters\n ----------\n names : sequence, optional\n dtype : numpy dtype or pandas type, optional\n levels : sequence, optional\n codes : sequence, optional\n deep : bool, default False\n name : Label\n Kept for compatibility with 1-dimensional Index. 
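# --- Illustrative aside (not part of the original file): the offset
# arithmetic from _engine above, replayed for two made-up level sizes.
# Each level needs ceil(log2(size + 1)) bits (the +1 leaves room for the
# -1 NaN sentinel); each offset shifts a level past all levels to its right.
import numpy as np

level_sizes = [3, 10]
sizes = np.ceil(np.log2([n + 1 for n in level_sizes]))         # [2. 4.]
lev_bits = np.cumsum(sizes[::-1])[::-1]                        # [6. 4.]
offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64") # [4 0]
print(offsets, "uses Python ints" if lev_bits[0] > 64 else "fits in uint64")
# --- end of aside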
Should not be used.\n\n Returns\n -------\n MultiIndex\n\n Notes\n -----\n In most cases, there should be no functional difference from using\n ``deep``, but if ``deep`` is passed it will attempt to deepcopy.\n This could be potentially expensive on large MultiIndex objects.\n \"\"\"\n names = self._validate_names(name=name, names=names, deep=deep)\n if deep:\n from copy import deepcopy\n\n if levels is None:\n levels = deepcopy(self.levels)\n if codes is None:\n codes = deepcopy(self.codes)\n else:\n if levels is None:\n levels = self.levels\n if codes is None:\n codes = self.codes\n return MultiIndex(\n levels=levels,\n codes=codes,\n names=names,\n sortorder=self.sortorder,\n verify_integrity=False,\n _set_identity=_set_identity,\n )\n\n def __array__(self, dtype=None) -> np.ndarray:\n \"\"\" the array interface, return my values \"\"\"\n return self.values\n\n def view(self, cls=None):\n \"\"\" this is defined as a copy with the same identity \"\"\"\n result = self.copy()\n result._id = self._id\n return result\n\n @Appender(Index.__contains__.__doc__)\n def __contains__(self, key: Any) -> bool:\n hash(key)\n try:\n self.get_loc(key)\n return True\n except (LookupError, TypeError, ValueError):\n return False\n\n @cache_readonly\n def dtype(self) -> np.dtype:\n return np.dtype(\"O\")\n\n def _is_memory_usage_qualified(self) -> bool:\n \"\"\" return a boolean if we need a qualified .info display \"\"\"\n\n def f(l):\n return \"mixed\" in l or \"string\" in l or \"unicode\" in l\n\n return any(f(l) for l in self._inferred_type_levels)\n\n @Appender(Index.memory_usage.__doc__)\n def memory_usage(self, deep: bool = False) -> int:\n # we are overwriting our base class to avoid\n # computing .values here which could materialize\n # a tuple representation unnecessarily\n return self._nbytes(deep)\n\n @cache_readonly\n def nbytes(self) -> int:\n \"\"\" return the number of bytes in the underlying data \"\"\"\n return self._nbytes(False)\n\n def _nbytes(self, deep: bool = False) -> int:\n \"\"\"\n return the number of bytes in the underlying data\n deeply introspect the level data if deep=True\n\n include the engine hashtable\n\n *this is in internal routine*\n\n \"\"\"\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n def _formatter_func(self, tup):\n \"\"\"\n Formats each item in tup according to its level's formatter function.\n \"\"\"\n formatter_funcs = [level._formatter_func for level in self.levels]\n return tuple(func(val) for func, val in zip(formatter_funcs, tup))\n\n def _format_data(self, name=None):\n \"\"\"\n Return the formatted data as a unicode string\n \"\"\"\n return format_object_summary(\n self, self._formatter_func, name=name, line_break_each_value=True\n )\n\n def _format_attrs(self):\n \"\"\"\n Return a list of tuples of the (attr,formatted_value).\n \"\"\"\n return format_object_attrs(self, include_dtype=False)\n\n def _format_native_types(self, na_rep=\"nan\", **kwargs):\n new_levels = []\n new_codes = []\n\n # go through the levels and format them\n for level, level_codes in zip(self.levels, self.codes):\n level = 
level._format_native_types(na_rep=na_rep, **kwargs)\n # add nan values, if there are any\n mask = level_codes == -1\n if mask.any():\n nan_index = len(level)\n level = np.append(level, na_rep)\n assert not level_codes.flags.writeable # i.e. copy is needed\n level_codes = level_codes.copy() # make writeable\n level_codes[mask] = nan_index\n new_levels.append(level)\n new_codes.append(level_codes)\n\n if len(new_levels) == 1:\n # a single-level multi-index\n return Index(new_levels[0].take(new_codes[0]))._format_native_types()\n else:\n # reconstruct the multi-index\n mi = MultiIndex(\n levels=new_levels,\n codes=new_codes,\n names=self.names,\n sortorder=self.sortorder,\n verify_integrity=False,\n )\n return mi.values\n\n def format(\n self,\n space=2,\n sparsify=None,\n adjoin=True,\n names=False,\n na_rep=None,\n formatter=None,\n ):\n if len(self) == 0:\n return []\n\n stringified_levels = []\n for lev, level_codes in zip(self.levels, self.codes):\n na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)\n\n if len(lev) > 0:\n\n formatted = lev.take(level_codes).format(formatter=formatter)\n\n # we have some NA\n mask = level_codes == -1\n if mask.any():\n formatted = np.array(formatted, dtype=object)\n formatted[mask] = na\n formatted = formatted.tolist()\n\n else:\n # weird all NA case\n formatted = [\n pprint_thing(na if isna(x) else x, escape_chars=(\"\\t\", \"\\r\", \"\\n\"))\n for x in algos.take_1d(lev._values, level_codes)\n ]\n stringified_levels.append(formatted)\n\n result_levels = []\n for lev, name in zip(stringified_levels, self.names):\n level = []\n\n if names:\n level.append(\n pprint_thing(name, escape_chars=(\"\\t\", \"\\r\", \"\\n\"))\n if name is not None\n else \"\"\n )\n\n level.extend(np.array(lev, dtype=object))\n result_levels.append(level)\n\n if sparsify is None:\n sparsify = get_option(\"display.multi_sparse\")\n\n if sparsify:\n sentinel = \"\"\n # GH3547\n # use value of sparsify as sentinel, unless it's an obvious\n # \"Truthy\" value\n if sparsify not in [True, 1]:\n sentinel = sparsify\n # little bit of a kludge job for #1217\n result_levels = _sparsify(\n result_levels, start=int(names), sentinel=sentinel\n )\n\n if adjoin:\n from pandas.io.formats.format import _get_adjustment\n\n adj = _get_adjustment()\n return adj.adjoin(space, *result_levels).split(\"\\n\")\n else:\n return result_levels\n\n # --------------------------------------------------------------------\n # Names Methods\n\n def _get_names(self):\n return FrozenList(self._names)\n\n def _set_names(self, names, level=None, validate=True):\n \"\"\"\n Set new names on index. Each name has to be a hashable type.\n\n Parameters\n ----------\n values : str or sequence\n name(s) to set\n level : int, level name, or sequence of int/level names (default None)\n If the index is a MultiIndex (hierarchical), level(s) to set (None\n for all levels). Otherwise level must be None\n validate : boolean, default True\n validate that the names match level lengths\n\n Raises\n ------\n TypeError if each name is not hashable.\n\n Notes\n -----\n sets names on levels. 
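# --- Illustrative aside (not part of the original file): the list-like
# requirement enforced by _set_names, seen through the public names property.
import pandas as pd

mi = pd.MultiIndex.from_arrays([[1, 2], ["a", "b"]])
mi.names = ["num", "char"]       # one hashable name per level: fine
try:
    mi.names = "num"             # GH 15110: a bare string is rejected
except ValueError as err:
    print(err)                   # Names should be list-like for a MultiIndex
# --- end of aside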
WARNING: mutates!\n\n Note that you generally want to set this *after* changing levels, so\n that it only acts on copies\n \"\"\"\n # GH 15110\n # Don't allow a single string for names in a MultiIndex\n if names is not None and not is_list_like(names):\n raise ValueError(\"Names should be list-like for a MultiIndex\")\n names = list(names)\n\n if validate:\n if level is not None and len(names) != len(level):\n raise ValueError(\"Length of names must match length of level.\")\n if level is None and len(names) != self.nlevels:\n raise ValueError(\n \"Length of names must match number of levels in MultiIndex.\"\n )\n\n if level is None:\n level = range(self.nlevels)\n else:\n level = [self._get_level_number(lev) for lev in level]\n\n # set the name\n for lev, name in zip(level, names):\n if name is not None:\n # GH 20527\n # All items in 'names' need to be hashable:\n if not is_hashable(name):\n raise TypeError(\n f\"{type(self).__name__}.name must be a hashable type\"\n )\n self._names[lev] = name\n\n # If .levels has been accessed, the names in our cache will be stale.\n self._reset_cache()\n\n names = property(\n fset=_set_names, fget=_get_names, doc=\"\"\"\\nNames of levels in MultiIndex.\\n\"\"\"\n )\n\n # --------------------------------------------------------------------\n\n @Appender(Index._get_grouper_for_level.__doc__)\n def _get_grouper_for_level(self, mapper, level):\n indexer = self.codes[level]\n level_index = self.levels[level]\n\n if mapper is not None:\n # Handle group mapping function and return\n level_values = self.levels[level].take(indexer)\n grouper = level_values.map(mapper)\n return grouper, None, None\n\n codes, uniques = algos.factorize(indexer, sort=True)\n\n if len(uniques) > 0 and uniques[0] == -1:\n # Handle NAs\n mask = indexer != -1\n ok_codes, uniques = algos.factorize(indexer[mask], sort=True)\n\n codes = np.empty(len(indexer), dtype=indexer.dtype)\n codes[mask] = ok_codes\n codes[~mask] = -1\n\n if len(uniques) < len(level_index):\n # Remove unobserved levels from level_index\n level_index = level_index.take(uniques)\n else:\n # break references back to us so that setting the name\n # on the output of a groupby doesn't reflect back here.\n level_index = level_index.copy()\n\n if level_index._can_hold_na:\n grouper = level_index.take(codes, fill_value=True)\n else:\n grouper = level_index.take(codes)\n\n return grouper, codes, level_index\n\n @cache_readonly\n def inferred_type(self) -> str:\n return \"mixed\"\n\n def _get_level_number(self, level) -> int:\n count = self.names.count(level)\n if (count > 1) and not is_integer(level):\n raise ValueError(\n f\"The name {level} occurs multiple times, use a level number\"\n )\n try:\n level = self.names.index(level)\n except ValueError as err:\n if not is_integer(level):\n raise KeyError(f\"Level {level} not found\") from err\n elif level < 0:\n level += self.nlevels\n if level < 0:\n orig_level = level - self.nlevels\n raise IndexError(\n f\"Too many levels: Index has only {self.nlevels} levels, \"\n f\"{orig_level} is not a valid level number\"\n ) from err\n # Note: levels are zero-based\n elif level >= self.nlevels:\n raise IndexError(\n f\"Too many levels: Index has only {self.nlevels} levels, \"\n f\"not {level + 1}\"\n ) from err\n return level\n\n @property\n def _has_complex_internals(self) -> bool:\n # used to avoid libreduction code paths, which raise or require conversion\n return True\n\n @cache_readonly\n def is_monotonic_increasing(self) -> bool:\n \"\"\"\n return if the index is monotonic 
increasing (only equal or\n increasing) values.\n \"\"\"\n if all(x.is_monotonic for x in self.levels):\n # If each level is sorted, we can operate on the codes directly. GH27495\n return libalgos.is_lexsorted(\n [x.astype(\"int64\", copy=False) for x in self.codes]\n )\n\n # reversed() because lexsort() wants the most significant key last.\n values = [\n self._get_level_values(i).values for i in reversed(range(len(self.levels)))\n ]\n try:\n sort_order = np.lexsort(values)\n return Index(sort_order).is_monotonic\n except TypeError:\n\n # we have mixed types and np.lexsort is not happy\n return Index(self.values).is_monotonic\n\n @cache_readonly\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n return if the index is monotonic decreasing (only equal or\n decreasing) values.\n \"\"\"\n # monotonic decreasing if and only if reverse is monotonic increasing\n return self[::-1].is_monotonic_increasing\n\n @cache_readonly\n def _inferred_type_levels(self):\n \"\"\" return a list of the inferred types, one for each level \"\"\"\n return [i.inferred_type for i in self.levels]\n\n @Appender(Index.duplicated.__doc__)\n def duplicated(self, keep=\"first\"):\n shape = map(len, self.levels)\n ids = get_group_index(self.codes, shape, sort=False, xnull=False)\n\n return duplicated_int64(ids, keep)\n\n def fillna(self, value=None, downcast=None):\n \"\"\"\n fillna is not implemented for MultiIndex\n \"\"\"\n raise NotImplementedError(\"isna is not defined for MultiIndex\")\n\n @Appender(Index.dropna.__doc__)\n def dropna(self, how=\"any\"):\n nans = [level_codes == -1 for level_codes in self.codes]\n if how == \"any\":\n indexer = np.any(nans, axis=0)\n elif how == \"all\":\n indexer = np.all(nans, axis=0)\n else:\n raise ValueError(f\"invalid how option: {how}\")\n\n new_codes = [level_codes[~indexer] for level_codes in self.codes]\n return self.copy(codes=new_codes, deep=True)\n\n def _get_level_values(self, level, unique=False):\n \"\"\"\n Return vector of label values for requested level,\n equal to the length of the index\n\n **this is an internal method**\n\n Parameters\n ----------\n level : int level\n unique : bool, default False\n if True, drop duplicated values\n\n Returns\n -------\n values : ndarray\n \"\"\"\n lev = self.levels[level]\n level_codes = self.codes[level]\n name = self._names[level]\n if unique:\n level_codes = algos.unique(level_codes)\n filled = algos.take_1d(lev._values, level_codes, fill_value=lev._na_value)\n return lev._shallow_copy(filled, name=name)\n\n def get_level_values(self, level):\n \"\"\"\n Return vector of label values for requested level,\n equal to the length of the index.\n\n Parameters\n ----------\n level : int or str\n ``level`` is either the integer position of the level in the\n MultiIndex, or the name of the level.\n\n Returns\n -------\n values : Index\n Values is a level of this MultiIndex converted to\n a single :class:`Index` (or subclass thereof).\n\n Examples\n --------\n Create a MultiIndex:\n\n >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))\n >>> mi.names = ['level_1', 'level_2']\n\n Get level values by supplying level as either integer or name:\n\n >>> mi.get_level_values(0)\n Index(['a', 'b', 'c'], dtype='object', name='level_1')\n >>> mi.get_level_values('level_2')\n Index(['d', 'e', 'f'], dtype='object', name='level_2')\n \"\"\"\n level = self._get_level_number(level)\n values = self._get_level_values(level)\n return values\n\n @Appender(Index.unique.__doc__)\n def unique(self, level=None):\n\n if level is None:\n return 
super().unique()\n else:\n level = self._get_level_number(level)\n return self._get_level_values(level=level, unique=True)\n\n def _to_safe_for_reshape(self):\n \"\"\" convert to object if we are a categorical \"\"\"\n return self.set_levels([i._to_safe_for_reshape() for i in self.levels])\n\n def to_frame(self, index=True, name=None):\n \"\"\"\n Create a DataFrame with the levels of the MultiIndex as columns.\n\n Column ordering is determined by the DataFrame constructor with data as\n a dict.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n index : bool, default True\n Set the index of the returned DataFrame as the original MultiIndex.\n\n name : list / sequence of str, optional\n The passed names should substitute index level names.\n\n Returns\n -------\n DataFrame : a DataFrame containing the original MultiIndex data.\n\n See Also\n --------\n DataFrame\n \"\"\"\n from pandas import DataFrame\n\n if name is not None:\n if not is_list_like(name):\n raise TypeError(\"'name' must be a list / sequence of column names.\")\n\n if len(name) != len(self.levels):\n raise ValueError(\n \"'name' should have same length as number of levels on index.\"\n )\n idx_names = name\n else:\n idx_names = self.names\n\n # Guarantee resulting column order - PY36+ dict maintains insertion order\n result = DataFrame(\n {\n (level if lvlname is None else lvlname): self._get_level_values(level)\n for lvlname, level in zip(idx_names, range(len(self.levels)))\n },\n copy=False,\n )\n\n if index:\n result.index = self\n return result\n\n def to_flat_index(self):\n \"\"\"\n Convert a MultiIndex to an Index of Tuples containing the level values.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n pd.Index\n Index with the MultiIndex data represented in Tuples.\n\n Notes\n -----\n This method will simply return the caller if called by anything other\n than a MultiIndex.\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_product(\n ... [['foo', 'bar'], ['baz', 'qux']],\n ... names=['a', 'b'])\n >>> index.to_flat_index()\n Index([('foo', 'baz'), ('foo', 'qux'),\n ('bar', 'baz'), ('bar', 'qux')],\n dtype='object')\n \"\"\"\n return Index(self.values, tupleize_cols=False)\n\n @property\n def is_all_dates(self) -> bool:\n return False\n\n def is_lexsorted(self) -> bool:\n \"\"\"\n Return True if the codes are lexicographically sorted.\n\n Returns\n -------\n bool\n \"\"\"\n return self.lexsort_depth == self.nlevels\n\n @cache_readonly\n def lexsort_depth(self):\n if self.sortorder is not None:\n return self.sortorder\n\n return self._lexsort_depth()\n\n def _lexsort_depth(self) -> int:\n \"\"\"\n Compute and return the lexsort_depth, the number of levels of the\n MultiIndex that are sorted lexically\n\n Returns\n -------\n int\n \"\"\"\n int64_codes = [ensure_int64(level_codes) for level_codes in self.codes]\n for k in range(self.nlevels, 0, -1):\n if libalgos.is_lexsorted(int64_codes[:k]):\n return k\n return 0\n\n def _sort_levels_monotonic(self):\n \"\"\"\n This is an *internal* function.\n\n Create a new MultiIndex from the current to monotonically sorted\n items IN the levels. This does not actually make the entire MultiIndex\n monotonic, JUST the levels.\n\n The resulting MultiIndex will have the same outward\n appearance, meaning the same .values and ordering. It will also\n be .equals() to the original.\n\n Returns\n -------\n MultiIndex\n\n Examples\n --------\n >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],\n ... 
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])\n >>> mi\n MultiIndex([('a', 'bb'),\n ('a', 'aa'),\n ('b', 'bb'),\n ('b', 'aa')],\n )\n\n >>> mi.sort_values()\n MultiIndex([('a', 'aa'),\n ('a', 'bb'),\n ('b', 'aa'),\n ('b', 'bb')],\n )\n \"\"\"\n if self.is_lexsorted() and self.is_monotonic:\n return self\n\n new_levels = []\n new_codes = []\n\n for lev, level_codes in zip(self.levels, self.codes):\n\n if not lev.is_monotonic:\n try:\n # indexer to reorder the levels\n indexer = lev.argsort()\n except TypeError:\n pass\n else:\n lev = lev.take(indexer)\n\n # indexer to reorder the level codes\n indexer = ensure_int64(indexer)\n ri = lib.get_reverse_indexer(indexer, len(indexer))\n level_codes = algos.take_1d(ri, level_codes)\n\n new_levels.append(lev)\n new_codes.append(level_codes)\n\n return MultiIndex(\n new_levels,\n new_codes,\n names=self.names,\n sortorder=self.sortorder,\n verify_integrity=False,\n )\n\n def remove_unused_levels(self):\n \"\"\"\n Create a new MultiIndex from the current that removes\n unused levels, meaning that they are not expressed in the labels.\n\n The resulting MultiIndex will have the same outward\n appearance, meaning the same .values and ordering. It will also\n be .equals() to the original.\n\n Returns\n -------\n MultiIndex\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_product([range(2), list('ab')])\n >>> mi\n MultiIndex([(0, 'a'),\n (0, 'b'),\n (1, 'a'),\n (1, 'b')],\n )\n\n >>> mi[2:]\n MultiIndex([(1, 'a'),\n (1, 'b')],\n )\n\n The 0 from the first level is not represented\n and can be removed\n\n >>> mi2 = mi[2:].remove_unused_levels()\n >>> mi2.levels\n FrozenList([[1], ['a', 'b']])\n \"\"\"\n new_levels = []\n new_codes = []\n\n changed = False\n for lev, level_codes in zip(self.levels, self.codes):\n\n # Since few levels are typically unused, bincount() is more\n # efficient than unique() - however it only accepts positive values\n # (and drops order):\n uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1\n has_na = int(len(uniques) and (uniques[0] == -1))\n\n if len(uniques) != len(lev) + has_na:\n # We have unused levels\n changed = True\n\n # Recalculate uniques, now preserving order.\n # Can easily be cythonized by exploiting the already existing\n # \"uniques\" and stop parsing \"level_codes\" when all items\n # are found:\n uniques = algos.unique(level_codes)\n if has_na:\n na_idx = np.where(uniques == -1)[0]\n # Just ensure that -1 is in first position:\n uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]\n\n # codes get mapped from uniques to 0:len(uniques)\n # -1 (if present) is mapped to last position\n code_mapping = np.zeros(len(lev) + has_na)\n # ... 
and reassigned value -1:\n code_mapping[uniques] = np.arange(len(uniques)) - has_na\n\n level_codes = code_mapping[level_codes]\n\n # new levels are simple\n lev = lev.take(uniques[has_na:])\n\n new_levels.append(lev)\n new_codes.append(level_codes)\n\n result = self.view()\n\n if changed:\n result._reset_identity()\n result._set_levels(new_levels, validate=False)\n result._set_codes(new_codes, validate=False)\n\n return result\n\n # --------------------------------------------------------------------\n # Pickling Methods\n\n def __reduce__(self):\n \"\"\"Necessary for making this object picklable\"\"\"\n d = dict(\n levels=list(self.levels),\n codes=list(self.codes),\n sortorder=self.sortorder,\n names=list(self.names),\n )\n return ibase._new_Index, (type(self), d), None\n\n # --------------------------------------------------------------------\n\n def __getitem__(self, key):\n if is_scalar(key):\n key = com.cast_scalar_indexer(key)\n\n retval = []\n for lev, level_codes in zip(self.levels, self.codes):\n if level_codes[key] == -1:\n retval.append(np.nan)\n else:\n retval.append(lev[level_codes[key]])\n\n return tuple(retval)\n else:\n if com.is_bool_indexer(key):\n key = np.asarray(key, dtype=bool)\n sortorder = self.sortorder\n else:\n # cannot be sure whether the result will be sorted\n sortorder = None\n\n if isinstance(key, Index):\n key = np.asarray(key)\n\n new_codes = [level_codes[key] for level_codes in self.codes]\n\n return MultiIndex(\n levels=self.levels,\n codes=new_codes,\n names=self.names,\n sortorder=sortorder,\n verify_integrity=False,\n )\n\n @Appender(_index_shared_docs[\"take\"] % _index_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):\n nv.validate_take(tuple(), kwargs)\n indices = ensure_platform_int(indices)\n taken = self._assert_take_fillable(\n self.codes,\n indices,\n allow_fill=allow_fill,\n fill_value=fill_value,\n na_value=-1,\n )\n return MultiIndex(\n levels=self.levels, codes=taken, names=self.names, verify_integrity=False\n )\n\n def _assert_take_fillable(\n self, values, indices, allow_fill=True, fill_value=None, na_value=None\n ):\n \"\"\" Internal method to handle NA filling of take \"\"\"\n # only fill if we are passing a non-None fill_value\n if allow_fill and fill_value is not None:\n if (indices < -1).any():\n msg = (\n \"When allow_fill=True and fill_value is not None, \"\n \"all indices must be >= -1\"\n )\n raise ValueError(msg)\n taken = [lab.take(indices) for lab in self.codes]\n mask = indices == -1\n if mask.any():\n masked = []\n for new_label in taken:\n label_values = new_label\n label_values[mask] = na_value\n masked.append(np.asarray(label_values))\n taken = masked\n else:\n taken = [lab.take(indices) for lab in self.codes]\n return taken\n\n def append(self, other):\n \"\"\"\n Append a collection of Index options together\n\n Parameters\n ----------\n other : Index or list/tuple of indices\n\n Returns\n -------\n appended : Index\n \"\"\"\n if not isinstance(other, (list, tuple)):\n other = [other]\n\n if all(\n (isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other\n ):\n arrays = []\n for i in range(self.nlevels):\n label = self._get_level_values(i)\n appended = [o._get_level_values(i) for o in other]\n arrays.append(label.append(appended))\n return MultiIndex.from_arrays(arrays, names=self.names)\n\n to_concat = (self.values,) + tuple(k._values for k in other)\n new_tuples = np.concatenate(to_concat)\n\n # if all(isinstance(x, MultiIndex) for x in other):\n try:\n return 
MultiIndex.from_tuples(new_tuples, names=self.names)\n except (TypeError, IndexError):\n return Index(new_tuples)\n\n def argsort(self, *args, **kwargs) -> np.ndarray:\n return self.values.argsort(*args, **kwargs)\n\n @Appender(_index_shared_docs[\"repeat\"] % _index_doc_kwargs)\n def repeat(self, repeats, axis=None):\n nv.validate_repeat(tuple(), dict(axis=axis))\n repeats = ensure_platform_int(repeats)\n return MultiIndex(\n levels=self.levels,\n codes=[\n level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)\n for level_codes in self.codes\n ],\n names=self.names,\n sortorder=self.sortorder,\n verify_integrity=False,\n )\n\n def where(self, cond, other=None):\n raise NotImplementedError(\".where is not supported for MultiIndex operations\")\n\n def drop(self, codes, level=None, errors=\"raise\"):\n \"\"\"\n Make new MultiIndex with passed list of codes deleted\n\n Parameters\n ----------\n codes : array-like\n Must be a list of tuples\n level : int or level name, default None\n errors : str, default 'raise'\n\n Returns\n -------\n dropped : MultiIndex\n \"\"\"\n if level is not None:\n return self._drop_from_level(codes, level, errors)\n\n if not isinstance(codes, (np.ndarray, Index)):\n try:\n codes = com.index_labels_to_array(codes, dtype=object)\n except ValueError:\n pass\n\n inds = []\n for level_codes in codes:\n try:\n loc = self.get_loc(level_codes)\n # get_loc returns either an integer, a slice, or a boolean\n # mask\n if isinstance(loc, int):\n inds.append(loc)\n elif isinstance(loc, slice):\n inds.extend(range(loc.start, loc.stop))\n elif com.is_bool_indexer(loc):\n if self.lexsort_depth == 0:\n warnings.warn(\n \"dropping on a non-lexsorted multi-index \"\n \"without a level parameter may impact performance.\",\n PerformanceWarning,\n stacklevel=3,\n )\n loc = loc.nonzero()[0]\n inds.extend(loc)\n else:\n msg = f\"unsupported indexer of type {type(loc)}\"\n raise AssertionError(msg)\n except KeyError:\n if errors != \"ignore\":\n raise\n\n return self.delete(inds)\n\n def _drop_from_level(self, codes, level, errors=\"raise\"):\n codes = com.index_labels_to_array(codes)\n i = self._get_level_number(level)\n index = self.levels[i]\n values = index.get_indexer(codes)\n\n mask = ~algos.isin(self.codes[i], values)\n if mask.all() and errors != \"ignore\":\n raise KeyError(f\"labels {codes} not found in level\")\n\n return self[mask]\n\n def swaplevel(self, i=-2, j=-1):\n \"\"\"\n Swap level i with level j.\n\n Calling this method does not change the ordering of the values.\n\n Parameters\n ----------\n i : int, str, default -2\n First level of index to be swapped. Can pass level name as string.\n Type of parameters can be mixed.\n j : int, str, default -1\n Second level of index to be swapped. Can pass level name as string.\n Type of parameters can be mixed.\n\n Returns\n -------\n MultiIndex\n A new MultiIndex.\n\n See Also\n --------\n Series.swaplevel : Swap levels i and j in a MultiIndex.\n Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a\n particular axis.\n\n Examples\n --------\n >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],\n ... 
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])\n >>> mi\n MultiIndex([('a', 'bb'),\n ('a', 'aa'),\n ('b', 'bb'),\n ('b', 'aa')],\n )\n >>> mi.swaplevel(0, 1)\n MultiIndex([('bb', 'a'),\n ('aa', 'a'),\n ('bb', 'b'),\n ('aa', 'b')],\n )\n \"\"\"\n new_levels = list(self.levels)\n new_codes = list(self.codes)\n new_names = list(self.names)\n\n i = self._get_level_number(i)\n j = self._get_level_number(j)\n\n new_levels[i], new_levels[j] = new_levels[j], new_levels[i]\n new_codes[i], new_codes[j] = new_codes[j], new_codes[i]\n new_names[i], new_names[j] = new_names[j], new_names[i]\n\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n def reorder_levels(self, order):\n \"\"\"\n Rearrange levels using input order. May not drop or duplicate levels.\n\n Parameters\n ----------\n order : list of int or list of str\n List representing new level order. Reference level by number\n (position) or by key (label).\n\n Returns\n -------\n MultiIndex\n \"\"\"\n order = [self._get_level_number(i) for i in order]\n if len(order) != self.nlevels:\n raise AssertionError(\n f\"Length of order must be same as number of levels ({self.nlevels}), \"\n f\"got {len(order)}\"\n )\n new_levels = [self.levels[i] for i in order]\n new_codes = [self.codes[i] for i in order]\n new_names = [self.names[i] for i in order]\n\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n def _get_codes_for_sorting(self):\n \"\"\"\n we categorizing our codes by using the\n available categories (all, not just observed)\n excluding any missing ones (-1); this is in preparation\n for sorting, where we need to disambiguate that -1 is not\n a valid valid\n \"\"\"\n\n def cats(level_codes):\n return np.arange(\n np.array(level_codes).max() + 1 if len(level_codes) else 0,\n dtype=level_codes.dtype,\n )\n\n return [\n Categorical.from_codes(level_codes, cats(level_codes), ordered=True)\n for level_codes in self.codes\n ]\n\n def sortlevel(self, level=0, ascending=True, sort_remaining=True):\n \"\"\"\n Sort MultiIndex at the requested level. 
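# --- Illustrative aside (not part of the original file): sortlevel returns
# both the sorted index and the indexer mapping new positions to old ones.
import pandas as pd

mi = pd.MultiIndex.from_tuples([(2, "b"), (1, "a"), (2, "a")])
sorted_mi, indexer = mi.sortlevel(level=0)   # sort_remaining=True by default
print(list(sorted_mi))   # [(1, 'a'), (2, 'a'), (2, 'b')]
print(indexer)           # [1 2 0]
# --- end of aside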
The result will respect the\n original ordering of the associated factor at that level.\n\n Parameters\n ----------\n level : list-like, int or str, default 0\n If a string is given, must be a name of the level.\n If list-like must be names or ints of levels.\n ascending : bool, default True\n False to sort in descending order.\n Can also be a list to specify a directed ordering.\n sort_remaining : sort by the remaining levels after level\n\n Returns\n -------\n sorted_index : pd.MultiIndex\n Resulting index.\n indexer : np.ndarray\n Indices of output values in original index.\n \"\"\"\n if isinstance(level, (str, int)):\n level = [level]\n level = [self._get_level_number(lev) for lev in level]\n sortorder = None\n\n # we have a directed ordering via ascending\n if isinstance(ascending, list):\n if not len(level) == len(ascending):\n raise ValueError(\"level must have same length as ascending\")\n\n indexer = lexsort_indexer(\n [self.codes[lev] for lev in level], orders=ascending\n )\n\n # level ordering\n else:\n\n codes = list(self.codes)\n shape = list(self.levshape)\n\n # partition codes and shape\n primary = tuple(codes[lev] for lev in level)\n primshp = tuple(shape[lev] for lev in level)\n\n # Reverse sorted to retain the order of\n # smaller indices that needs to be removed\n for lev in sorted(level, reverse=True):\n codes.pop(lev)\n shape.pop(lev)\n\n if sort_remaining:\n primary += primary + tuple(codes)\n primshp += primshp + tuple(shape)\n else:\n sortorder = level[0]\n\n indexer = indexer_from_factorized(primary, primshp, compress=False)\n\n if not ascending:\n indexer = indexer[::-1]\n\n indexer = ensure_platform_int(indexer)\n new_codes = [level_codes.take(indexer) for level_codes in self.codes]\n\n new_index = MultiIndex(\n codes=new_codes,\n levels=self.levels,\n names=self.names,\n sortorder=sortorder,\n verify_integrity=False,\n )\n\n return new_index, indexer\n\n def reindex(self, target, method=None, level=None, limit=None, tolerance=None):\n \"\"\"\n Create index with target's values (move/add/delete values as necessary)\n\n Returns\n -------\n new_index : pd.MultiIndex\n Resulting index\n indexer : np.ndarray or None\n Indices of output values in original index.\n\n \"\"\"\n # GH6552: preserve names when reindexing to non-named target\n # (i.e. 
neither Index nor Series).\n preserve_names = not hasattr(target, \"names\")\n\n if level is not None:\n if method is not None:\n raise TypeError(\"Fill method not supported if level passed\")\n\n # GH7774: preserve dtype/tz if target is empty and not an Index.\n # target may be an iterator\n target = ibase._ensure_has_len(target)\n if len(target) == 0 and not isinstance(target, Index):\n idx = self.levels[level]\n attrs = idx._get_attributes_dict()\n attrs.pop(\"freq\", None) # don't preserve freq\n target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)\n else:\n target = ensure_index(target)\n target, indexer, _ = self._join_level(\n target, level, how=\"right\", return_indexers=True, keep_order=False\n )\n else:\n target = ensure_index(target)\n if self.equals(target):\n indexer = None\n else:\n if self.is_unique:\n indexer = self.get_indexer(\n target, method=method, limit=limit, tolerance=tolerance\n )\n else:\n raise ValueError(\"cannot handle a non-unique multi-index!\")\n\n if not isinstance(target, MultiIndex):\n if indexer is None:\n target = self\n elif (indexer >= 0).all():\n target = self.take(indexer)\n else:\n # hopefully?\n target = MultiIndex.from_tuples(target)\n\n if (\n preserve_names\n and target.nlevels == self.nlevels\n and target.names != self.names\n ):\n target = target.copy(deep=False)\n target.names = self.names\n\n return target, indexer\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n def get_value(self, series, key):\n # Label-based\n if not is_hashable(key) or is_iterator(key):\n # We allow tuples if they are hashable, whereas other Index\n # subclasses require scalar.\n # We have to explicitly exclude generators, as these are hashable.\n raise InvalidIndexError(key)\n\n try:\n loc = self.get_loc(key)\n except KeyError:\n if is_integer(key):\n loc = key\n else:\n raise\n\n return self._get_values_for_loc(series, loc, key)\n\n def _get_values_for_loc(self, series: \"Series\", loc, key):\n \"\"\"\n Do a positional lookup on the given Series, returning either a scalar\n or a Series.\n\n Assumes that `series.index is self`\n \"\"\"\n new_values = series._values[loc]\n if is_scalar(loc):\n return new_values\n\n new_index = self[loc]\n new_index = maybe_droplevels(new_index, key)\n new_ser = series._constructor(new_values, index=new_index, name=series.name)\n return new_ser.__finalize__(series)\n\n def _convert_listlike_indexer(self, keyarr):\n \"\"\"\n Parameters\n ----------\n keyarr : list-like\n Indexer to convert.\n\n Returns\n -------\n tuple (indexer, keyarr)\n indexer is an ndarray or None if cannot convert\n keyarr are tuple-safe keys\n \"\"\"\n indexer, keyarr = super()._convert_listlike_indexer(keyarr)\n\n # are we indexing a specific level\n if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple):\n level = 0\n _, indexer = self.reindex(keyarr, level=level)\n\n # take all\n if indexer is None:\n indexer = np.arange(len(self))\n\n check = self.levels[0].get_indexer(keyarr)\n mask = check == -1\n if mask.any():\n raise KeyError(f\"{keyarr[mask]} not in index\")\n\n return indexer, keyarr\n\n def _get_partial_string_timestamp_match_key(self, key):\n \"\"\"\n Translate any partial string timestamp matches in key, returning the\n new key.\n\n Only relevant for MultiIndex.\n \"\"\"\n # GH#10331\n if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing:\n # Convert key '2016-01-01' to\n # ('2016-01-01'[, slice(None, None, None)]+)\n key = tuple([key] + 
[slice(None)] * (len(self.levels) - 1))\n\n if isinstance(key, tuple):\n # Convert (..., '2016-01-01', ...) in tuple to\n # (..., slice('2016-01-01', '2016-01-01', None), ...)\n new_key = []\n for i, component in enumerate(key):\n if (\n isinstance(component, str)\n and self.levels[i]._supports_partial_string_indexing\n ):\n new_key.append(slice(component, component, None))\n else:\n new_key.append(component)\n key = tuple(new_key)\n\n return key\n\n @Appender(_index_shared_docs[\"get_indexer\"] % _index_doc_kwargs)\n def get_indexer(self, target, method=None, limit=None, tolerance=None):\n method = missing.clean_reindex_fill_method(method)\n target = ensure_index(target)\n\n # empty indexer\n if is_list_like(target) and not len(target):\n return ensure_platform_int(np.array([]))\n\n if not isinstance(target, MultiIndex):\n try:\n target = MultiIndex.from_tuples(target)\n except (TypeError, ValueError):\n\n # let's instead try with a straight Index\n if method is None:\n return Index(self.values).get_indexer(\n target, method=method, limit=limit, tolerance=tolerance\n )\n\n if not self.is_unique:\n raise ValueError(\"Reindexing only valid with uniquely valued Index objects\")\n\n if method == \"pad\" or method == \"backfill\":\n if tolerance is not None:\n raise NotImplementedError(\n \"tolerance not implemented yet for MultiIndex\"\n )\n indexer = self._engine.get_indexer(target, method, limit)\n elif method == \"nearest\":\n raise NotImplementedError(\n \"method='nearest' not implemented yet \"\n \"for MultiIndex; see GitHub issue 9365\"\n )\n else:\n indexer = self._engine.get_indexer(target)\n\n return ensure_platform_int(indexer)\n\n @Appender(_index_shared_docs[\"get_indexer_non_unique\"] % _index_doc_kwargs)\n def get_indexer_non_unique(self, target):\n return super().get_indexer_non_unique(target)\n\n def get_slice_bound(\n self, label: Union[Hashable, Sequence[Hashable]], side: str, kind: str\n ) -> int:\n \"\"\"\n For an ordered MultiIndex, compute slice bound\n that corresponds to given label.\n\n Returns leftmost (one-past-the-rightmost if `side=='right') position\n of given label.\n\n Parameters\n ----------\n label : object or tuple of objects\n side : {'left', 'right'}\n kind : {'loc', 'getitem'}\n\n Returns\n -------\n int\n Index of label.\n\n Notes\n -----\n This method only works if level 0 index of the MultiIndex is lexsorted.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])\n\n Get the locations from the leftmost 'b' in the first level\n until the end of the multiindex:\n\n >>> mi.get_slice_bound('b', side=\"left\", kind=\"loc\")\n 1\n\n Like above, but if you get the locations from the rightmost\n 'b' in the first level and 'f' in the second level:\n\n >>> mi.get_slice_bound(('b','f'), side=\"right\", kind=\"loc\")\n 3\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n \"\"\"\n if not isinstance(label, tuple):\n label = (label,)\n return self._partial_tup_index(label, side=side)\n\n def slice_locs(self, start=None, end=None, step=None, kind=None):\n \"\"\"\n For an ordered MultiIndex, compute the slice locations for input\n labels.\n\n The input labels can be tuples representing partial levels, e.g. 
for a\n MultiIndex with 3 levels, you can pass a single value (corresponding to\n the first level), or a 1-, 2-, or 3-tuple.\n\n Parameters\n ----------\n start : label or tuple, default None\n If None, defaults to the beginning\n end : label or tuple\n If None, defaults to the end\n step : int or None\n Slice step\n kind : string, optional, defaults None\n\n Returns\n -------\n (start, end) : (int, int)\n\n Notes\n -----\n This method only works if the MultiIndex is properly lexsorted. So,\n if only the first 2 levels of a 3-level MultiIndex are lexsorted,\n you can only pass two levels to ``.slice_locs``.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],\n ... names=['A', 'B'])\n\n Get the slice locations from the beginning of 'b' in the first level\n until the end of the multiindex:\n\n >>> mi.slice_locs(start='b')\n (1, 4)\n\n Like above, but stop at the end of 'b' in the first level and 'f' in\n the second level:\n\n >>> mi.slice_locs(start='b', end=('b', 'f'))\n (1, 3)\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n \"\"\"\n # This function adds nothing to its parent implementation (the magic\n # happens in get_slice_bound method), but it adds meaningful doc.\n return super().slice_locs(start, end, step, kind=kind)\n\n def _partial_tup_index(self, tup, side=\"left\"):\n if len(tup) > self.lexsort_depth:\n raise UnsortedIndexError(\n f\"Key length ({len(tup)}) was greater than MultiIndex lexsort depth \"\n f\"({self.lexsort_depth})\"\n )\n\n n = len(tup)\n start, end = 0, len(self)\n zipped = zip(tup, self.levels, self.codes)\n for k, (lab, lev, labs) in enumerate(zipped):\n section = labs[start:end]\n\n if lab not in lev and not isna(lab):\n if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):\n raise TypeError(f\"Level type mismatch: {lab}\")\n\n # short circuit\n loc = lev.searchsorted(lab, side=side)\n if side == \"right\" and loc >= 0:\n loc -= 1\n return start + section.searchsorted(loc, side=side)\n\n idx = self._get_loc_single_level_index(lev, lab)\n if k < n - 1:\n end = start + section.searchsorted(idx, side=\"right\")\n start = start + section.searchsorted(idx, side=\"left\")\n else:\n return start + section.searchsorted(idx, side=side)\n\n def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:\n \"\"\"\n If key is NA value, location of index unify as -1.\n\n Parameters\n ----------\n level_index: Index\n key : label\n\n Returns\n -------\n loc : int\n If key is NA value, loc is -1\n Else, location of key in index.\n\n See Also\n --------\n Index.get_loc : The get_loc method for (single-level) index.\n \"\"\"\n if is_scalar(key) and isna(key):\n return -1\n else:\n return level_index.get_loc(key)\n\n def get_loc(self, key, method=None):\n \"\"\"\n Get location for a label or a tuple of labels as an integer, slice or\n boolean mask.\n\n Parameters\n ----------\n key : label or tuple of labels (one for each level)\n method : None\n\n Returns\n -------\n loc : int, slice object or boolean mask\n If the key is past the lexsort depth, the return may be a\n boolean mask array, otherwise it is always a slice or int.\n\n See Also\n --------\n Index.get_loc : The get_loc method for (single-level) index.\n MultiIndex.slice_locs : Get slice location given start label(s) and\n end label(s).\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n 
sequence of such.\n\n Notes\n -----\n The key cannot be a slice, list of same-level labels, a boolean mask,\n or a sequence of such. If you want to use those, use\n :meth:`MultiIndex.get_locs` instead.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])\n\n >>> mi.get_loc('b')\n slice(1, 3, None)\n\n >>> mi.get_loc(('b', 'e'))\n 1\n \"\"\"\n if method is not None:\n raise NotImplementedError(\n \"only the default get_loc method is \"\n \"currently supported for MultiIndex\"\n )\n\n def _maybe_to_slice(loc):\n \"\"\"convert integer indexer to boolean mask or slice if possible\"\"\"\n if not isinstance(loc, np.ndarray) or loc.dtype != \"int64\":\n return loc\n\n loc = lib.maybe_indices_to_slice(loc, len(self))\n if isinstance(loc, slice):\n return loc\n\n mask = np.empty(len(self), dtype=\"bool\")\n mask.fill(False)\n mask[loc] = True\n return mask\n\n if not isinstance(key, (tuple, list)):\n # not including list here breaks some indexing, xref #30892\n loc = self._get_level_indexer(key, level=0)\n return _maybe_to_slice(loc)\n\n keylen = len(key)\n if self.nlevels < keylen:\n raise KeyError(\n f\"Key length ({keylen}) exceeds index depth ({self.nlevels})\"\n )\n\n if keylen == self.nlevels and self.is_unique:\n return self._engine.get_loc(key)\n\n # -- partial selection or non-unique index\n # break the key into 2 parts based on the lexsort_depth of the index;\n # the first part returns a continuous slice of the index; the 2nd part\n # needs linear search within the slice\n i = self.lexsort_depth\n lead_key, follow_key = key[:i], key[i:]\n start, stop = (\n self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))\n )\n\n if start == stop:\n raise KeyError(key)\n\n if not follow_key:\n return slice(start, stop)\n\n warnings.warn(\n \"indexing past lexsort depth may impact performance.\",\n PerformanceWarning,\n stacklevel=10,\n )\n\n loc = np.arange(start, stop, dtype=\"int64\")\n\n for i, k in enumerate(follow_key, len(lead_key)):\n mask = self.codes[i][loc] == self._get_loc_single_level_index(\n self.levels[i], k\n )\n if not mask.all():\n loc = loc[mask]\n if not len(loc):\n raise KeyError(key)\n\n return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)\n\n def get_loc_level(self, key, level=0, drop_level: bool = True):\n \"\"\"\n Get both the location for the requested label(s) and the\n resulting sliced index.\n\n Parameters\n ----------\n key : label or sequence of labels\n level : int/level name or list thereof, optional\n drop_level : bool, default True\n If ``False``, the resulting index will not drop any level.\n\n Returns\n -------\n loc : A 2-tuple where the elements are:\n Element 0: int, slice object or boolean array\n Element 1: The resulting sliced multiindex/index. If the key\n contains all levels, this will be ``None``.\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],\n ... 
names=['A', 'B'])\n\n >>> mi.get_loc_level('b')\n (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))\n\n >>> mi.get_loc_level('e', level='B')\n (array([False, True, False], dtype=bool),\n Index(['b'], dtype='object', name='A'))\n\n >>> mi.get_loc_level(['b', 'e'])\n (1, None)\n \"\"\"\n # different name to distinguish from maybe_droplevels\n def maybe_mi_droplevels(indexer, levels, drop_level: bool):\n if not drop_level:\n return self[indexer]\n # kludgearound\n orig_index = new_index = self[indexer]\n levels = [self._get_level_number(i) for i in levels]\n for i in sorted(levels, reverse=True):\n try:\n new_index = new_index.droplevel(i)\n except ValueError:\n\n # no dropping here\n return orig_index\n return new_index\n\n if isinstance(level, (tuple, list)):\n if len(key) != len(level):\n raise AssertionError(\n \"Key for location must have same length as number of levels\"\n )\n result = None\n for lev, k in zip(level, key):\n loc, new_index = self.get_loc_level(k, level=lev)\n if isinstance(loc, slice):\n mask = np.zeros(len(self), dtype=bool)\n mask[loc] = True\n loc = mask\n\n result = loc if result is None else result & loc\n\n return result, maybe_mi_droplevels(result, level, drop_level)\n\n level = self._get_level_number(level)\n\n # kludge for #1796\n if isinstance(key, list):\n key = tuple(key)\n\n if isinstance(key, tuple) and level == 0:\n\n try:\n if key in self.levels[0]:\n indexer = self._get_level_indexer(key, level=level)\n new_index = maybe_mi_droplevels(indexer, [0], drop_level)\n return indexer, new_index\n except (TypeError, InvalidIndexError):\n pass\n\n if not any(isinstance(k, slice) for k in key):\n\n # partial selection\n # optionally get indexer to avoid re-calculation\n def partial_selection(key, indexer=None):\n if indexer is None:\n indexer = self.get_loc(key)\n ilevels = [\n i for i in range(len(key)) if key[i] != slice(None, None)\n ]\n return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)\n\n if len(key) == self.nlevels and self.is_unique:\n # Complete key in unique index -> standard get_loc\n try:\n return (self._engine.get_loc(key), None)\n except KeyError as e:\n raise KeyError(key) from e\n else:\n return partial_selection(key)\n else:\n indexer = None\n for i, k in enumerate(key):\n if not isinstance(k, slice):\n k = self._get_level_indexer(k, level=i)\n if isinstance(k, slice):\n # everything\n if k.start == 0 and k.stop == len(self):\n k = slice(None, None)\n else:\n k_index = k\n\n if isinstance(k, slice):\n if k == slice(None, None):\n continue\n else:\n raise TypeError(key)\n\n if indexer is None:\n indexer = k_index\n else: # pragma: no cover\n indexer &= k_index\n if indexer is None:\n indexer = slice(None, None)\n ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]\n return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)\n else:\n indexer = self._get_level_indexer(key, level=level)\n return indexer, maybe_mi_droplevels(indexer, [level], drop_level)\n\n def _get_level_indexer(self, key, level=0, indexer=None):\n # return an indexer, boolean array or a slice showing where the key is\n # in the totality of values\n # if the indexer is provided, then use this\n\n level_index = self.levels[level]\n level_codes = self.codes[level]\n\n def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):\n # given the inputs and the codes/indexer, compute an indexer set\n # if we have a provided indexer, then this need not consider\n # the entire labels set\n\n r = np.arange(start, stop, 
step)\n if indexer is not None and len(indexer) != len(codes):\n\n # we have an indexer which maps the locations in the labels\n # that we have already selected (and is not an indexer for the\n # entire set) otherwise this is wasteful so we only need to\n # examine locations that are in this set the only magic here is\n # that the result are the mappings to the set that we have\n # selected\n from pandas import Series\n\n mapper = Series(indexer)\n indexer = codes.take(ensure_platform_int(indexer))\n result = Series(Index(indexer).isin(r).nonzero()[0])\n m = result.map(mapper)._ndarray_values\n\n else:\n m = np.zeros(len(codes), dtype=bool)\n m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True\n\n return m\n\n if isinstance(key, slice):\n # handle a slice, returning a slice if we can\n # otherwise a boolean indexer\n\n try:\n if key.start is not None:\n start = level_index.get_loc(key.start)\n else:\n start = 0\n if key.stop is not None:\n stop = level_index.get_loc(key.stop)\n else:\n stop = len(level_index) - 1\n step = key.step\n except KeyError:\n\n # we have a partial slice (like looking up a partial date\n # string)\n start = stop = level_index.slice_indexer(\n key.start, key.stop, key.step, kind=\"loc\"\n )\n step = start.step\n\n if isinstance(start, slice) or isinstance(stop, slice):\n # we have a slice for start and/or stop\n # a partial date slicer on a DatetimeIndex generates a slice\n # note that the stop ALREADY includes the stopped point (if\n # it was a string sliced)\n start = getattr(start, \"start\", start)\n stop = getattr(stop, \"stop\", stop)\n return convert_indexer(start, stop, step)\n\n elif level > 0 or self.lexsort_depth == 0 or step is not None:\n # need to have like semantics here to right\n # searching as when we are using a slice\n # so include the stop+1 (so we include stop)\n return convert_indexer(start, stop + 1, step)\n else:\n # sorted, so can return slice object -> view\n i = level_codes.searchsorted(start, side=\"left\")\n j = level_codes.searchsorted(stop, side=\"right\")\n return slice(i, j, step)\n\n else:\n\n code = self._get_loc_single_level_index(level_index, key)\n\n if level > 0 or self.lexsort_depth == 0:\n # Desired level is not sorted\n locs = np.array(level_codes == code, dtype=bool, copy=False)\n if not locs.any():\n # The label is present in self.levels[level] but unused:\n raise KeyError(key)\n return locs\n\n i = level_codes.searchsorted(code, side=\"left\")\n j = level_codes.searchsorted(code, side=\"right\")\n if i == j:\n # The label is present in self.levels[level] but unused:\n raise KeyError(key)\n return slice(i, j)\n\n def get_locs(self, seq):\n \"\"\"\n Get location for a sequence of labels.\n\n Parameters\n ----------\n seq : label, slice, list, mask or a sequence of such\n You should use one of the above for each level.\n If a level should not be used, set it to ``slice(None)``.\n\n Returns\n -------\n numpy.ndarray\n NumPy array of integers suitable for passing to iloc.\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.slice_locs : Get slice location given start label(s) and\n end label(s).\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])\n\n >>> mi.get_locs('b') # doctest: +SKIP\n array([1, 2], dtype=int64)\n\n >>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP\n array([1, 2], dtype=int64)\n\n >>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP\n array([2], dtype=int64)\n \"\"\"\n 
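# The code below resolves each entry of ``seq`` against its level:\n        # slices and boolean masks are converted to positional Int64Index\n        # indexers, the labels of a list-like are OR'd together within a\n        # level, and the per-level results are AND'd across levels. For\n        # example, in ``mi.get_locs([slice(None), ['e', 'f']])`` the first\n        # selector keeps every row, the second keeps rows labelled 'e' or\n        # 'f', and the intersection yields positions [1, 2].\n        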
from pandas.core.indexes.numeric import Int64Index\n\n # must be lexsorted to at least as many levels\n true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]\n if true_slices and true_slices[-1] >= self.lexsort_depth:\n raise UnsortedIndexError(\n \"MultiIndex slicing requires the index to be lexsorted: slicing \"\n f\"on levels {true_slices}, lexsort depth {self.lexsort_depth}\"\n )\n # indexer\n # this is the list of all values that we want to select\n n = len(self)\n indexer = None\n\n def _convert_to_indexer(r):\n # return an indexer\n if isinstance(r, slice):\n m = np.zeros(n, dtype=bool)\n m[r] = True\n r = m.nonzero()[0]\n elif com.is_bool_indexer(r):\n if len(r) != n:\n raise ValueError(\n \"cannot index with a boolean indexer \"\n \"that is not the same length as the \"\n \"index\"\n )\n r = r.nonzero()[0]\n return Int64Index(r)\n\n def _update_indexer(idxr, indexer=indexer):\n if indexer is None:\n indexer = Index(np.arange(n))\n if idxr is None:\n return indexer\n return indexer & idxr\n\n for i, k in enumerate(seq):\n\n if com.is_bool_indexer(k):\n # a boolean indexer, must be the same length!\n k = np.asarray(k)\n indexer = _update_indexer(_convert_to_indexer(k), indexer=indexer)\n\n elif is_list_like(k):\n # a collection of labels to include from this level (these\n # are or'd)\n indexers = None\n for x in k:\n try:\n idxrs = _convert_to_indexer(\n self._get_level_indexer(x, level=i, indexer=indexer)\n )\n indexers = idxrs if indexers is None else indexers | idxrs\n except KeyError:\n\n # ignore not founds\n continue\n\n if indexers is not None:\n indexer = _update_indexer(indexers, indexer=indexer)\n else:\n # no matches we are done\n return Int64Index([])._ndarray_values\n\n elif com.is_null_slice(k):\n # empty slice\n indexer = _update_indexer(None, indexer=indexer)\n\n elif isinstance(k, slice):\n\n # a slice, include BOTH of the labels\n indexer = _update_indexer(\n _convert_to_indexer(\n self._get_level_indexer(k, level=i, indexer=indexer)\n ),\n indexer=indexer,\n )\n else:\n # a single label\n indexer = _update_indexer(\n _convert_to_indexer(\n self.get_loc_level(k, level=i, drop_level=False)[0]\n ),\n indexer=indexer,\n )\n\n # empty indexer\n if indexer is None:\n return Int64Index([])._ndarray_values\n\n indexer = self._reorder_indexer(seq, indexer)\n\n return indexer._ndarray_values\n\n def _reorder_indexer(\n self, seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...], indexer: ArrayLike\n ) -> ArrayLike:\n \"\"\"\n Reorder an indexer of a MultiIndex (self) so that the label are in the\n same order as given in seq\n\n Parameters\n ----------\n seq : label/slice/list/mask or a sequence of such\n indexer: an Int64Index indexer of self\n\n Returns\n -------\n indexer : a sorted Int64Index indexer of self ordered as seq\n \"\"\"\n # If the index is lexsorted and the list_like label in seq are sorted\n # then we do not need to sort\n if self.is_lexsorted():\n need_sort = False\n for i, k in enumerate(seq):\n if is_list_like(k):\n if not need_sort:\n k_codes = self.levels[i].get_indexer(k)\n k_codes = k_codes[k_codes >= 0] # Filter absent keys\n # True if the given codes are not ordered\n need_sort = (k_codes[:-1] > k_codes[1:]).any()\n # Bail out if both index and seq are sorted\n if not need_sort:\n return indexer\n\n n = len(self)\n keys: Tuple[np.ndarray, ...] 
= tuple()\n # For each level of the sequence in seq, map the level codes with the\n # order they appears in a list-like sequence\n # This mapping is then use to reorder the indexer\n for i, k in enumerate(seq):\n if com.is_bool_indexer(k):\n new_order = np.arange(n)[indexer]\n elif is_list_like(k):\n # Generate a map with all level codes as sorted initially\n key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(\n self.levels[i]\n )\n # Set order as given in the indexer list\n level_indexer = self.levels[i].get_indexer(k)\n level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys\n key_order_map[level_indexer] = np.arange(len(level_indexer))\n\n new_order = key_order_map[self.codes[i][indexer]]\n else:\n # For all other case, use the same order as the level\n new_order = np.arange(n)[indexer]\n keys = (new_order,) + keys\n\n # Find the reordering using lexsort on the keys mapping\n ind = np.lexsort(keys)\n return indexer[ind]\n\n def truncate(self, before=None, after=None):\n \"\"\"\n Slice index between two labels / tuples, return new MultiIndex\n\n Parameters\n ----------\n before : label or tuple, can be partial. Default None\n None defaults to start\n after : label or tuple, can be partial. Default None\n None defaults to end\n\n Returns\n -------\n truncated : MultiIndex\n \"\"\"\n if after and before and after < before:\n raise ValueError(\"after < before\")\n\n i, j = self.levels[0].slice_locs(before, after)\n left, right = self.slice_locs(before, after)\n\n new_levels = list(self.levels)\n new_levels[0] = new_levels[0][i:j]\n\n new_codes = [level_codes[left:right] for level_codes in self.codes]\n new_codes[0] = new_codes[0] - i\n\n return MultiIndex(levels=new_levels, codes=new_codes, verify_integrity=False)\n\n def equals(self, other) -> bool:\n \"\"\"\n Determines if two MultiIndex objects have the same labeling information\n (the levels themselves do not necessarily have to be the same)\n\n See Also\n --------\n equal_levels\n \"\"\"\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n\n if not isinstance(other, MultiIndex):\n # d-level MultiIndex can equal d-tuple Index\n if not is_object_dtype(other.dtype):\n if self.nlevels != other.nlevels:\n return False\n\n other_vals = com.values_from_object(ensure_index(other))\n return array_equivalent(self._ndarray_values, other_vals)\n\n if self.nlevels != other.nlevels:\n return False\n\n if len(self) != len(other):\n return False\n\n for i in range(self.nlevels):\n self_codes = self.codes[i]\n self_codes = self_codes[self_codes != -1]\n self_values = algos.take_nd(\n np.asarray(self.levels[i]._values), self_codes, allow_fill=False\n )\n\n other_codes = other.codes[i]\n other_codes = other_codes[other_codes != -1]\n other_values = algos.take_nd(\n np.asarray(other.levels[i]._values), other_codes, allow_fill=False\n )\n\n # since we use NaT both datetime64 and timedelta64\n # we can have a situation where a level is typed say\n # timedelta64 in self (IOW it has other values than NaT)\n # but types datetime64 in other (where its all NaT)\n # but these are equivalent\n if len(self_values) == 0 and len(other_values) == 0:\n continue\n\n if not array_equivalent(self_values, other_values):\n return False\n\n return True\n\n def equal_levels(self, other) -> bool:\n \"\"\"\n Return True if the levels of both MultiIndex objects are the same\n\n \"\"\"\n if self.nlevels != other.nlevels:\n return False\n\n for i in range(self.nlevels):\n if not 
self.levels[i].equals(other.levels[i]):\n return False\n return True\n\n # --------------------------------------------------------------------\n # Set Methods\n\n def union(self, other, sort=None):\n \"\"\"\n Form the union of two MultiIndex objects\n\n Parameters\n ----------\n other : MultiIndex or array / Index of tuples\n sort : False or None, default None\n Whether to sort the resulting Index.\n\n * None : Sort the result, except when\n\n 1. `self` and `other` are equal.\n 2. `self` has length 0.\n 3. Some values in `self` or `other` cannot be compared.\n A RuntimeWarning is issued in this case.\n\n * False : do not sort the result.\n\n .. versionadded:: 0.24.0\n\n .. versionchanged:: 0.24.1\n\n Changed the default value from ``True`` to ``None``\n (without change in behaviour).\n\n Returns\n -------\n Index\n\n >>> index.union(index2)\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_names = self._convert_can_do_setop(other)\n\n if len(other) == 0 or self.equals(other):\n return self\n\n # TODO: Index.union returns other when `len(self)` is 0.\n\n uniq_tuples = lib.fast_unique_multiple(\n [self._ndarray_values, other._ndarray_values], sort=sort\n )\n\n return MultiIndex.from_arrays(\n zip(*uniq_tuples), sortorder=0, names=result_names\n )\n\n def intersection(self, other, sort=False):\n \"\"\"\n Form the intersection of two MultiIndex objects.\n\n Parameters\n ----------\n other : MultiIndex or array / Index of tuples\n sort : False or None, default False\n Sort the resulting MultiIndex if possible\n\n .. versionadded:: 0.24.0\n\n .. versionchanged:: 0.24.1\n\n Changed the default from ``True`` to ``False``, to match\n behaviour from before 0.24.0\n\n Returns\n -------\n Index\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_names = self._convert_can_do_setop(other)\n\n if self.equals(other):\n return self\n\n lvals = self._ndarray_values\n rvals = other._ndarray_values\n\n uniq_tuples = None # flag whether _inner_indexer was succesful\n if self.is_monotonic and other.is_monotonic:\n try:\n uniq_tuples = self._inner_indexer(lvals, rvals)[0]\n sort = False # uniq_tuples is already sorted\n except TypeError:\n pass\n\n if uniq_tuples is None:\n other_uniq = set(rvals)\n seen = set()\n uniq_tuples = [\n x for x in lvals if x in other_uniq and not (x in seen or seen.add(x))\n ]\n\n if sort is None:\n uniq_tuples = sorted(uniq_tuples)\n\n if len(uniq_tuples) == 0:\n return MultiIndex(\n levels=self.levels,\n codes=[[]] * self.nlevels,\n names=result_names,\n verify_integrity=False,\n )\n else:\n return MultiIndex.from_arrays(\n zip(*uniq_tuples), sortorder=0, names=result_names\n )\n\n def difference(self, other, sort=None):\n \"\"\"\n Compute set difference of two MultiIndex objects\n\n Parameters\n ----------\n other : MultiIndex\n sort : False or None, default None\n Sort the resulting MultiIndex if possible\n\n .. versionadded:: 0.24.0\n\n .. 
versionchanged:: 0.24.1\n\n Changed the default value from ``True`` to ``None``\n (without change in behaviour).\n\n Returns\n -------\n diff : MultiIndex\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_names = self._convert_can_do_setop(other)\n\n if len(other) == 0:\n return self\n\n if self.equals(other):\n return MultiIndex(\n levels=self.levels,\n codes=[[]] * self.nlevels,\n names=result_names,\n verify_integrity=False,\n )\n\n this = self._get_unique_index()\n\n indexer = this.get_indexer(other)\n indexer = indexer.take((indexer != -1).nonzero()[0])\n\n label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)\n difference = this.values.take(label_diff)\n if sort is None:\n difference = sorted(difference)\n\n if len(difference) == 0:\n return MultiIndex(\n levels=[[]] * self.nlevels,\n codes=[[]] * self.nlevels,\n names=result_names,\n verify_integrity=False,\n )\n else:\n return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)\n\n def _convert_can_do_setop(self, other):\n result_names = self.names\n\n if not hasattr(other, \"names\"):\n if len(other) == 0:\n other = MultiIndex(\n levels=[[]] * self.nlevels,\n codes=[[]] * self.nlevels,\n verify_integrity=False,\n )\n else:\n msg = \"other must be a MultiIndex or a list of tuples\"\n try:\n other = MultiIndex.from_tuples(other)\n except TypeError as err:\n raise TypeError(msg) from err\n else:\n result_names = self.names if self.names == other.names else None\n return other, result_names\n\n # --------------------------------------------------------------------\n\n @Appender(Index.astype.__doc__)\n def astype(self, dtype, copy=True):\n dtype = pandas_dtype(dtype)\n if is_categorical_dtype(dtype):\n msg = \"> 1 ndim Categorical are not supported at this time\"\n raise NotImplementedError(msg)\n elif not is_object_dtype(dtype):\n raise TypeError(\n f\"Setting {type(self)} dtype to anything other \"\n \"than object is not supported\"\n )\n elif copy is True:\n return self._shallow_copy()\n return self\n\n def insert(self, loc: int, item):\n \"\"\"\n Make new MultiIndex inserting new item at location\n\n Parameters\n ----------\n loc : int\n item : tuple\n Must be same length as number of levels in the MultiIndex\n\n Returns\n -------\n new_index : Index\n \"\"\"\n # Pad the key with empty strings if lower levels of the key\n # aren't specified:\n if not isinstance(item, tuple):\n item = (item,) + (\"\",) * (self.nlevels - 1)\n elif len(item) != self.nlevels:\n raise ValueError(\"Item must have length equal to number of levels.\")\n\n new_levels = []\n new_codes = []\n for k, level, level_codes in zip(item, self.levels, self.codes):\n if k not in level:\n # have to insert into level\n # must insert at end otherwise you have to recompute all the\n # other codes\n lev_loc = len(level)\n level = level.insert(lev_loc, k)\n else:\n lev_loc = level.get_loc(k)\n\n new_levels.append(level)\n new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))\n\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False\n )\n\n def delete(self, loc):\n \"\"\"\n Make new index with passed location deleted\n\n Returns\n -------\n new_index : MultiIndex\n \"\"\"\n new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]\n return MultiIndex(\n levels=self.levels,\n codes=new_codes,\n names=self.names,\n verify_integrity=False,\n )\n\n def _wrap_joined_index(self, joined, other):\n names = self.names if 
self.names == other.names else None\n return MultiIndex.from_tuples(joined, names=names)\n\n @Appender(Index.isin.__doc__)\n def isin(self, values, level=None):\n if level is None:\n values = MultiIndex.from_tuples(values, names=self.names).values\n return algos.isin(self.values, values)\n else:\n num = self._get_level_number(level)\n levs = self.get_level_values(num)\n\n if levs.size == 0:\n return np.zeros(len(levs), dtype=np.bool_)\n return levs.isin(values)\n\n\nMultiIndex._add_numeric_methods_disabled()\nMultiIndex._add_numeric_methods_add_sub_disabled()\nMultiIndex._add_logical_methods_disabled()\n\n\ndef _sparsify(label_list, start: int = 0, sentinel=\"\"):\n pivoted = list(zip(*label_list))\n k = len(label_list)\n\n result = pivoted[: start + 1]\n prev = pivoted[start]\n\n for cur in pivoted[start + 1 :]:\n sparse_cur = []\n\n for i, (p, t) in enumerate(zip(prev, cur)):\n if i == k - 1:\n sparse_cur.append(t)\n result.append(sparse_cur)\n break\n\n if p == t:\n sparse_cur.append(sentinel)\n else:\n sparse_cur.extend(cur[i:])\n result.append(sparse_cur)\n break\n\n prev = cur\n\n return list(zip(*result))\n\n\ndef _get_na_rep(dtype) -> str:\n return {np.datetime64: \"NaT\", np.timedelta64: \"NaT\"}.get(dtype, \"NaN\")\n\n\ndef maybe_droplevels(index, key):\n \"\"\"\n Attempt to drop level or levels from the given index.\n\n Parameters\n ----------\n index: Index\n key : scalar or tuple\n\n Returns\n -------\n Index\n \"\"\"\n # drop levels\n original_index = index\n if isinstance(key, tuple):\n for _ in key:\n try:\n index = index.droplevel(0)\n except ValueError:\n # we have dropped too much, so back out\n return original_index\n else:\n try:\n index = index.droplevel(0)\n except ValueError:\n pass\n\n return index\n\n\ndef _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:\n \"\"\"\n Coerce the array_like indexer to the smallest integer dtype that can encode all\n of the given categories.\n\n Parameters\n ----------\n array_like : array-like\n categories : array-like\n copy : bool\n\n Returns\n -------\n np.ndarray\n Non-writeable.\n \"\"\"\n array_like = coerce_indexer_dtype(array_like, categories)\n if copy:\n array_like = array_like.copy()\n array_like.flags.writeable = False\n return array_like\n",
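# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of pandas itself) exercising the
# MultiIndex lookup and set methods implemented above; the inputs and expected
# results are taken from the docstring examples, so the assertions are only as
# authoritative as those examples.
import pandas as pd

mi = pd.MultiIndex.from_arrays([list("abb"), list("def")], names=["A", "B"])

# get_loc: a single label yields a slice, a complete tuple yields a position.
assert mi.get_loc("b") == slice(1, 3, None)
assert mi.get_loc(("b", "e")) == 1

# slice_locs: positional bounds for (partial) labels on a lexsorted index.
mi2 = pd.MultiIndex.from_arrays([list("abbd"), list("deff")], names=["A", "B"])
assert mi2.slice_locs(start="b") == (1, 4)
assert mi2.slice_locs(start="b", end=("b", "f")) == (1, 3)

# get_locs: one selector per level; slice(None) keeps every row of a level.
assert list(mi.get_locs([slice(None), ["e", "f"]])) == [1, 2]

# Set operations combine two MultiIndex objects into a new one.
other = pd.MultiIndex.from_arrays([list("bb"), list("ef")], names=["A", "B"])
assert list(mi.intersection(other)) == [("b", "e"), ("b", "f")]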
"\"\"\"\nModule contains tools for processing Stata files into DataFrames\n\nThe StataReader below was originally written by Joe Presbrey as part of PyDTA.\nIt has been extended and improved by Skipper Seabold from the Statsmodels\nproject who also developed the StataWriter and was finally added to pandas in\na once again improved version.\n\nYou can find more information on http://presbrey.mit.edu/PyDTA and\nhttps://www.statsmodels.org/devel/\n\"\"\"\nfrom collections import abc\nimport datetime\nfrom io import BytesIO, IOBase\nimport os\nfrom pathlib import Path\nimport struct\nimport sys\nfrom typing import Any, AnyStr, BinaryIO, Dict, List, Optional, Sequence, Tuple, Union\nimport warnings\n\nfrom dateutil.relativedelta import relativedelta\nimport numpy as np\n\nfrom pandas._libs.lib import infer_dtype\nfrom pandas._libs.writers import max_len_string_array\nfrom pandas._typing import FilePathOrBuffer, Label\nfrom pandas.util._decorators import Appender\n\nfrom pandas.core.dtypes.common import (\n ensure_object,\n is_categorical_dtype,\n is_datetime64_dtype,\n)\n\nfrom pandas import (\n Categorical,\n DatetimeIndex,\n NaT,\n Timestamp,\n concat,\n isna,\n to_datetime,\n to_timedelta,\n)\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.indexes.base import Index\nfrom pandas.core.series import Series\n\nfrom pandas.io.common import get_filepath_or_buffer, stringify_path\n\n_version_error = (\n \"Version of given Stata file is {version}. pandas supports importing \"\n \"versions 104, 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), \"\n \"114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16),\"\n \"and 119 (Stata 15/16, over 32,767 variables).\"\n)\n\n_statafile_processing_params1 = \"\"\"\\\nconvert_dates : bool, default True\n Convert date variables to DataFrame time values.\nconvert_categoricals : bool, default True\n Read value labels and convert columns to Categorical/Factor variables.\"\"\"\n\n_statafile_processing_params2 = \"\"\"\\\nindex_col : str, optional\n Column to set as index.\nconvert_missing : bool, default False\n Flag indicating whether to convert missing values to their Stata\n representations. If False, missing values are replaced with nan.\n If True, columns containing missing values are returned with\n object data types and missing values are represented by\n StataMissingValue objects.\npreserve_dtypes : bool, default True\n Preserve Stata datatypes. If False, numeric data are upcast to pandas\n default types for foreign data (float64 or int64).\ncolumns : list or None\n Columns to retain. Columns will be returned in the given order. None\n returns all columns.\norder_categoricals : bool, default True\n Flag indicating whether converted categorical data are ordered.\"\"\"\n\n_chunksize_params = \"\"\"\\\nchunksize : int, default None\n Return StataReader object for iterations, returns chunks with\n given number of lines.\"\"\"\n\n_iterator_params = \"\"\"\\\niterator : bool, default False\n Return StataReader object.\"\"\"\n\n_read_stata_doc = f\"\"\"\nRead Stata file into DataFrame.\n\nParameters\n----------\nfilepath_or_buffer : str, path object or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. 
A local file could be: ``file://localhost/path/to/table.dta``.\n\n If you want to pass in a path object, pandas accepts any ``os.PathLike``.\n\n By file-like object, we refer to objects with a ``read()`` method,\n such as a file handler (e.g. via builtin ``open`` function)\n or ``StringIO``.\n{_statafile_processing_params1}\n{_statafile_processing_params2}\n{_chunksize_params}\n{_iterator_params}\n\nReturns\n-------\nDataFrame or StataReader\n\nSee Also\n--------\nio.stata.StataReader : Low-level reader for Stata data files.\nDataFrame.to_stata: Export Stata data files.\n\nExamples\n--------\nRead a Stata dta file:\n\n>>> df = pd.read_stata('filename.dta')\n\nRead a Stata dta file in 10,000 line chunks:\n\n>>> itr = pd.read_stata('filename.dta', chunksize=10000)\n>>> for chunk in itr:\n... do_something(chunk)\n\"\"\"\n\n_read_method_doc = f\"\"\"\\\nReads observations from Stata file, converting them into a dataframe\n\nParameters\n----------\nnrows : int\n Number of lines to read from data file, if None read whole file.\n{_statafile_processing_params1}\n{_statafile_processing_params2}\n\nReturns\n-------\nDataFrame\n\"\"\"\n\n_stata_reader_doc = f\"\"\"\\\nClass for reading Stata dta files.\n\nParameters\n----------\npath_or_buf : path (string), buffer or path object\n string, path object (pathlib.Path or py._path.local.LocalPath) or object\n implementing a binary read() functions.\n\n .. versionadded:: 0.23.0 support for pathlib, py.path.\n{_statafile_processing_params1}\n{_statafile_processing_params2}\n{_chunksize_params}\n\"\"\"\n\n\n_date_formats = [\"%tc\", \"%tC\", \"%td\", \"%d\", \"%tw\", \"%tm\", \"%tq\", \"%th\", \"%ty\"]\n\n\nstata_epoch = datetime.datetime(1960, 1, 1)\n\n\n# TODO: Add typing. As of January 2020 it is not possible to type this function since\n# mypy doesn't understand that a Series and an int can be combined using mathematical\n# operations. (+, -).\ndef _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series:\n \"\"\"\n Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime\n\n Parameters\n ----------\n dates : Series\n The Stata Internal Format date to convert to datetime according to fmt\n fmt : str\n The format to convert to. 
Can be tc, td, tw, tm, tq, th, or ty.\n\n    Returns\n    -------\n    converted : Series\n        The converted dates\n\n    Examples\n    --------\n    >>> dates = pd.Series([52])\n    >>> _stata_elapsed_date_to_datetime_vec(dates, \"%tw\")\n    0   1961-01-01\n    dtype: datetime64[ns]\n\n    Notes\n    -----\n    datetime/c - tc\n        milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day\n    datetime/C - tC - NOT IMPLEMENTED\n        milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds\n    date - td\n        days since 01jan1960 (01jan1960 = 0)\n    weekly date - tw\n        weeks since 1960w1\n        This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.\n        The datetime value is the start of the week in terms of days in the\n        year, not ISO calendar weeks.\n    monthly date - tm\n        months since 1960m1\n    quarterly date - tq\n        quarters since 1960q1\n    half-yearly date - th\n        half-years since 1960h1\n    yearly date - ty\n        years since 0000\n    \"\"\"\n    MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year\n    MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days\n    MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days\n    MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000\n    MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000\n\n    def convert_year_month_safe(year, month) -> Series:\n        \"\"\"\n        Convert year and month to datetimes, using pandas vectorized versions\n        when the date range falls within the range supported by pandas.\n        Otherwise it falls back to a slower but more robust method\n        using datetime.\n        \"\"\"\n        if year.max() < MAX_YEAR and year.min() > MIN_YEAR:\n            return to_datetime(100 * year + month, format=\"%Y%m\")\n        else:\n            index = getattr(year, \"index\", None)\n            return Series(\n                [datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index\n            )\n\n    def convert_year_days_safe(year, days) -> Series:\n        \"\"\"\n        Converts year (e.g. 1999) and days since the start of the year to a\n        datetime or datetime64 Series\n        \"\"\"\n        if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:\n            return to_datetime(year, format=\"%Y\") + to_timedelta(days, unit=\"d\")\n        else:\n            index = getattr(year, \"index\", None)\n            value = [\n                datetime.datetime(y, 1, 1) + relativedelta(days=int(d))\n                for y, d in zip(year, days)\n            ]\n            return Series(value, index=index)\n\n    def convert_delta_safe(base, deltas, unit) -> Series:\n        \"\"\"\n        Convert base dates and deltas to datetimes, using pandas vectorized\n        versions if the deltas satisfy restrictions required to be expressed\n        as dates in pandas.\n        \"\"\"\n        index = getattr(deltas, \"index\", None)\n        if unit == \"d\":\n            if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:\n                values = [base + relativedelta(days=int(d)) for d in deltas]\n                return Series(values, index=index)\n        elif unit == \"ms\":\n            if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:\n                values = [\n                    base + relativedelta(microseconds=(int(d) * 1000)) for d in deltas\n                ]\n                return Series(values, index=index)\n        else:\n            raise ValueError(\"format not understood\")\n        base = to_datetime(base)\n        deltas = to_timedelta(deltas, unit=unit)\n        return base + deltas\n\n    # TODO: If/when pandas supports more than datetime64[ns], this should be\n    # improved to use correct range, e.g.
datetime[Y] for yearly\n bad_locs = np.isnan(dates)\n has_bad_values = False\n if bad_locs.any():\n has_bad_values = True\n data_col = Series(dates)\n data_col[bad_locs] = 1.0 # Replace with NaT\n dates = dates.astype(np.int64)\n\n if fmt.startswith((\"%tc\", \"tc\")): # Delta ms relative to base\n base = stata_epoch\n ms = dates\n conv_dates = convert_delta_safe(base, ms, \"ms\")\n elif fmt.startswith((\"%tC\", \"tC\")):\n\n warnings.warn(\"Encountered %tC format. Leaving in Stata Internal Format.\")\n conv_dates = Series(dates, dtype=np.object)\n if has_bad_values:\n conv_dates[bad_locs] = NaT\n return conv_dates\n # Delta days relative to base\n elif fmt.startswith((\"%td\", \"td\", \"%d\", \"d\")):\n base = stata_epoch\n days = dates\n conv_dates = convert_delta_safe(base, days, \"d\")\n # does not count leap days - 7 days is a week.\n # 52nd week may have more than 7 days\n elif fmt.startswith((\"%tw\", \"tw\")):\n year = stata_epoch.year + dates // 52\n days = (dates % 52) * 7\n conv_dates = convert_year_days_safe(year, days)\n elif fmt.startswith((\"%tm\", \"tm\")): # Delta months relative to base\n year = stata_epoch.year + dates // 12\n month = (dates % 12) + 1\n conv_dates = convert_year_month_safe(year, month)\n elif fmt.startswith((\"%tq\", \"tq\")): # Delta quarters relative to base\n year = stata_epoch.year + dates // 4\n quarter_month = (dates % 4) * 3 + 1\n conv_dates = convert_year_month_safe(year, quarter_month)\n elif fmt.startswith((\"%th\", \"th\")): # Delta half-years relative to base\n year = stata_epoch.year + dates // 2\n month = (dates % 2) * 6 + 1\n conv_dates = convert_year_month_safe(year, month)\n elif fmt.startswith((\"%ty\", \"ty\")): # Years -- not delta\n year = dates\n first_month = np.ones_like(dates)\n conv_dates = convert_year_month_safe(year, first_month)\n else:\n raise ValueError(f\"Date fmt {fmt} not understood\")\n\n if has_bad_values: # Restore NaT for bad values\n conv_dates[bad_locs] = NaT\n\n return conv_dates\n\n\ndef _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series:\n \"\"\"\n Convert from datetime to SIF. https://www.stata.com/help.cgi?datetime\n\n Parameters\n ----------\n dates : Series\n Series or array containing datetime.datetime or datetime64[ns] to\n convert to the Stata Internal Format given by fmt\n fmt : str\n The format to convert to. 
Can be, tc, td, tw, tm, tq, th, ty\n \"\"\"\n index = dates.index\n NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000\n US_PER_DAY = NS_PER_DAY / 1000\n\n def parse_dates_safe(dates, delta=False, year=False, days=False):\n d = {}\n if is_datetime64_dtype(dates.values):\n if delta:\n time_delta = dates - stata_epoch\n d[\"delta\"] = time_delta.values.astype(np.int64) // 1000 # microseconds\n if days or year:\n # ignore since mypy reports that DatetimeIndex has no year/month\n date_index = DatetimeIndex(dates)\n d[\"year\"] = date_index.year # type: ignore\n d[\"month\"] = date_index.month # type: ignore\n if days:\n days_in_ns = dates.astype(np.int64) - to_datetime(\n d[\"year\"], format=\"%Y\"\n ).astype(np.int64)\n d[\"days\"] = days_in_ns // NS_PER_DAY\n\n elif infer_dtype(dates, skipna=False) == \"datetime\":\n if delta:\n delta = dates.values - stata_epoch\n\n def f(x: datetime.timedelta) -> float:\n return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds\n\n v = np.vectorize(f)\n d[\"delta\"] = v(delta)\n if year:\n year_month = dates.apply(lambda x: 100 * x.year + x.month)\n d[\"year\"] = year_month.values // 100\n d[\"month\"] = year_month.values - d[\"year\"] * 100\n if days:\n\n def g(x: datetime.datetime) -> int:\n return (x - datetime.datetime(x.year, 1, 1)).days\n\n v = np.vectorize(g)\n d[\"days\"] = v(dates)\n else:\n raise ValueError(\n \"Columns containing dates must contain either \"\n \"datetime64, datetime.datetime or null values.\"\n )\n\n return DataFrame(d, index=index)\n\n bad_loc = isna(dates)\n index = dates.index\n if bad_loc.any():\n dates = Series(dates)\n if is_datetime64_dtype(dates):\n dates[bad_loc] = to_datetime(stata_epoch)\n else:\n dates[bad_loc] = stata_epoch\n\n if fmt in [\"%tc\", \"tc\"]:\n d = parse_dates_safe(dates, delta=True)\n conv_dates = d.delta / 1000\n elif fmt in [\"%tC\", \"tC\"]:\n warnings.warn(\"Stata Internal Format tC not supported.\")\n conv_dates = dates\n elif fmt in [\"%td\", \"td\"]:\n d = parse_dates_safe(dates, delta=True)\n conv_dates = d.delta // US_PER_DAY\n elif fmt in [\"%tw\", \"tw\"]:\n d = parse_dates_safe(dates, year=True, days=True)\n conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7\n elif fmt in [\"%tm\", \"tm\"]:\n d = parse_dates_safe(dates, year=True)\n conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1\n elif fmt in [\"%tq\", \"tq\"]:\n d = parse_dates_safe(dates, year=True)\n conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3\n elif fmt in [\"%th\", \"th\"]:\n d = parse_dates_safe(dates, year=True)\n conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(np.int)\n elif fmt in [\"%ty\", \"ty\"]:\n d = parse_dates_safe(dates, year=True)\n conv_dates = d.year\n else:\n raise ValueError(f\"Format {fmt} is not a known Stata date format\")\n\n conv_dates = Series(conv_dates, dtype=np.float64)\n missing_value = struct.unpack(\"<d\", b\"\\x00\\x00\\x00\\x00\\x00\\x00\\xe0\\x7f\")[0]\n conv_dates[bad_loc] = missing_value\n\n return Series(conv_dates, index=index)\n\n\nexcessive_string_length_error = \"\"\"\nFixed width strings in Stata .dta files are limited to 244 (or fewer)\ncharacters. Column '{0}' does not satisfy this restriction. Use the\n'version=117' parameter to write the newer (Stata 13 and later) format.\n\"\"\"\n\n\nclass PossiblePrecisionLoss(Warning):\n pass\n\n\nprecision_loss_doc = \"\"\"\nColumn converted from %s to %s, and some data are outside of the lossless\nconversion range. 
This may result in a loss of precision in the saved data.\n\"\"\"\n\n\nclass ValueLabelTypeMismatch(Warning):\n    pass\n\n\nvalue_label_mismatch_doc = \"\"\"\nStata value labels (pandas categories) must be strings. Column {0} contains\nnon-string labels which will be converted to strings. Please check that the\nStata data file created has not lost information due to duplicate labels.\n\"\"\"\n\n\nclass InvalidColumnName(Warning):\n    pass\n\n\ninvalid_name_doc = \"\"\"\nNot all pandas column names were valid Stata variable names.\nThe following replacements have been made:\n\n    {0}\n\nIf this is not what you expect, please make sure you have Stata-compliant\ncolumn names in your DataFrame (strings only, max 32 characters, only\nalphanumerics and underscores, no Stata reserved words)\n\"\"\"\n\n\ndef _cast_to_stata_types(data: DataFrame) -> DataFrame:\n    \"\"\"\n    Checks the dtypes of the columns of a pandas DataFrame for\n    compatibility with the data types and ranges supported by Stata, and\n    converts if necessary.\n\n    Parameters\n    ----------\n    data : DataFrame\n        The DataFrame to check and convert\n\n    Notes\n    -----\n    Numeric columns in Stata must be one of int8, int16, int32, float32 or\n    float64, with some additional value restrictions. int8 and int16 columns\n    are checked for violations of the value restrictions and upcast if needed.\n    int64 data is not usable in Stata, and so it is downcast to int32 whenever\n    the values are in the int32 range, and cast to float64 when larger than\n    this range. If the int64 values are outside of the range of those\n    perfectly representable as float64 values, a warning is raised.\n\n    bool columns are cast to int8. uint columns are converted to int of the\n    same size if there is no loss in precision, otherwise are upcast to a\n    larger type.
uint64 is currently not supported since it is converted to\n    object in a DataFrame.\n    \"\"\"\n    ws = \"\"\n    # original, if small, if large\n    conversion_data = (\n        (np.bool, np.int8, np.int8),\n        (np.uint8, np.int8, np.int16),\n        (np.uint16, np.int16, np.int32),\n        (np.uint32, np.int32, np.int64),\n    )\n\n    float32_max = struct.unpack(\"<f\", b\"\\xff\\xff\\xff\\x7e\")[0]\n    float64_max = struct.unpack(\"<d\", b\"\\xff\\xff\\xff\\xff\\xff\\xff\\xdf\\x7f\")[0]\n\n    for col in data:\n        dtype = data[col].dtype\n        # Cast from unsupported types to supported types\n        for c_data in conversion_data:\n            if dtype == c_data[0]:\n                if data[col].max() <= np.iinfo(c_data[1]).max:\n                    dtype = c_data[1]\n                else:\n                    dtype = c_data[2]\n                if c_data[2] == np.float64:  # Warn if necessary\n                    if data[col].max() >= 2 ** 53:\n                        ws = precision_loss_doc.format(\"uint64\", \"float64\")\n\n                data[col] = data[col].astype(dtype)\n\n        # Check values and upcast if necessary\n        if dtype == np.int8:\n            if data[col].max() > 100 or data[col].min() < -127:\n                data[col] = data[col].astype(np.int16)\n        elif dtype == np.int16:\n            if data[col].max() > 32740 or data[col].min() < -32767:\n                data[col] = data[col].astype(np.int32)\n        elif dtype == np.int64:\n            if data[col].max() <= 2147483620 and data[col].min() >= -2147483647:\n                data[col] = data[col].astype(np.int32)\n            else:\n                data[col] = data[col].astype(np.float64)\n                if data[col].max() >= 2 ** 53 or data[col].min() <= -(2 ** 53):\n                    ws = precision_loss_doc.format(\"int64\", \"float64\")\n        elif dtype in (np.float32, np.float64):\n            value = data[col].max()\n            if np.isinf(value):\n                raise ValueError(\n                    f\"Column {col} has a maximum value of infinity which is outside \"\n                    \"the range supported by Stata.\"\n                )\n            if dtype == np.float32 and value > float32_max:\n                data[col] = data[col].astype(np.float64)\n            elif dtype == np.float64:\n                if value > float64_max:\n                    raise ValueError(\n                        f\"Column {col} has a maximum value ({value}) outside the range \"\n                        f\"supported by Stata ({float64_max})\"\n                    )\n\n    if ws:\n        warnings.warn(ws, PossiblePrecisionLoss)\n\n    return data\n\n\nclass StataValueLabel:\n    \"\"\"\n    Parse a categorical column and prepare formatted output\n\n    Parameters\n    ----------\n    catarray : Series\n        Categorical Series to encode\n    encoding : {\"latin-1\", \"utf-8\"}\n        Encoding to use for value labels.\n    \"\"\"\n\n    def __init__(self, catarray: Series, encoding: str = \"latin-1\"):\n\n        if encoding not in (\"latin-1\", \"utf-8\"):\n            raise ValueError(\"Only latin-1 and utf-8 are supported.\")\n        self.labname = catarray.name\n        self._encoding = encoding\n        categories = catarray.cat.categories\n        self.value_labels = list(zip(np.arange(len(categories)), categories))\n        self.value_labels.sort(key=lambda x: x[0])\n        self.text_len = 0\n        self.off: List[int] = []\n        self.val: List[int] = []\n        self.txt: List[bytes] = []\n        self.n = 0\n\n        # Compute lengths and set up lists of offsets and labels\n        for vl in self.value_labels:\n            category = vl[1]\n            if not isinstance(category, str):\n                category = str(category)\n                warnings.warn(\n                    value_label_mismatch_doc.format(catarray.name),\n                    ValueLabelTypeMismatch,\n                )\n            category = category.encode(encoding)\n            self.off.append(self.text_len)\n            self.text_len += len(category) + 1  # +1 for the null terminator\n            self.val.append(vl[0])\n            self.txt.append(category)\n            self.n += 1\n\n        if self.text_len > 32000:\n            raise ValueError(\n                \"Stata value labels for a single variable must \"\n                \"have a combined length less than 32,000 characters.\"\n            )\n\n        # Ensure int32\n        self.off = np.array(self.off, dtype=np.int32)\n        self.val =
np.array(self.val, dtype=np.int32)\n\n        # Total length\n        self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len\n\n    def generate_value_label(self, byteorder: str) -> bytes:\n        \"\"\"\n        Generate the binary representation of the value labels.\n\n        Parameters\n        ----------\n        byteorder : str\n            Byte order of the output\n\n        Returns\n        -------\n        value_label : bytes\n            Bytes containing the formatted value label\n        \"\"\"\n        encoding = self._encoding\n        bio = BytesIO()\n        null_byte = b\"\\x00\"\n\n        # len\n        bio.write(struct.pack(byteorder + \"i\", self.len))\n\n        # labname\n        labname = str(self.labname)[:32].encode(encoding)\n        lab_len = 32 if encoding not in (\"utf-8\", \"utf8\") else 128\n        labname = _pad_bytes(labname, lab_len + 1)\n        bio.write(labname)\n\n        # padding - 3 bytes\n        for i in range(3):\n            bio.write(struct.pack(\"c\", null_byte))\n\n        # value_label_table\n        # n - int32\n        bio.write(struct.pack(byteorder + \"i\", self.n))\n\n        # textlen - int32\n        bio.write(struct.pack(byteorder + \"i\", self.text_len))\n\n        # off - int32 array (n elements)\n        for offset in self.off:\n            bio.write(struct.pack(byteorder + \"i\", offset))\n\n        # val - int32 array (n elements)\n        for value in self.val:\n            bio.write(struct.pack(byteorder + \"i\", value))\n\n        # txt - Text labels, null terminated\n        for text in self.txt:\n            bio.write(text + null_byte)\n\n        bio.seek(0)\n        return bio.read()\n\n\nclass StataMissingValue:\n    \"\"\"\n    An observation's missing value.\n\n    Parameters\n    ----------\n    value : {int, float}\n        The Stata missing value code\n\n    Notes\n    -----\n    More information: <https://www.stata.com/help.cgi?missing>\n\n    Integer missing values map the codes '.', '.a', ..., '.z' to the values\n    101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...\n    2147483647 (for int32). Missing values for floating point data types are\n    more complex but the pattern is simple to discern from the following table.\n\n    np.float32 missing values (float in Stata)\n    0000007f    .\n    0008007f    .a\n    0010007f    .b\n    ...\n    00c0007f    .x\n    00c8007f    .y\n    00d0007f    .z\n\n    np.float64 missing values (double in Stata)\n    000000000000e07f    .\n    000000000001e07f    .a\n    000000000002e07f    .b\n    ...\n    000000000018e07f    .x\n    000000000019e07f    .y\n    00000000001ae07f    .z\n    \"\"\"\n\n    # Construct a dictionary of missing values\n    MISSING_VALUES: Dict[float, str] = {}\n    bases = (101, 32741, 2147483621)\n    for b in bases:\n        # Conversion to long to avoid hash issues on 32 bit platforms #8968\n        MISSING_VALUES[b] = \".\"\n        for i in range(1, 27):\n            MISSING_VALUES[i + b] = \".\" + chr(96 + i)\n\n    float32_base = b\"\\x00\\x00\\x00\\x7f\"\n    increment = struct.unpack(\"<i\", b\"\\x00\\x08\\x00\\x00\")[0]\n    for i in range(27):\n        key = struct.unpack(\"<f\", float32_base)[0]\n        MISSING_VALUES[key] = \".\"\n        if i > 0:\n            MISSING_VALUES[key] += chr(96 + i)\n        int_value = struct.unpack(\"<i\", struct.pack(\"<f\", key))[0] + increment\n        float32_base = struct.pack(\"<i\", int_value)\n\n    float64_base = b\"\\x00\\x00\\x00\\x00\\x00\\x00\\xe0\\x7f\"\n    increment = struct.unpack(\"q\", b\"\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\")[0]\n    for i in range(27):\n        key = struct.unpack(\"<d\", float64_base)[0]\n        MISSING_VALUES[key] = \".\"\n        if i > 0:\n            MISSING_VALUES[key] += chr(96 + i)\n        int_value = struct.unpack(\"q\", struct.pack(\"<d\", key))[0] + increment\n        float64_base = struct.pack(\"q\", int_value)\n\n    BASE_MISSING_VALUES = {\n        \"int8\": 101,\n        \"int16\": 32741,\n        \"int32\": 2147483621,\n        \"float32\": struct.unpack(\"<f\", float32_base)[0],\n        \"float64\": struct.unpack(\"<d\", float64_base)[0],\n    }\n\n
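    # Worked examples of the table constructed above (illustration only):\n    # for int8 the base code 101 is '.', 102 is '.a', ..., 127 is '.z';\n    # the 27 float32/float64 codes step the bit pattern of the base value\n    # (0x7f000000 / 0x7fe0000000000000) by the fixed increment unpacked\n    # above, so, for instance:\n    #\n    #     StataMissingValue(101).string == \".\"\n    #     StataMissingValue(102).string == \".a\"\n    #     StataMissingValue(32741).string == \".\"  # int16 base\n    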
def __init__(self, value: Union[int, float]):\n self._value = value\n # Conversion to int to avoid hash issues on 32 bit platforms #8968\n value = int(value) if value < 2147483648 else float(value)\n self._str = self.MISSING_VALUES[value]\n\n @property\n def string(self) -> str:\n \"\"\"\n The Stata representation of the missing value: '.', '.a'..'.z'\n\n Returns\n -------\n str\n The representation of the missing value.\n \"\"\"\n return self._str\n\n @property\n def value(self) -> Union[int, float]:\n \"\"\"\n The binary representation of the missing value.\n\n Returns\n -------\n {int, float}\n The binary representation of the missing value.\n \"\"\"\n return self._value\n\n def __str__(self) -> str:\n return self.string\n\n def __repr__(self) -> str:\n return f\"{type(self)}({self})\"\n\n def __eq__(self, other: Any) -> bool:\n return (\n isinstance(other, type(self))\n and self.string == other.string\n and self.value == other.value\n )\n\n @classmethod\n def get_base_missing_value(cls, dtype: np.dtype) -> Union[int, float]:\n if dtype == np.int8:\n value = cls.BASE_MISSING_VALUES[\"int8\"]\n elif dtype == np.int16:\n value = cls.BASE_MISSING_VALUES[\"int16\"]\n elif dtype == np.int32:\n value = cls.BASE_MISSING_VALUES[\"int32\"]\n elif dtype == np.float32:\n value = cls.BASE_MISSING_VALUES[\"float32\"]\n elif dtype == np.float64:\n value = cls.BASE_MISSING_VALUES[\"float64\"]\n else:\n raise ValueError(\"Unsupported dtype\")\n return value\n\n\nclass StataParser:\n def __init__(self):\n\n # type code.\n # --------------------\n # str1 1 = 0x01\n # str2 2 = 0x02\n # ...\n # str244 244 = 0xf4\n # byte 251 = 0xfb (sic)\n # int 252 = 0xfc\n # long 253 = 0xfd\n # float 254 = 0xfe\n # double 255 = 0xff\n # --------------------\n # NOTE: the byte type seems to be reserved for categorical variables\n # with a label, but the underlying variable is -127 to 100\n # we're going to drop the label and cast to int\n self.DTYPE_MAP = dict(\n list(zip(range(1, 245), [\"a\" + str(i) for i in range(1, 245)]))\n + [\n (251, np.int8),\n (252, np.int16),\n (253, np.int32),\n (254, np.float32),\n (255, np.float64),\n ]\n )\n self.DTYPE_MAP_XML = dict(\n [\n (32768, np.uint8), # Keys to GSO\n (65526, np.float64),\n (65527, np.float32),\n (65528, np.int32),\n (65529, np.int16),\n (65530, np.int8),\n ]\n )\n self.TYPE_MAP = list(range(251)) + list(\"bhlfd\")\n self.TYPE_MAP_XML = dict(\n [\n # Not really a Q, unclear how to handle byteswap\n (32768, \"Q\"),\n (65526, \"d\"),\n (65527, \"f\"),\n (65528, \"l\"),\n (65529, \"h\"),\n (65530, \"b\"),\n ]\n )\n # NOTE: technically, some of these are wrong. there are more numbers\n # that can be represented. it's the 27 ABOVE and BELOW the max listed\n # numeric data type in [U] 12.2.2 of the 11.2 manual\n float32_min = b\"\\xff\\xff\\xff\\xfe\"\n float32_max = b\"\\xff\\xff\\xff\\x7e\"\n float64_min = b\"\\xff\\xff\\xff\\xff\\xff\\xff\\xef\\xff\"\n float64_max = b\"\\xff\\xff\\xff\\xff\\xff\\xff\\xdf\\x7f\"\n self.VALID_RANGE = {\n \"b\": (-127, 100),\n \"h\": (-32767, 32740),\n \"l\": (-2147483647, 2147483620),\n \"f\": (\n np.float32(struct.unpack(\"<f\", float32_min)[0]),\n np.float32(struct.unpack(\"<f\", float32_max)[0]),\n ),\n \"d\": (\n np.float64(struct.unpack(\"<d\", float64_min)[0]),\n np.float64(struct.unpack(\"<d\", float64_max)[0]),\n ),\n }\n\n self.OLD_TYPE_MAPPING = {\n 98: 251, # byte\n 105: 252, # int\n 108: 253, # long\n 102: 254 # float\n # don't know old code for double\n }\n\n # These missing values are the generic '.' 
in Stata, and are used\n # to replace nans\n self.MISSING_VALUES = {\n \"b\": 101,\n \"h\": 32741,\n \"l\": 2147483621,\n \"f\": np.float32(struct.unpack(\"<f\", b\"\\x00\\x00\\x00\\x7f\")[0]),\n \"d\": np.float64(\n struct.unpack(\"<d\", b\"\\x00\\x00\\x00\\x00\\x00\\x00\\xe0\\x7f\")[0]\n ),\n }\n self.NUMPY_TYPE_MAP = {\n \"b\": \"i1\",\n \"h\": \"i2\",\n \"l\": \"i4\",\n \"f\": \"f4\",\n \"d\": \"f8\",\n \"Q\": \"u8\",\n }\n\n # Reserved words cannot be used as variable names\n self.RESERVED_WORDS = (\n \"aggregate\",\n \"array\",\n \"boolean\",\n \"break\",\n \"byte\",\n \"case\",\n \"catch\",\n \"class\",\n \"colvector\",\n \"complex\",\n \"const\",\n \"continue\",\n \"default\",\n \"delegate\",\n \"delete\",\n \"do\",\n \"double\",\n \"else\",\n \"eltypedef\",\n \"end\",\n \"enum\",\n \"explicit\",\n \"export\",\n \"external\",\n \"float\",\n \"for\",\n \"friend\",\n \"function\",\n \"global\",\n \"goto\",\n \"if\",\n \"inline\",\n \"int\",\n \"local\",\n \"long\",\n \"NULL\",\n \"pragma\",\n \"protected\",\n \"quad\",\n \"rowvector\",\n \"short\",\n \"typedef\",\n \"typename\",\n \"virtual\",\n \"_all\",\n \"_N\",\n \"_skip\",\n \"_b\",\n \"_pi\",\n \"str#\",\n \"in\",\n \"_pred\",\n \"strL\",\n \"_coef\",\n \"_rc\",\n \"using\",\n \"_cons\",\n \"_se\",\n \"with\",\n \"_n\",\n )\n\n\nclass StataReader(StataParser, abc.Iterator):\n __doc__ = _stata_reader_doc\n\n def __init__(\n self,\n path_or_buf: FilePathOrBuffer,\n convert_dates: bool = True,\n convert_categoricals: bool = True,\n index_col: Optional[str] = None,\n convert_missing: bool = False,\n preserve_dtypes: bool = True,\n columns: Optional[Sequence[str]] = None,\n order_categoricals: bool = True,\n chunksize: Optional[int] = None,\n ):\n super().__init__()\n self.col_sizes: List[int] = []\n\n # Arguments to the reader (can be temporarily overridden in\n # calls to read).\n self._convert_dates = convert_dates\n self._convert_categoricals = convert_categoricals\n self._index_col = index_col\n self._convert_missing = convert_missing\n self._preserve_dtypes = preserve_dtypes\n self._columns = columns\n self._order_categoricals = order_categoricals\n self._encoding = \"\"\n self._chunksize = chunksize\n\n # State variables for the file\n self._has_string_data = False\n self._missing_values = False\n self._can_read_value_labels = False\n self._column_selector_set = False\n self._value_labels_read = False\n self._data_read = False\n self._dtype = None\n self._lines_read = 0\n\n self._native_byteorder = _set_endianness(sys.byteorder)\n path_or_buf = stringify_path(path_or_buf)\n if isinstance(path_or_buf, str):\n path_or_buf, encoding, _, should_close = get_filepath_or_buffer(path_or_buf)\n\n if isinstance(path_or_buf, (str, bytes)):\n self.path_or_buf = open(path_or_buf, \"rb\")\n elif isinstance(path_or_buf, IOBase):\n # Copy to BytesIO, and ensure no encoding\n contents = path_or_buf.read()\n self.path_or_buf = BytesIO(contents)\n\n self._read_header()\n self._setup_dtype()\n\n def __enter__(self) -> \"StataReader\":\n \"\"\" enter context manager \"\"\"\n return self\n\n def __exit__(self, exc_type, exc_value, traceback) -> None:\n \"\"\" exit context manager \"\"\"\n self.close()\n\n def close(self) -> None:\n \"\"\" close the handle if its open \"\"\"\n try:\n self.path_or_buf.close()\n except IOError:\n pass\n\n def _set_encoding(self) -> None:\n \"\"\"\n Set string encoding which depends on file version\n \"\"\"\n if self.format_version < 118:\n self._encoding = \"latin-1\"\n else:\n self._encoding = \"utf-8\"\n\n def 
_read_header(self) -> None:\n first_char = self.path_or_buf.read(1)\n if struct.unpack(\"c\", first_char)[0] == b\"<\":\n self._read_new_header()\n else:\n self._read_old_header(first_char)\n\n self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0\n\n # calculate size of a data record\n self.col_sizes = [self._calcsize(typ) for typ in self.typlist]\n\n def _read_new_header(self) -> None:\n # The first part of the header is common to 117 - 119.\n self.path_or_buf.read(27) # stata_dta><header><release>\n self.format_version = int(self.path_or_buf.read(3))\n if self.format_version not in [117, 118, 119]:\n raise ValueError(_version_error.format(version=self.format_version))\n self._set_encoding()\n self.path_or_buf.read(21) # </release><byteorder>\n self.byteorder = self.path_or_buf.read(3) == b\"MSF\" and \">\" or \"<\"\n self.path_or_buf.read(15) # </byteorder><K>\n nvar_type = \"H\" if self.format_version <= 118 else \"I\"\n nvar_size = 2 if self.format_version <= 118 else 4\n self.nvar = struct.unpack(\n self.byteorder + nvar_type, self.path_or_buf.read(nvar_size)\n )[0]\n self.path_or_buf.read(7) # </K><N>\n\n self.nobs = self._get_nobs()\n self.path_or_buf.read(11) # </N><label>\n self._data_label = self._get_data_label()\n self.path_or_buf.read(19) # </label><timestamp>\n self.time_stamp = self._get_time_stamp()\n self.path_or_buf.read(26) # </timestamp></header><map>\n self.path_or_buf.read(8) # 0x0000000000000000\n self.path_or_buf.read(8) # position of <map>\n\n self._seek_vartypes = (\n struct.unpack(self.byteorder + \"q\", self.path_or_buf.read(8))[0] + 16\n )\n self._seek_varnames = (\n struct.unpack(self.byteorder + \"q\", self.path_or_buf.read(8))[0] + 10\n )\n self._seek_sortlist = (\n struct.unpack(self.byteorder + \"q\", self.path_or_buf.read(8))[0] + 10\n )\n self._seek_formats = (\n struct.unpack(self.byteorder + \"q\", self.path_or_buf.read(8))[0] + 9\n )\n self._seek_value_label_names = (\n struct.unpack(self.byteorder + \"q\", self.path_or_buf.read(8))[0] + 19\n )\n\n # Requires version-specific treatment\n self._seek_variable_labels = self._get_seek_variable_labels()\n\n self.path_or_buf.read(8) # <characteristics>\n self.data_location = (\n struct.unpack(self.byteorder + \"q\", self.path_or_buf.read(8))[0] + 6\n )\n self.seek_strls = (\n struct.unpack(self.byteorder + \"q\", self.path_or_buf.read(8))[0] + 7\n )\n self.seek_value_labels = (\n struct.unpack(self.byteorder + \"q\", self.path_or_buf.read(8))[0] + 14\n )\n\n self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)\n\n self.path_or_buf.seek(self._seek_varnames)\n self.varlist = self._get_varlist()\n\n self.path_or_buf.seek(self._seek_sortlist)\n self.srtlist = struct.unpack(\n self.byteorder + (\"h\" * (self.nvar + 1)),\n self.path_or_buf.read(2 * (self.nvar + 1)),\n )[:-1]\n\n self.path_or_buf.seek(self._seek_formats)\n self.fmtlist = self._get_fmtlist()\n\n self.path_or_buf.seek(self._seek_value_label_names)\n self.lbllist = self._get_lbllist()\n\n self.path_or_buf.seek(self._seek_variable_labels)\n self._variable_labels = self._get_variable_labels()\n\n # Get data type information, works for versions 117-119.\n def _get_dtypes(\n self, seek_vartypes: int\n ) -> Tuple[List[Union[int, str]], List[Union[int, np.dtype]]]:\n\n self.path_or_buf.seek(seek_vartypes)\n raw_typlist = [\n struct.unpack(self.byteorder + \"H\", self.path_or_buf.read(2))[0]\n for _ in range(self.nvar)\n ]\n\n def f(typ: int) -> Union[int, str]:\n if typ <= 2045:\n return typ\n try:\n return 
self.TYPE_MAP_XML[typ]\n except KeyError as err:\n raise ValueError(f\"cannot convert stata types [{typ}]\") from err\n\n typlist = [f(x) for x in raw_typlist]\n\n def g(typ: int) -> Union[str, np.dtype]:\n if typ <= 2045:\n return str(typ)\n try:\n return self.DTYPE_MAP_XML[typ]\n except KeyError as err:\n raise ValueError(f\"cannot convert stata dtype [{typ}]\") from err\n\n dtyplist = [g(x) for x in raw_typlist]\n\n return typlist, dtyplist\n\n def _get_varlist(self) -> List[str]:\n # 33 in order formats, 129 in formats 118 and 119\n b = 33 if self.format_version < 118 else 129\n return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]\n\n # Returns the format list\n def _get_fmtlist(self) -> List[str]:\n if self.format_version >= 118:\n b = 57\n elif self.format_version > 113:\n b = 49\n elif self.format_version > 104:\n b = 12\n else:\n b = 7\n\n return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]\n\n # Returns the label list\n def _get_lbllist(self) -> List[str]:\n if self.format_version >= 118:\n b = 129\n elif self.format_version > 108:\n b = 33\n else:\n b = 9\n return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]\n\n def _get_variable_labels(self) -> List[str]:\n if self.format_version >= 118:\n vlblist = [\n self._decode(self.path_or_buf.read(321)) for _ in range(self.nvar)\n ]\n elif self.format_version > 105:\n vlblist = [\n self._decode(self.path_or_buf.read(81)) for _ in range(self.nvar)\n ]\n else:\n vlblist = [\n self._decode(self.path_or_buf.read(32)) for _ in range(self.nvar)\n ]\n return vlblist\n\n def _get_nobs(self) -> int:\n if self.format_version >= 118:\n return struct.unpack(self.byteorder + \"Q\", self.path_or_buf.read(8))[0]\n else:\n return struct.unpack(self.byteorder + \"I\", self.path_or_buf.read(4))[0]\n\n def _get_data_label(self) -> str:\n if self.format_version >= 118:\n strlen = struct.unpack(self.byteorder + \"H\", self.path_or_buf.read(2))[0]\n return self._decode(self.path_or_buf.read(strlen))\n elif self.format_version == 117:\n strlen = struct.unpack(\"b\", self.path_or_buf.read(1))[0]\n return self._decode(self.path_or_buf.read(strlen))\n elif self.format_version > 105:\n return self._decode(self.path_or_buf.read(81))\n else:\n return self._decode(self.path_or_buf.read(32))\n\n def _get_time_stamp(self) -> str:\n if self.format_version >= 118:\n strlen = struct.unpack(\"b\", self.path_or_buf.read(1))[0]\n return self.path_or_buf.read(strlen).decode(\"utf-8\")\n elif self.format_version == 117:\n strlen = struct.unpack(\"b\", self.path_or_buf.read(1))[0]\n return self._decode(self.path_or_buf.read(strlen))\n elif self.format_version > 104:\n return self._decode(self.path_or_buf.read(18))\n else:\n raise ValueError()\n\n def _get_seek_variable_labels(self) -> int:\n if self.format_version == 117:\n self.path_or_buf.read(8) # <variable_labels>, throw away\n # Stata 117 data files do not follow the described format. 
This is\n # a work around that uses the previous label, 33 bytes for each\n # variable, 20 for the closing tag and 17 for the opening tag\n return self._seek_value_label_names + (33 * self.nvar) + 20 + 17\n elif self.format_version >= 118:\n return struct.unpack(self.byteorder + \"q\", self.path_or_buf.read(8))[0] + 17\n else:\n raise ValueError()\n\n def _read_old_header(self, first_char: bytes) -> None:\n self.format_version = struct.unpack(\"b\", first_char)[0]\n if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:\n raise ValueError(_version_error.format(version=self.format_version))\n self._set_encoding()\n self.byteorder = (\n struct.unpack(\"b\", self.path_or_buf.read(1))[0] == 0x1 and \">\" or \"<\"\n )\n self.filetype = struct.unpack(\"b\", self.path_or_buf.read(1))[0]\n self.path_or_buf.read(1) # unused\n\n self.nvar = struct.unpack(self.byteorder + \"H\", self.path_or_buf.read(2))[0]\n self.nobs = self._get_nobs()\n\n self._data_label = self._get_data_label()\n\n self.time_stamp = self._get_time_stamp()\n\n # descriptors\n if self.format_version > 108:\n typlist = [ord(self.path_or_buf.read(1)) for _ in range(self.nvar)]\n else:\n buf = self.path_or_buf.read(self.nvar)\n typlistb = np.frombuffer(buf, dtype=np.uint8)\n typlist = []\n for tp in typlistb:\n if tp in self.OLD_TYPE_MAPPING:\n typlist.append(self.OLD_TYPE_MAPPING[tp])\n else:\n typlist.append(tp - 127) # bytes\n\n try:\n self.typlist = [self.TYPE_MAP[typ] for typ in typlist]\n except ValueError as err:\n invalid_types = \",\".join(str(x) for x in typlist)\n raise ValueError(f\"cannot convert stata types [{invalid_types}]\") from err\n try:\n self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]\n except ValueError as err:\n invalid_dtypes = \",\".join(str(x) for x in typlist)\n raise ValueError(f\"cannot convert stata dtypes [{invalid_dtypes}]\") from err\n\n if self.format_version > 108:\n self.varlist = [\n self._decode(self.path_or_buf.read(33)) for _ in range(self.nvar)\n ]\n else:\n self.varlist = [\n self._decode(self.path_or_buf.read(9)) for _ in range(self.nvar)\n ]\n self.srtlist = struct.unpack(\n self.byteorder + (\"h\" * (self.nvar + 1)),\n self.path_or_buf.read(2 * (self.nvar + 1)),\n )[:-1]\n\n self.fmtlist = self._get_fmtlist()\n\n self.lbllist = self._get_lbllist()\n\n self._variable_labels = self._get_variable_labels()\n\n # ignore expansion fields (Format 105 and later)\n # When reading, read five bytes; the last four bytes now tell you\n # the size of the next read, which you discard. 
You then continue\n # like this until you read 5 bytes of zeros.\n\n if self.format_version > 104:\n while True:\n data_type = struct.unpack(\n self.byteorder + \"b\", self.path_or_buf.read(1)\n )[0]\n if self.format_version > 108:\n data_len = struct.unpack(\n self.byteorder + \"i\", self.path_or_buf.read(4)\n )[0]\n else:\n data_len = struct.unpack(\n self.byteorder + \"h\", self.path_or_buf.read(2)\n )[0]\n if data_type == 0:\n break\n self.path_or_buf.read(data_len)\n\n # necessary data to continue parsing\n self.data_location = self.path_or_buf.tell()\n\n def _setup_dtype(self) -> np.dtype:\n \"\"\"Map between numpy and stata dtypes\"\"\"\n if self._dtype is not None:\n return self._dtype\n\n dtypes = [] # Convert struct data types to numpy data type\n for i, typ in enumerate(self.typlist):\n if typ in self.NUMPY_TYPE_MAP:\n dtypes.append((\"s\" + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))\n else:\n dtypes.append((\"s\" + str(i), \"S\" + str(typ)))\n self._dtype = np.dtype(dtypes)\n\n return self._dtype\n\n def _calcsize(self, fmt: Union[int, str]) -> int:\n if isinstance(fmt, int):\n return fmt\n return struct.calcsize(self.byteorder + fmt)\n\n def _decode(self, s: bytes) -> str:\n # have bytes not strings, so must decode\n s = s.partition(b\"\\0\")[0]\n try:\n return s.decode(self._encoding)\n except UnicodeDecodeError:\n # GH 25960, fallback to handle incorrect format produced when 117\n # files are converted to 118 files in Stata\n encoding = self._encoding\n msg = f\"\"\"\nOne or more strings in the dta file could not be decoded using {encoding}, and\nso the fallback encoding of latin-1 is being used. This can happen when a file\nhas been incorrectly encoded by Stata or some other software. You should verify\nthe string values returned are correct.\"\"\"\n warnings.warn(msg, UnicodeWarning)\n return s.decode(\"latin-1\")\n\n def _read_value_labels(self) -> None:\n if self._value_labels_read:\n # Don't read twice\n return\n if self.format_version <= 108:\n # Value labels are not supported in version 108 and earlier.\n self._value_labels_read = True\n self.value_label_dict: Dict[str, Dict[Union[float, int], str]] = {}\n return\n\n if self.format_version >= 117:\n self.path_or_buf.seek(self.seek_value_labels)\n else:\n assert self._dtype is not None\n offset = self.nobs * self._dtype.itemsize\n self.path_or_buf.seek(self.data_location + offset)\n\n self._value_labels_read = True\n self.value_label_dict = {}\n\n while True:\n if self.format_version >= 117:\n if self.path_or_buf.read(5) == b\"</val\": # <lbl>\n break # end of value label table\n\n slength = self.path_or_buf.read(4)\n if not slength:\n break # end of value label table (format < 117)\n if self.format_version <= 117:\n labname = self._decode(self.path_or_buf.read(33))\n else:\n labname = self._decode(self.path_or_buf.read(129))\n self.path_or_buf.read(3) # padding\n\n n = struct.unpack(self.byteorder + \"I\", self.path_or_buf.read(4))[0]\n txtlen = struct.unpack(self.byteorder + \"I\", self.path_or_buf.read(4))[0]\n off = np.frombuffer(\n self.path_or_buf.read(4 * n), dtype=self.byteorder + \"i4\", count=n\n )\n val = np.frombuffer(\n self.path_or_buf.read(4 * n), dtype=self.byteorder + \"i4\", count=n\n )\n ii = np.argsort(off)\n off = off[ii]\n val = val[ii]\n txt = self.path_or_buf.read(txtlen)\n self.value_label_dict[labname] = dict()\n for i in range(n):\n end = off[i + 1] if i < n - 1 else txtlen\n self.value_label_dict[labname][val[i]] = self._decode(txt[off[i] : end])\n if self.format_version >= 117:\n 
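# --- Illustrative sketch (not part of pandas) -----------------------------
# The loop above decodes one value-label record with layout
#   n:int32 | textlen:int32 | off:int32[n] | val:int32[n] | txt:bytes[textlen]
# A minimal standalone parser for that layout could look like this
# (parse_value_label_table is a hypothetical name; it relies on each label
# being null terminated rather than sorting the offsets as the reader does).
import struct
from io import BytesIO

def parse_value_label_table(bio: BytesIO, byteorder: str = "<") -> dict:
    n, textlen = struct.unpack(byteorder + "II", bio.read(8))
    off = struct.unpack(byteorder + f"{n}i", bio.read(4 * n))
    val = struct.unpack(byteorder + f"{n}i", bio.read(4 * n))
    txt = bio.read(textlen)
    return {
        val[i]: txt[off[i]:].split(b"\0", 1)[0].decode("latin-1")
        for i in range(n)
    }
# ---------------------------------------------------------------------------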
self.path_or_buf.read(6) # </lbl>\n self._value_labels_read = True\n\n def _read_strls(self) -> None:\n self.path_or_buf.seek(self.seek_strls)\n # Wrap v_o in a string to allow uint64 values as keys on 32bit OS\n self.GSO = {\"0\": \"\"}\n while True:\n if self.path_or_buf.read(3) != b\"GSO\":\n break\n\n if self.format_version == 117:\n v_o = struct.unpack(self.byteorder + \"Q\", self.path_or_buf.read(8))[0]\n else:\n buf = self.path_or_buf.read(12)\n # Only tested on little endian file on little endian machine.\n v_size = 2 if self.format_version == 118 else 3\n if self.byteorder == \"<\":\n buf = buf[0:v_size] + buf[4 : (12 - v_size)]\n else:\n # This path may not be correct, impossible to test\n buf = buf[0:v_size] + buf[(4 + v_size) :]\n v_o = struct.unpack(\"Q\", buf)[0]\n typ = struct.unpack(\"B\", self.path_or_buf.read(1))[0]\n length = struct.unpack(self.byteorder + \"I\", self.path_or_buf.read(4))[0]\n va = self.path_or_buf.read(length)\n if typ == 130:\n decoded_va = va[0:-1].decode(self._encoding)\n else:\n # Stata says typ 129 can be binary, so use str\n decoded_va = str(va)\n # Wrap v_o in a string to allow uint64 values as keys on 32bit OS\n self.GSO[str(v_o)] = decoded_va\n\n def __next__(self) -> DataFrame:\n return self.read(nrows=self._chunksize or 1)\n\n def get_chunk(self, size: Optional[int] = None) -> DataFrame:\n \"\"\"\n Reads lines from Stata file and returns as dataframe\n\n Parameters\n ----------\n size : int, defaults to None\n Number of lines to read. If None, reads whole file.\n\n Returns\n -------\n DataFrame\n \"\"\"\n if size is None:\n size = self._chunksize\n return self.read(nrows=size)\n\n @Appender(_read_method_doc)\n def read(\n self,\n nrows: Optional[int] = None,\n convert_dates: Optional[bool] = None,\n convert_categoricals: Optional[bool] = None,\n index_col: Optional[str] = None,\n convert_missing: Optional[bool] = None,\n preserve_dtypes: Optional[bool] = None,\n columns: Optional[Sequence[str]] = None,\n order_categoricals: Optional[bool] = None,\n ) -> DataFrame:\n # Handle empty file or chunk. If reading incrementally raise\n # StopIteration. 
If reading the whole thing return an empty\n # data frame.\n if (self.nobs == 0) and (nrows is None):\n self._can_read_value_labels = True\n self._data_read = True\n self.close()\n return DataFrame(columns=self.varlist)\n\n # Handle options\n if convert_dates is None:\n convert_dates = self._convert_dates\n if convert_categoricals is None:\n convert_categoricals = self._convert_categoricals\n if convert_missing is None:\n convert_missing = self._convert_missing\n if preserve_dtypes is None:\n preserve_dtypes = self._preserve_dtypes\n if columns is None:\n columns = self._columns\n if order_categoricals is None:\n order_categoricals = self._order_categoricals\n if index_col is None:\n index_col = self._index_col\n\n if nrows is None:\n nrows = self.nobs\n\n if (self.format_version >= 117) and (not self._value_labels_read):\n self._can_read_value_labels = True\n self._read_strls()\n\n # Read data\n assert self._dtype is not None\n dtype = self._dtype\n max_read_len = (self.nobs - self._lines_read) * dtype.itemsize\n read_len = nrows * dtype.itemsize\n read_len = min(read_len, max_read_len)\n if read_len <= 0:\n # Iterator has finished, should never be here unless\n # we are reading the file incrementally\n if convert_categoricals:\n self._read_value_labels()\n self.close()\n raise StopIteration\n offset = self._lines_read * dtype.itemsize\n self.path_or_buf.seek(self.data_location + offset)\n read_lines = min(nrows, self.nobs - self._lines_read)\n data = np.frombuffer(\n self.path_or_buf.read(read_len), dtype=dtype, count=read_lines\n )\n\n self._lines_read += read_lines\n if self._lines_read == self.nobs:\n self._can_read_value_labels = True\n self._data_read = True\n # if necessary, swap the byte order to native here\n if self.byteorder != self._native_byteorder:\n data = data.byteswap().newbyteorder()\n\n if convert_categoricals:\n self._read_value_labels()\n\n if len(data) == 0:\n data = DataFrame(columns=self.varlist)\n else:\n data = DataFrame.from_records(data)\n data.columns = self.varlist\n\n # If index is not specified, use actual row number rather than\n # restarting at 0 for each chunk.\n if index_col is None:\n ix = np.arange(self._lines_read - read_lines, self._lines_read)\n data = data.set_index(ix)\n\n if columns is not None:\n try:\n data = self._do_select_columns(data, columns)\n except ValueError:\n self.close()\n raise\n\n # Decode strings\n for col, typ in zip(data, self.typlist):\n if type(typ) is int:\n data[col] = data[col].apply(self._decode, convert_dtype=True)\n\n data = self._insert_strls(data)\n\n cols_ = np.where(self.dtyplist)[0]\n\n # Convert columns (if needed) to match input type\n ix = data.index\n requires_type_conversion = False\n data_formatted = []\n for i in cols_:\n if self.dtyplist[i] is not None:\n col = data.columns[i]\n dtype = data[col].dtype\n if dtype != np.dtype(object) and dtype != self.dtyplist[i]:\n requires_type_conversion = True\n data_formatted.append(\n (col, Series(data[col], ix, self.dtyplist[i]))\n )\n else:\n data_formatted.append((col, data[col]))\n if requires_type_conversion:\n data = DataFrame.from_dict(dict(data_formatted))\n del data_formatted\n\n data = self._do_convert_missing(data, convert_missing)\n\n if convert_dates:\n\n def any_startswith(x: str) -> bool:\n return any(x.startswith(fmt) for fmt in _date_formats)\n\n cols = np.where([any_startswith(x) for x in self.fmtlist])[0]\n for i in cols:\n col = data.columns[i]\n try:\n data[col] = _stata_elapsed_date_to_datetime_vec(\n data[col], self.fmtlist[i]\n )\n except 
ValueError:\n self.close()\n raise\n\n if convert_categoricals and self.format_version > 108:\n data = self._do_convert_categoricals(\n data, self.value_label_dict, self.lbllist, order_categoricals\n )\n\n if not preserve_dtypes:\n retyped_data = []\n convert = False\n for col in data:\n dtype = data[col].dtype\n if dtype in (np.float16, np.float32):\n dtype = np.float64\n convert = True\n elif dtype in (np.int8, np.int16, np.int32):\n dtype = np.int64\n convert = True\n retyped_data.append((col, data[col].astype(dtype)))\n if convert:\n data = DataFrame.from_dict(dict(retyped_data))\n\n if index_col is not None:\n data = data.set_index(data.pop(index_col))\n\n return data\n\n def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFrame:\n # Check for missing values, and replace if found\n replacements = {}\n for i, colname in enumerate(data):\n fmt = self.typlist[i]\n if fmt not in self.VALID_RANGE:\n continue\n\n nmin, nmax = self.VALID_RANGE[fmt]\n series = data[colname]\n missing = np.logical_or(series < nmin, series > nmax)\n\n if not missing.any():\n continue\n\n if convert_missing: # Replacement follows Stata notation\n missing_loc = np.nonzero(missing._ndarray_values)[0]\n umissing, umissing_loc = np.unique(series[missing], return_inverse=True)\n replacement = Series(series, dtype=np.object)\n for j, um in enumerate(umissing):\n missing_value = StataMissingValue(um)\n\n loc = missing_loc[umissing_loc == j]\n replacement.iloc[loc] = missing_value\n else: # All replacements are identical\n dtype = series.dtype\n if dtype not in (np.float32, np.float64):\n dtype = np.float64\n replacement = Series(series, dtype=dtype)\n replacement[missing] = np.nan\n replacements[colname] = replacement\n if replacements:\n columns = data.columns\n replacement_df = DataFrame(replacements)\n replaced = concat([data.drop(replacement_df.columns, 1), replacement_df], 1)\n data = replaced[columns]\n return data\n\n def _insert_strls(self, data: DataFrame) -> DataFrame:\n if not hasattr(self, \"GSO\") or len(self.GSO) == 0:\n return data\n for i, typ in enumerate(self.typlist):\n if typ != \"Q\":\n continue\n # Wrap v_o in a string to allow uint64 values as keys on 32bit OS\n data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]\n return data\n\n def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFrame:\n\n if not self._column_selector_set:\n column_set = set(columns)\n if len(column_set) != len(columns):\n raise ValueError(\"columns contains duplicate entries\")\n unmatched = column_set.difference(data.columns)\n if unmatched:\n joined = \", \".join(list(unmatched))\n raise ValueError(\n \"The following columns were not \"\n f\"found in the Stata data set: {joined}\"\n )\n # Copy information for retained columns for later processing\n dtyplist = []\n typlist = []\n fmtlist = []\n lbllist = []\n for col in columns:\n i = data.columns.get_loc(col)\n dtyplist.append(self.dtyplist[i])\n typlist.append(self.typlist[i])\n fmtlist.append(self.fmtlist[i])\n lbllist.append(self.lbllist[i])\n\n self.dtyplist = dtyplist\n self.typlist = typlist\n self.fmtlist = fmtlist\n self.lbllist = lbllist\n self._column_selector_set = True\n\n return data[columns]\n\n @staticmethod\n def _do_convert_categoricals(\n data: DataFrame,\n value_label_dict: Dict[str, Dict[Union[float, int], str]],\n lbllist: Sequence[str],\n order_categoricals: bool,\n ) -> DataFrame:\n \"\"\"\n Converts categorical columns to Categorical type.\n \"\"\"\n value_labels = 
list(value_label_dict.keys())\n cat_converted_data = []\n for col, label in zip(data, lbllist):\n if label in value_labels:\n # Explicit call with ordered=True\n cat_data = Categorical(data[col], ordered=order_categoricals)\n categories = []\n for category in cat_data.categories:\n if category in value_label_dict[label]:\n categories.append(value_label_dict[label][category])\n else:\n categories.append(category) # Partially labeled\n try:\n cat_data.categories = categories\n except ValueError as err:\n vc = Series(categories).value_counts()\n repeated_cats = list(vc.index[vc > 1])\n repeats = \"-\" * 80 + \"\\n\" + \"\\n\".join(repeated_cats)\n # GH 25772\n msg = f\"\"\"\nValue labels for column {col} are not unique. These cannot be converted to\npandas categoricals.\n\nEither read the file with `convert_categoricals` set to False or use the\nlow level interface in `StataReader` to separately read the values and the\nvalue_labels.\n\nThe repeated labels are:\n{repeats}\n\"\"\"\n raise ValueError(msg) from err\n # TODO: is the next line needed above in the data(...) method?\n cat_series = Series(cat_data, index=data.index)\n cat_converted_data.append((col, cat_series))\n else:\n cat_converted_data.append((col, data[col]))\n data = DataFrame.from_dict(dict(cat_converted_data))\n return data\n\n @property\n def data_label(self) -> str:\n \"\"\"\n Return data label of Stata file.\n \"\"\"\n return self._data_label\n\n def variable_labels(self) -> Dict[str, str]:\n \"\"\"\n Return variable labels as a dict, associating each variable name\n with corresponding label.\n\n Returns\n -------\n dict\n \"\"\"\n return dict(zip(self.varlist, self._variable_labels))\n\n def value_labels(self) -> Dict[str, Dict[Union[float, int], str]]:\n \"\"\"\n Return a dict, associating each variable name a dict, associating\n each value its corresponding label.\n\n Returns\n -------\n dict\n \"\"\"\n if not self._value_labels_read:\n self._read_value_labels()\n\n return self.value_label_dict\n\n\n@Appender(_read_stata_doc)\ndef read_stata(\n filepath_or_buffer: FilePathOrBuffer,\n convert_dates: bool = True,\n convert_categoricals: bool = True,\n index_col: Optional[str] = None,\n convert_missing: bool = False,\n preserve_dtypes: bool = True,\n columns: Optional[Sequence[str]] = None,\n order_categoricals: bool = True,\n chunksize: Optional[int] = None,\n iterator: bool = False,\n) -> Union[DataFrame, StataReader]:\n\n reader = StataReader(\n filepath_or_buffer,\n convert_dates=convert_dates,\n convert_categoricals=convert_categoricals,\n index_col=index_col,\n convert_missing=convert_missing,\n preserve_dtypes=preserve_dtypes,\n columns=columns,\n order_categoricals=order_categoricals,\n chunksize=chunksize,\n )\n\n if iterator or chunksize:\n return reader\n\n try:\n data = reader.read()\n finally:\n reader.close()\n return data\n\n\ndef _open_file_binary_write(fname: FilePathOrBuffer) -> Tuple[BinaryIO, bool]:\n \"\"\"\n Open a binary file or no-op if file-like.\n\n Parameters\n ----------\n fname : string path, path object or buffer\n\n Returns\n -------\n file : file-like object\n File object supporting write\n own : bool\n True if the file was created, otherwise False\n \"\"\"\n if hasattr(fname, \"write\"):\n # See https://github.com/python/mypy/issues/1424 for hasattr challenges\n return fname, False # type: ignore\n elif isinstance(fname, (str, Path)):\n return open(fname, \"wb\"), True\n else:\n raise TypeError(\"fname must be a binary file, buffer or path-like.\")\n\n\ndef _set_endianness(endianness: str) 
-> str:\n if endianness.lower() in [\"<\", \"little\"]:\n return \"<\"\n elif endianness.lower() in [\">\", \"big\"]:\n return \">\"\n else: # pragma : no cover\n raise ValueError(f\"Endianness {endianness} not understood\")\n\n\ndef _pad_bytes(name: AnyStr, length: int) -> AnyStr:\n \"\"\"\n Take a char string and pads it with null bytes until it's length chars.\n \"\"\"\n if isinstance(name, bytes):\n return name + b\"\\x00\" * (length - len(name))\n return name + \"\\x00\" * (length - len(name))\n\n\ndef _convert_datetime_to_stata_type(fmt: str) -> np.dtype:\n \"\"\"\n Convert from one of the stata date formats to a type in TYPE_MAP.\n \"\"\"\n if fmt in [\n \"tc\",\n \"%tc\",\n \"td\",\n \"%td\",\n \"tw\",\n \"%tw\",\n \"tm\",\n \"%tm\",\n \"tq\",\n \"%tq\",\n \"th\",\n \"%th\",\n \"ty\",\n \"%ty\",\n ]:\n return np.float64 # Stata expects doubles for SIFs\n else:\n raise NotImplementedError(f\"Format {fmt} not implemented\")\n\n\ndef _maybe_convert_to_int_keys(convert_dates: Dict, varlist: List[Label]) -> Dict:\n new_dict = {}\n for key in convert_dates:\n if not convert_dates[key].startswith(\"%\"): # make sure proper fmts\n convert_dates[key] = \"%\" + convert_dates[key]\n if key in varlist:\n new_dict.update({varlist.index(key): convert_dates[key]})\n else:\n if not isinstance(key, int):\n raise ValueError(\"convert_dates key must be a column or an integer\")\n new_dict.update({key: convert_dates[key]})\n return new_dict\n\n\ndef _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int:\n \"\"\"\n Convert dtype types to stata types. Returns the byte of the given ordinal.\n See TYPE_MAP and comments for an explanation. This is also explained in\n the dta spec.\n 1 - 244 are strings of this length\n Pandas Stata\n 251 - for int8 byte\n 252 - for int16 int\n 253 - for int32 long\n 254 - for float32 float\n 255 - for double double\n\n If there are dates to convert, then dtype will already have the correct\n type inserted.\n \"\"\"\n # TODO: expand to handle datetime to integer conversion\n if dtype.type == np.object_: # try to coerce it to the biggest string\n # not memory efficient, what else could we\n # do?\n itemsize = max_len_string_array(ensure_object(column.values))\n return max(itemsize, 1)\n elif dtype == np.float64:\n return 255\n elif dtype == np.float32:\n return 254\n elif dtype == np.int32:\n return 253\n elif dtype == np.int16:\n return 252\n elif dtype == np.int8:\n return 251\n else: # pragma : no cover\n raise NotImplementedError(f\"Data type {dtype} not supported.\")\n\n\ndef _dtype_to_default_stata_fmt(\n dtype, column: Series, dta_version: int = 114, force_strl: bool = False\n) -> str:\n \"\"\"\n Map numpy dtype to stata's default format for this type. Not terribly\n important since users can change this in Stata. Semantics are\n\n object -> \"%DDs\" where DD is the length of the string. 
If not a string,\n raise ValueError\n float64 -> \"%10.0g\"\n float32 -> \"%9.0g\"\n int64 -> \"%9.0g\"\n int32 -> \"%12.0g\"\n int16 -> \"%8.0g\"\n int8 -> \"%8.0g\"\n strl -> \"%9s\"\n \"\"\"\n # TODO: Refactor to combine type with format\n # TODO: expand this to handle a default datetime format?\n if dta_version < 117:\n max_str_len = 244\n else:\n max_str_len = 2045\n if force_strl:\n return \"%9s\"\n if dtype.type == np.object_:\n itemsize = max_len_string_array(ensure_object(column.values))\n if itemsize > max_str_len:\n if dta_version >= 117:\n return \"%9s\"\n else:\n raise ValueError(excessive_string_length_error.format(column.name))\n return \"%\" + str(max(itemsize, 1)) + \"s\"\n elif dtype == np.float64:\n return \"%10.0g\"\n elif dtype == np.float32:\n return \"%9.0g\"\n elif dtype == np.int32:\n return \"%12.0g\"\n elif dtype == np.int8 or dtype == np.int16:\n return \"%8.0g\"\n else: # pragma : no cover\n raise NotImplementedError(f\"Data type {dtype} not supported.\")\n\n\nclass StataWriter(StataParser):\n \"\"\"\n A class for writing Stata binary dta files\n\n Parameters\n ----------\n fname : path (string), buffer or path object\n string, path object (pathlib.Path or py._path.local.LocalPath) or\n object implementing a binary write() functions. If using a buffer\n then the buffer will not be automatically closed after the file\n is written.\n\n .. versionadded:: 0.23.0 support for pathlib, py.path.\n\n data : DataFrame\n Input to save\n convert_dates : dict\n Dictionary mapping columns containing datetime types to stata internal\n format to use when writing the dates. Options are 'tc', 'td', 'tm',\n 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.\n Datetime columns that do not have a conversion type specified will be\n converted to 'tc'. Raises NotImplementedError if a datetime column has\n timezone information\n write_index : bool\n Write the index to Stata dataset.\n byteorder : str\n Can be \">\", \"<\", \"little\", or \"big\". default is `sys.byteorder`\n time_stamp : datetime\n A datetime to use as file creation date. Default is the current time\n data_label : str\n A label for the data set. 
Must be 80 characters or smaller.\n variable_labels : dict\n Dictionary containing columns as keys and variable labels as values.\n Each label must be 80 characters or smaller.\n\n Returns\n -------\n writer : StataWriter instance\n The StataWriter instance has a write_file method, which will\n write the file to the given `fname`.\n\n Raises\n ------\n NotImplementedError\n * If datetimes contain timezone information\n ValueError\n * Columns listed in convert_dates are neither datetime64[ns]\n or datetime.datetime\n * Column dtype is not representable in Stata\n * Column listed in convert_dates is not in DataFrame\n * Categorical label contains more than 32,000 characters\n\n Examples\n --------\n >>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])\n >>> writer = StataWriter('./data_file.dta', data)\n >>> writer.write_file()\n\n Or with dates\n >>> from datetime import datetime\n >>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])\n >>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})\n >>> writer.write_file()\n \"\"\"\n\n _max_string_length = 244\n _encoding = \"latin-1\"\n\n def __init__(\n self,\n fname: FilePathOrBuffer,\n data: DataFrame,\n convert_dates: Optional[Dict[Label, str]] = None,\n write_index: bool = True,\n byteorder: Optional[str] = None,\n time_stamp: Optional[datetime.datetime] = None,\n data_label: Optional[str] = None,\n variable_labels: Optional[Dict[Label, str]] = None,\n ):\n super().__init__()\n self._convert_dates = {} if convert_dates is None else convert_dates\n self._write_index = write_index\n self._time_stamp = time_stamp\n self._data_label = data_label\n self._variable_labels = variable_labels\n self._own_file = True\n # attach nobs, nvars, data, varlist, typlist\n self._prepare_pandas(data)\n\n if byteorder is None:\n byteorder = sys.byteorder\n self._byteorder = _set_endianness(byteorder)\n self._fname = stringify_path(fname)\n self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}\n self._converted_names: Dict[Label, str] = {}\n self._file: Optional[BinaryIO] = None\n\n def _write(self, to_write: str) -> None:\n \"\"\"\n Helper to call encode before writing to file for Python 3 compat.\n \"\"\"\n assert self._file is not None\n self._file.write(to_write.encode(self._encoding))\n\n def _write_bytes(self, value: bytes) -> None:\n \"\"\"\n Helper to assert file is open before writing.\n \"\"\"\n assert self._file is not None\n self._file.write(value)\n\n def _prepare_categoricals(self, data: DataFrame) -> DataFrame:\n \"\"\"\n Check for categorical columns, retain categorical information for\n Stata file and convert categorical data to int\n \"\"\"\n is_cat = [is_categorical_dtype(data[col]) for col in data]\n self._is_col_cat = is_cat\n self._value_labels: List[StataValueLabel] = []\n if not any(is_cat):\n return data\n\n get_base_missing_value = StataMissingValue.get_base_missing_value\n data_formatted = []\n for col, col_is_cat in zip(data, is_cat):\n if col_is_cat:\n svl = StataValueLabel(data[col], encoding=self._encoding)\n self._value_labels.append(svl)\n dtype = data[col].cat.codes.dtype\n if dtype == np.int64:\n raise ValueError(\n \"It is not possible to export \"\n \"int64-based categorical data to Stata.\"\n )\n values = data[col].cat.codes.values.copy()\n\n # Upcast if needed so that correct missing values can be set\n if values.max() >= get_base_missing_value(dtype):\n if dtype == np.int8:\n dtype = np.int16\n elif dtype == np.int16:\n dtype = np.int32\n else:\n dtype = np.float64\n values = 
np.array(values, dtype=dtype)\n\n # Replace missing values with Stata missing value for type\n values[values == -1] = get_base_missing_value(dtype)\n data_formatted.append((col, values))\n else:\n data_formatted.append((col, data[col]))\n return DataFrame.from_dict(dict(data_formatted))\n\n def _replace_nans(self, data: DataFrame) -> DataFrame:\n \"\"\"\n Checks floating point data columns for nans, and replaces these with\n the generic Stata missing value (.)\n \"\"\"\n for c in data:\n dtype = data[c].dtype\n if dtype in (np.float32, np.float64):\n if dtype == np.float32:\n replacement = self.MISSING_VALUES[\"f\"]\n else:\n replacement = self.MISSING_VALUES[\"d\"]\n data[c] = data[c].fillna(replacement)\n\n return data\n\n def _update_strl_names(self) -> None:\n \"\"\"No-op, forward compatibility\"\"\"\n pass\n\n def _validate_variable_name(self, name: str) -> str:\n \"\"\"\n Validate variable names for Stata export.\n\n Parameters\n ----------\n name : str\n Variable name\n\n Returns\n -------\n str\n The validated name with invalid characters replaced with\n underscores.\n\n Notes\n -----\n Stata 114 and 117 support ascii characters in a-z, A-Z, 0-9\n and _.\n \"\"\"\n for c in name:\n if (\n (c < \"A\" or c > \"Z\")\n and (c < \"a\" or c > \"z\")\n and (c < \"0\" or c > \"9\")\n and c != \"_\"\n ):\n name = name.replace(c, \"_\")\n return name\n\n def _check_column_names(self, data: DataFrame) -> DataFrame:\n \"\"\"\n Checks column names to ensure that they are valid Stata column names.\n This includes checks for:\n * Non-string names\n * Stata keywords\n * Variables that start with numbers\n * Variables with names that are too long\n\n When an illegal variable name is detected, it is converted, and if\n dates are exported, the variable name is propagated to the date\n conversion dictionary\n \"\"\"\n converted_names: Dict[Label, str] = {}\n columns: List[Label] = list(data.columns)\n original_columns = columns[:]\n\n duplicate_var_id = 0\n for j, name in enumerate(columns):\n orig_name = name\n if not isinstance(name, str):\n name = str(name)\n\n name = self._validate_variable_name(name)\n\n # Variable name must not be a reserved word\n if name in self.RESERVED_WORDS:\n name = \"_\" + name\n\n # Variable name may not start with a number\n if \"0\" <= name[0] <= \"9\":\n name = \"_\" + name\n\n name = name[: min(len(name), 32)]\n\n if not name == orig_name:\n # check for duplicates\n while columns.count(name) > 0:\n # prepend ascending number to avoid duplicates\n name = \"_\" + str(duplicate_var_id) + name\n name = name[: min(len(name), 32)]\n duplicate_var_id += 1\n converted_names[orig_name] = name\n\n columns[j] = name\n\n data.columns = Index(columns)\n\n # Check date conversion, and fix key if needed\n if self._convert_dates:\n for c, o in zip(columns, original_columns):\n if c != o:\n self._convert_dates[c] = self._convert_dates[o]\n del self._convert_dates[o]\n\n if converted_names:\n conversion_warning = []\n for orig_name, name in converted_names.items():\n msg = f\"{orig_name} -> {name}\"\n conversion_warning.append(msg)\n\n ws = invalid_name_doc.format(\"\\n \".join(conversion_warning))\n warnings.warn(ws, InvalidColumnName)\n\n self._converted_names = converted_names\n self._update_strl_names()\n\n return data\n\n def _set_formats_and_types(self, dtypes: Series) -> None:\n self.fmtlist: List[str] = []\n self.typlist: List[int] = []\n for col, dtype in dtypes.items():\n self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, self.data[col]))\n 
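# --- Illustrative sketch (not part of pandas) -----------------------------
# _validate_variable_name and _check_column_names above boil down to three
# steps: replace illegal characters, prefix reserved words and leading
# digits with '_', and truncate to 32 characters. A condensed version
# (stata_safe_name is a hypothetical helper; only a token subset of
# RESERVED_WORDS is shown):
import re

RESERVED_SUBSET = frozenset({"byte", "int", "long", "double", "if", "in"})

def stata_safe_name(name) -> str:
    name = re.sub(r"[^A-Za-z0-9_]", "_", str(name))  # illegal chars -> '_'
    if name in RESERVED_SUBSET or name[:1].isdigit():
        name = "_" + name                            # keyword / leading digit
    return name[:32]                                 # Stata 114/117 name limit
# ---------------------------------------------------------------------------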
self.typlist.append(_dtype_to_stata_type(dtype, self.data[col]))\n\n def _prepare_pandas(self, data: DataFrame) -> None:\n # NOTE: we might need a different API / class for pandas objects so\n # we can set different semantics - handle this with a PR to pandas.io\n\n data = data.copy()\n\n if self._write_index:\n temp = data.reset_index()\n if isinstance(temp, DataFrame):\n data = temp\n\n # Ensure column names are strings\n data = self._check_column_names(data)\n\n # Check columns for compatibility with stata, upcast if necessary\n # Raise if outside the supported range\n data = _cast_to_stata_types(data)\n\n # Replace NaNs with Stata missing values\n data = self._replace_nans(data)\n\n # Convert categoricals to int data, and strip labels\n data = self._prepare_categoricals(data)\n\n self.nobs, self.nvar = data.shape\n self.data = data\n self.varlist = data.columns.tolist()\n\n dtypes = data.dtypes\n\n # Ensure all date columns are converted\n for col in data:\n if col in self._convert_dates:\n continue\n if is_datetime64_dtype(data[col]):\n self._convert_dates[col] = \"tc\"\n\n self._convert_dates = _maybe_convert_to_int_keys(\n self._convert_dates, self.varlist\n )\n for key in self._convert_dates:\n new_type = _convert_datetime_to_stata_type(self._convert_dates[key])\n dtypes[key] = np.dtype(new_type)\n\n # Verify object arrays are strings and encode to bytes\n self._encode_strings()\n\n self._set_formats_and_types(dtypes)\n\n # set the given format for the datetime cols\n if self._convert_dates is not None:\n for key in self._convert_dates:\n if isinstance(key, int):\n self.fmtlist[key] = self._convert_dates[key]\n\n def _encode_strings(self) -> None:\n \"\"\"\n Encode strings in dta-specific encoding\n\n Do not encode columns marked for date conversion or for strL\n conversion. The strL converter independently handles conversion and\n also accepts empty string arrays.\n \"\"\"\n convert_dates = self._convert_dates\n # _convert_strl is not available in dta 114\n convert_strl = getattr(self, \"_convert_strl\", [])\n for i, col in enumerate(self.data):\n # Skip columns marked for date conversion or strl conversion\n if i in convert_dates or col in convert_strl:\n continue\n column = self.data[col]\n dtype = column.dtype\n if dtype.type == np.object_:\n inferred_dtype = infer_dtype(column, skipna=True)\n if not ((inferred_dtype == \"string\") or len(column) == 0):\n col = column.name\n raise ValueError(\n f\"\"\"\\\nColumn `{col}` cannot be exported.\\n\\nOnly string-like object arrays\ncontaining all strings or a mix of strings and None can be exported.\nObject arrays containing only null values are prohibited. 
Other object\ntypes cannot be exported and must first be converted to one of the\nsupported types.\"\"\"\n )\n encoded = self.data[col].str.encode(self._encoding)\n # If larger than _max_string_length do nothing\n if (\n max_len_string_array(ensure_object(encoded.values))\n <= self._max_string_length\n ):\n self.data[col] = encoded\n\n def write_file(self) -> None:\n self._file, self._own_file = _open_file_binary_write(self._fname)\n try:\n self._write_header(data_label=self._data_label, time_stamp=self._time_stamp)\n self._write_map()\n self._write_variable_types()\n self._write_varnames()\n self._write_sortlist()\n self._write_formats()\n self._write_value_label_names()\n self._write_variable_labels()\n self._write_expansion_fields()\n self._write_characteristics()\n records = self._prepare_data()\n self._write_data(records)\n self._write_strls()\n self._write_value_labels()\n self._write_file_close_tag()\n self._write_map()\n except Exception as exc:\n self._close()\n if self._own_file:\n try:\n if isinstance(self._fname, (str, Path)):\n os.unlink(self._fname)\n except OSError:\n warnings.warn(\n f\"This save was not successful but {self._fname} could not \"\n \"be deleted. This file is not valid.\",\n ResourceWarning,\n )\n raise exc\n else:\n self._close()\n\n def _close(self) -> None:\n \"\"\"\n Close the file if it was created by the writer.\n\n If a buffer or file-like object was passed in, for example a GzipFile,\n then leave this file open for the caller to close. In either case,\n attempt to flush the file contents to ensure they are written to disk\n (if supported)\n \"\"\"\n # Some file-like objects might not support flush\n assert self._file is not None\n try:\n self._file.flush()\n except AttributeError:\n pass\n if self._own_file:\n self._file.close()\n\n def _write_map(self) -> None:\n \"\"\"No-op, future compatibility\"\"\"\n pass\n\n def _write_file_close_tag(self) -> None:\n \"\"\"No-op, future compatibility\"\"\"\n pass\n\n def _write_characteristics(self) -> None:\n \"\"\"No-op, future compatibility\"\"\"\n pass\n\n def _write_strls(self) -> None:\n \"\"\"No-op, future compatibility\"\"\"\n pass\n\n def _write_expansion_fields(self) -> None:\n \"\"\"Write 5 zeros for expansion fields\"\"\"\n self._write(_pad_bytes(\"\", 5))\n\n def _write_value_labels(self) -> None:\n for vl in self._value_labels:\n self._write_bytes(vl.generate_value_label(self._byteorder))\n\n def _write_header(\n self,\n data_label: Optional[str] = None,\n time_stamp: Optional[datetime.datetime] = None,\n ) -> None:\n byteorder = self._byteorder\n # ds_format - just use 114\n self._write_bytes(struct.pack(\"b\", 114))\n # byteorder\n self._write(byteorder == \">\" and \"\\x01\" or \"\\x02\")\n # filetype\n self._write(\"\\x01\")\n # unused\n self._write(\"\\x00\")\n # number of vars, 2 bytes\n self._write_bytes(struct.pack(byteorder + \"h\", self.nvar)[:2])\n # number of obs, 4 bytes\n self._write_bytes(struct.pack(byteorder + \"i\", self.nobs)[:4])\n # data label 81 bytes, char, null terminated\n if data_label is None:\n self._write_bytes(self._null_terminate_bytes(_pad_bytes(\"\", 80)))\n else:\n self._write_bytes(\n self._null_terminate_bytes(_pad_bytes(data_label[:80], 80))\n )\n # time stamp, 18 bytes, char, null terminated\n # format dd Mon yyyy hh:mm\n if time_stamp is None:\n time_stamp = datetime.datetime.now()\n elif not isinstance(time_stamp, datetime.datetime):\n raise ValueError(\"time_stamp should be datetime type\")\n # GH #13856\n # Avoid locale-specific month conversion\n months = 
[\n \"Jan\",\n \"Feb\",\n \"Mar\",\n \"Apr\",\n \"May\",\n \"Jun\",\n \"Jul\",\n \"Aug\",\n \"Sep\",\n \"Oct\",\n \"Nov\",\n \"Dec\",\n ]\n month_lookup = {i + 1: month for i, month in enumerate(months)}\n ts = (\n time_stamp.strftime(\"%d \")\n + month_lookup[time_stamp.month]\n + time_stamp.strftime(\" %Y %H:%M\")\n )\n self._write_bytes(self._null_terminate_bytes(ts))\n\n def _write_variable_types(self) -> None:\n for typ in self.typlist:\n self._write_bytes(struct.pack(\"B\", typ))\n\n def _write_varnames(self) -> None:\n # varlist names are checked by _check_column_names\n # varlist, requires null terminated\n for name in self.varlist:\n name = self._null_terminate_str(name)\n name = _pad_bytes(name[:32], 33)\n self._write(name)\n\n def _write_sortlist(self) -> None:\n # srtlist, 2*(nvar+1), int array, encoded by byteorder\n srtlist = _pad_bytes(\"\", 2 * (self.nvar + 1))\n self._write(srtlist)\n\n def _write_formats(self) -> None:\n # fmtlist, 49*nvar, char array\n for fmt in self.fmtlist:\n self._write(_pad_bytes(fmt, 49))\n\n def _write_value_label_names(self) -> None:\n # lbllist, 33*nvar, char array\n for i in range(self.nvar):\n # Use variable name when categorical\n if self._is_col_cat[i]:\n name = self.varlist[i]\n name = self._null_terminate_str(name)\n name = _pad_bytes(name[:32], 33)\n self._write(name)\n else: # Default is empty label\n self._write(_pad_bytes(\"\", 33))\n\n def _write_variable_labels(self) -> None:\n # Missing labels are 80 blank characters plus null termination\n blank = _pad_bytes(\"\", 81)\n\n if self._variable_labels is None:\n for i in range(self.nvar):\n self._write(blank)\n return\n\n for col in self.data:\n if col in self._variable_labels:\n label = self._variable_labels[col]\n if len(label) > 80:\n raise ValueError(\"Variable labels must be 80 characters or fewer\")\n is_latin1 = all(ord(c) < 256 for c in label)\n if not is_latin1:\n raise ValueError(\n \"Variable labels must contain only characters that \"\n \"can be encoded in Latin-1\"\n )\n self._write(_pad_bytes(label, 81))\n else:\n self._write(blank)\n\n def _convert_strls(self, data: DataFrame) -> DataFrame:\n \"\"\"No-op, future compatibility\"\"\"\n return data\n\n def _prepare_data(self) -> np.recarray:\n data = self.data\n typlist = self.typlist\n convert_dates = self._convert_dates\n # 1. Convert dates\n if self._convert_dates is not None:\n for i, col in enumerate(data):\n if i in convert_dates:\n data[col] = _datetime_to_stata_elapsed_vec(\n data[col], self.fmtlist[i]\n )\n # 2. Convert strls\n data = self._convert_strls(data)\n\n # 3. 
Convert bad string data to '' and pad to correct length\n dtypes = {}\n native_byteorder = self._byteorder == _set_endianness(sys.byteorder)\n for i, col in enumerate(data):\n typ = typlist[i]\n if typ <= self._max_string_length:\n data[col] = data[col].fillna(\"\").apply(_pad_bytes, args=(typ,))\n stype = f\"S{typ}\"\n dtypes[col] = stype\n data[col] = data[col].astype(stype)\n else:\n dtype = data[col].dtype\n if not native_byteorder:\n dtype = dtype.newbyteorder(self._byteorder)\n dtypes[col] = dtype\n\n return data.to_records(index=False, column_dtypes=dtypes)\n\n def _write_data(self, records: np.recarray) -> None:\n self._write_bytes(records.tobytes())\n\n @staticmethod\n def _null_terminate_str(s: str) -> str:\n s += \"\\x00\"\n return s\n\n def _null_terminate_bytes(self, s: str) -> bytes:\n return self._null_terminate_str(s).encode(self._encoding)\n\n\ndef _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) -> int:\n \"\"\"\n Converts dtype types to stata types. Returns the byte of the given ordinal.\n See TYPE_MAP and comments for an explanation. This is also explained in\n the dta spec.\n 1 - 2045 are strings of this length\n Pandas Stata\n 32768 - for object strL\n 65526 - for float64 double\n 65527 - for float32 float\n 65528 - for int32 long\n 65529 - for int16 int\n 65530 - for int8 byte\n\n If there are dates to convert, then dtype will already have the correct\n type inserted.\n \"\"\"\n # TODO: expand to handle datetime to integer conversion\n if force_strl:\n return 32768\n if dtype.type == np.object_: # try to coerce it to the biggest string\n # not memory efficient, what else could we\n # do?\n itemsize = max_len_string_array(ensure_object(column.values))\n itemsize = max(itemsize, 1)\n if itemsize <= 2045:\n return itemsize\n return 32768\n elif dtype == np.float64:\n return 65526\n elif dtype == np.float32:\n return 65527\n elif dtype == np.int32:\n return 65528\n elif dtype == np.int16:\n return 65529\n elif dtype == np.int8:\n return 65530\n else: # pragma : no cover\n raise NotImplementedError(f\"Data type {dtype} not supported.\")\n\n\ndef _pad_bytes_new(name: Union[str, bytes], length: int) -> bytes:\n \"\"\"\n Takes a bytes instance and pads it with null bytes until it's length chars.\n \"\"\"\n if isinstance(name, str):\n name = bytes(name, \"utf-8\")\n return name + b\"\\x00\" * (length - len(name))\n\n\nclass StataStrLWriter:\n \"\"\"\n Converter for Stata StrLs\n\n Stata StrLs map 8 byte values to strings which are stored using a\n dictionary-like format where strings are keyed to two values.\n\n Parameters\n ----------\n df : DataFrame\n DataFrame to convert\n columns : Sequence[str]\n List of column names to convert to StrL\n version : int, optional\n dta version. Currently supports 117, 118 and 119\n byteorder : str, optional\n Can be \">\", \"<\", \"little\", or \"big\". default is `sys.byteorder`\n\n Notes\n -----\n Supports creation of the StrL block of a dta file for dta versions\n 117, 118 and 119. These differ in how the GSO is stored. 118 and\n 119 store the GSO lookup value as a uint32 and a uint64, while 117\n uses two uint32s. 118 and 119 also encode all strings as unicode\n which is required by the format. 
117 uses 'latin-1' a fixed width\n encoding that extends the 7-bit ascii table with an additional 128\n characters.\n \"\"\"\n\n def __init__(\n self,\n df: DataFrame,\n columns: Sequence[str],\n version: int = 117,\n byteorder: Optional[str] = None,\n ):\n if version not in (117, 118, 119):\n raise ValueError(\"Only dta versions 117, 118 and 119 supported\")\n self._dta_ver = version\n\n self.df = df\n self.columns = columns\n self._gso_table = {\"\": (0, 0)}\n if byteorder is None:\n byteorder = sys.byteorder\n self._byteorder = _set_endianness(byteorder)\n\n gso_v_type = \"I\" # uint32\n gso_o_type = \"Q\" # uint64\n self._encoding = \"utf-8\"\n if version == 117:\n o_size = 4\n gso_o_type = \"I\" # 117 used uint32\n self._encoding = \"latin-1\"\n elif version == 118:\n o_size = 6\n else: # version == 119\n o_size = 5\n self._o_offet = 2 ** (8 * (8 - o_size))\n self._gso_o_type = gso_o_type\n self._gso_v_type = gso_v_type\n\n def _convert_key(self, key: Tuple[int, int]) -> int:\n v, o = key\n return v + self._o_offet * o\n\n def generate_table(self) -> Tuple[Dict[str, Tuple[int, int]], DataFrame]:\n \"\"\"\n Generates the GSO lookup table for the DataFrame\n\n Returns\n -------\n gso_table : dict\n Ordered dictionary using the string found as keys\n and their lookup position (v,o) as values\n gso_df : DataFrame\n DataFrame where strl columns have been converted to\n (v,o) values\n\n Notes\n -----\n Modifies the DataFrame in-place.\n\n The DataFrame returned encodes the (v,o) values as uint64s. The\n encoding depends on the dta version, and can be expressed as\n\n enc = v + o * 2 ** (o_size * 8)\n\n so that v is stored in the lower bits and o is in the upper\n bits. o_size is\n\n * 117: 4\n * 118: 6\n * 119: 5\n \"\"\"\n gso_table = self._gso_table\n gso_df = self.df\n columns = list(gso_df.columns)\n selected = gso_df[self.columns]\n col_index = [(col, columns.index(col)) for col in self.columns]\n keys = np.empty(selected.shape, dtype=np.uint64)\n for o, (idx, row) in enumerate(selected.iterrows()):\n for j, (col, v) in enumerate(col_index):\n val = row[col]\n # Allow columns with mixed str and None (GH 23633)\n val = \"\" if val is None else val\n key = gso_table.get(val, None)\n if key is None:\n # Stata prefers human numbers\n key = (v + 1, o + 1)\n gso_table[val] = key\n keys[o, j] = self._convert_key(key)\n for i, col in enumerate(self.columns):\n gso_df[col] = keys[:, i]\n\n return gso_table, gso_df\n\n def generate_blob(self, gso_table: Dict[str, Tuple[int, int]]) -> bytes:\n \"\"\"\n Generates the binary blob of GSOs that is written to the dta file.\n\n Parameters\n ----------\n gso_table : dict\n Ordered dictionary (str, vo)\n\n Returns\n -------\n gso : bytes\n Binary content of dta file to be placed between strl tags\n\n Notes\n -----\n Output format depends on dta version. 
117 uses two uint32s to\n express v and o while 118+ uses a uint32 for v and a uint64 for o.\n \"\"\"\n # Format information\n # Length includes null term\n # 117\n # GSOvvvvooootllllxxxxxxxxxxxxxxx...x\n # 3 u4 u4 u1 u4 string + null term\n #\n # 118, 119\n # GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x\n # 3 u4 u8 u1 u4 string + null term\n\n bio = BytesIO()\n gso = bytes(\"GSO\", \"ascii\")\n gso_type = struct.pack(self._byteorder + \"B\", 130)\n null = struct.pack(self._byteorder + \"B\", 0)\n v_type = self._byteorder + self._gso_v_type\n o_type = self._byteorder + self._gso_o_type\n len_type = self._byteorder + \"I\"\n for strl, vo in gso_table.items():\n if vo == (0, 0):\n continue\n v, o = vo\n\n # GSO\n bio.write(gso)\n\n # vvvv\n bio.write(struct.pack(v_type, v))\n\n # oooo / oooooooo\n bio.write(struct.pack(o_type, o))\n\n # t\n bio.write(gso_type)\n\n # llll\n utf8_string = bytes(strl, \"utf-8\")\n bio.write(struct.pack(len_type, len(utf8_string) + 1))\n\n # xxx...xxx\n bio.write(utf8_string)\n bio.write(null)\n\n bio.seek(0)\n return bio.read()\n\n\nclass StataWriter117(StataWriter):\n \"\"\"\n A class for writing Stata binary dta files in Stata 13 format (117)\n\n .. versionadded:: 0.23.0\n\n Parameters\n ----------\n fname : path (string), buffer or path object\n string, path object (pathlib.Path or py._path.local.LocalPath) or\n object implementing a binary write() functions. If using a buffer\n then the buffer will not be automatically closed after the file\n is written.\n data : DataFrame\n Input to save\n convert_dates : dict\n Dictionary mapping columns containing datetime types to stata internal\n format to use when writing the dates. Options are 'tc', 'td', 'tm',\n 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.\n Datetime columns that do not have a conversion type specified will be\n converted to 'tc'. Raises NotImplementedError if a datetime column has\n timezone information\n write_index : bool\n Write the index to Stata dataset.\n byteorder : str\n Can be \">\", \"<\", \"little\", or \"big\". default is `sys.byteorder`\n time_stamp : datetime\n A datetime to use as file creation date. Default is the current time\n data_label : str\n A label for the data set. Must be 80 characters or smaller.\n variable_labels : dict\n Dictionary containing columns as keys and variable labels as values.\n Each label must be 80 characters or smaller.\n convert_strl : list\n List of columns names to convert to Stata StrL format. Columns with\n more than 2045 characters are automatically written as StrL.\n Smaller columns can be converted by including the column name. 
Using\n StrLs can reduce output file size when strings are longer than 8\n characters, and either frequently repeated or sparse.\n\n Returns\n -------\n writer : StataWriter117 instance\n The StataWriter117 instance has a write_file method, which will\n write the file to the given `fname`.\n\n Raises\n ------\n NotImplementedError\n * If datetimes contain timezone information\n ValueError\n * Columns listed in convert_dates are neither datetime64[ns]\n or datetime.datetime\n * Column dtype is not representable in Stata\n * Column listed in convert_dates is not in DataFrame\n * Categorical label contains more than 32,000 characters\n\n Examples\n --------\n >>> from pandas.io.stata import StataWriter117\n >>> data = pd.DataFrame([[1.0, 1, 'a']], columns=['a', 'b', 'c'])\n >>> writer = StataWriter117('./data_file.dta', data)\n >>> writer.write_file()\n\n Or with long strings stored in strl format\n\n >>> data = pd.DataFrame([['A relatively long string'], [''], ['']],\n ... columns=['strls'])\n >>> writer = StataWriter117('./data_file_with_long_strings.dta', data,\n ... convert_strl=['strls'])\n >>> writer.write_file()\n \"\"\"\n\n _max_string_length = 2045\n _dta_version = 117\n\n def __init__(\n self,\n fname: FilePathOrBuffer,\n data: DataFrame,\n convert_dates: Optional[Dict[Label, str]] = None,\n write_index: bool = True,\n byteorder: Optional[str] = None,\n time_stamp: Optional[datetime.datetime] = None,\n data_label: Optional[str] = None,\n variable_labels: Optional[Dict[Label, str]] = None,\n convert_strl: Optional[Sequence[Label]] = None,\n ):\n # Copy to new list since convert_strl might be modified later\n self._convert_strl: List[Label] = []\n if convert_strl is not None:\n self._convert_strl.extend(convert_strl)\n\n super().__init__(\n fname,\n data,\n convert_dates,\n write_index,\n byteorder=byteorder,\n time_stamp=time_stamp,\n data_label=data_label,\n variable_labels=variable_labels,\n )\n self._map: Dict[str, int] = {}\n self._strl_blob = b\"\"\n\n @staticmethod\n def _tag(val: Union[str, bytes], tag: str) -> bytes:\n \"\"\"Surround val with <tag></tag>\"\"\"\n if isinstance(val, str):\n val = bytes(val, \"utf-8\")\n return bytes(\"<\" + tag + \">\", \"utf-8\") + val + bytes(\"</\" + tag + \">\", \"utf-8\")\n\n def _update_map(self, tag: str) -> None:\n \"\"\"Update map location for tag with file position\"\"\"\n assert self._file is not None\n self._map[tag] = self._file.tell()\n\n def _write_header(\n self,\n data_label: Optional[str] = None,\n time_stamp: Optional[datetime.datetime] = None,\n ) -> None:\n \"\"\"Write the file header\"\"\"\n byteorder = self._byteorder\n self._write_bytes(bytes(\"<stata_dta>\", \"utf-8\"))\n bio = BytesIO()\n # ds_format - 117\n bio.write(self._tag(bytes(str(self._dta_version), \"utf-8\"), \"release\"))\n # byteorder\n bio.write(self._tag(byteorder == \">\" and \"MSF\" or \"LSF\", \"byteorder\"))\n # number of vars, 2 bytes in 117 and 118, 4 byte in 119\n nvar_type = \"H\" if self._dta_version <= 118 else \"I\"\n bio.write(self._tag(struct.pack(byteorder + nvar_type, self.nvar), \"K\"))\n # 117 uses 4 bytes, 118 uses 8\n nobs_size = \"I\" if self._dta_version == 117 else \"Q\"\n bio.write(self._tag(struct.pack(byteorder + nobs_size, self.nobs), \"N\"))\n # data label 81 bytes, char, null terminated\n label = data_label[:80] if data_label is not None else \"\"\n encoded_label = label.encode(self._encoding)\n label_size = \"B\" if self._dta_version == 117 else \"H\"\n label_len = struct.pack(byteorder + label_size, len(encoded_label))\n 
encoded_label = label_len + encoded_label\n bio.write(self._tag(encoded_label, \"label\"))\n # time stamp, 18 bytes, char, null terminated\n # format dd Mon yyyy hh:mm\n if time_stamp is None:\n time_stamp = datetime.datetime.now()\n elif not isinstance(time_stamp, datetime.datetime):\n raise ValueError(\"time_stamp should be datetime type\")\n # Avoid locale-specific month conversion\n months = [\n \"Jan\",\n \"Feb\",\n \"Mar\",\n \"Apr\",\n \"May\",\n \"Jun\",\n \"Jul\",\n \"Aug\",\n \"Sep\",\n \"Oct\",\n \"Nov\",\n \"Dec\",\n ]\n month_lookup = {i + 1: month for i, month in enumerate(months)}\n ts = (\n time_stamp.strftime(\"%d \")\n + month_lookup[time_stamp.month]\n + time_stamp.strftime(\" %Y %H:%M\")\n )\n # '\\x11' added due to inspection of Stata file\n stata_ts = b\"\\x11\" + bytes(ts, \"utf-8\")\n bio.write(self._tag(stata_ts, \"timestamp\"))\n bio.seek(0)\n self._write_bytes(self._tag(bio.read(), \"header\"))\n\n def _write_map(self) -> None:\n \"\"\"\n Called twice during file write. The first populates the values in\n the map with 0s. The second call writes the final map locations when\n all blocks have been written.\n \"\"\"\n assert self._file is not None\n if not self._map:\n self._map = dict(\n (\n (\"stata_data\", 0),\n (\"map\", self._file.tell()),\n (\"variable_types\", 0),\n (\"varnames\", 0),\n (\"sortlist\", 0),\n (\"formats\", 0),\n (\"value_label_names\", 0),\n (\"variable_labels\", 0),\n (\"characteristics\", 0),\n (\"data\", 0),\n (\"strls\", 0),\n (\"value_labels\", 0),\n (\"stata_data_close\", 0),\n (\"end-of-file\", 0),\n )\n )\n # Move to start of map\n self._file.seek(self._map[\"map\"])\n bio = BytesIO()\n for val in self._map.values():\n bio.write(struct.pack(self._byteorder + \"Q\", val))\n bio.seek(0)\n self._write_bytes(self._tag(bio.read(), \"map\"))\n\n def _write_variable_types(self) -> None:\n self._update_map(\"variable_types\")\n bio = BytesIO()\n for typ in self.typlist:\n bio.write(struct.pack(self._byteorder + \"H\", typ))\n bio.seek(0)\n self._write_bytes(self._tag(bio.read(), \"variable_types\"))\n\n def _write_varnames(self) -> None:\n self._update_map(\"varnames\")\n bio = BytesIO()\n # 118 scales by 4 to accommodate utf-8 data worst case encoding\n vn_len = 32 if self._dta_version == 117 else 128\n for name in self.varlist:\n name = self._null_terminate_str(name)\n name = _pad_bytes_new(name[:32].encode(self._encoding), vn_len + 1)\n bio.write(name)\n bio.seek(0)\n self._write_bytes(self._tag(bio.read(), \"varnames\"))\n\n def _write_sortlist(self) -> None:\n self._update_map(\"sortlist\")\n sort_size = 2 if self._dta_version < 119 else 4\n self._write_bytes(self._tag(b\"\\x00\" * sort_size * (self.nvar + 1), \"sortlist\"))\n\n def _write_formats(self) -> None:\n self._update_map(\"formats\")\n bio = BytesIO()\n fmt_len = 49 if self._dta_version == 117 else 57\n for fmt in self.fmtlist:\n bio.write(_pad_bytes_new(fmt.encode(self._encoding), fmt_len))\n bio.seek(0)\n self._write_bytes(self._tag(bio.read(), \"formats\"))\n\n def _write_value_label_names(self) -> None:\n self._update_map(\"value_label_names\")\n bio = BytesIO()\n # 118 scales by 4 to accommodate utf-8 data worst case encoding\n vl_len = 32 if self._dta_version == 117 else 128\n for i in range(self.nvar):\n # Use variable name when categorical\n name = \"\" # default name\n if self._is_col_cat[i]:\n name = self.varlist[i]\n name = self._null_terminate_str(name)\n encoded_name = _pad_bytes_new(name[:32].encode(self._encoding), vl_len + 1)\n bio.write(encoded_name)\n 
bio.seek(0)\n self._write_bytes(self._tag(bio.read(), \"value_label_names\"))\n\n def _write_variable_labels(self) -> None:\n # Missing labels are 80 blank characters plus null termination\n self._update_map(\"variable_labels\")\n bio = BytesIO()\n # 118 scales by 4 to accommodate utf-8 data worst case encoding\n vl_len = 80 if self._dta_version == 117 else 320\n blank = _pad_bytes_new(\"\", vl_len + 1)\n\n if self._variable_labels is None:\n for _ in range(self.nvar):\n bio.write(blank)\n bio.seek(0)\n self._write_bytes(self._tag(bio.read(), \"variable_labels\"))\n return\n\n for col in self.data:\n if col in self._variable_labels:\n label = self._variable_labels[col]\n if len(label) > 80:\n raise ValueError(\"Variable labels must be 80 characters or fewer\")\n try:\n encoded = label.encode(self._encoding)\n except UnicodeEncodeError as err:\n raise ValueError(\n \"Variable labels must contain only characters that \"\n f\"can be encoded in {self._encoding}\"\n ) from err\n\n bio.write(_pad_bytes_new(encoded, vl_len + 1))\n else:\n bio.write(blank)\n bio.seek(0)\n self._write_bytes(self._tag(bio.read(), \"variable_labels\"))\n\n def _write_characteristics(self) -> None:\n self._update_map(\"characteristics\")\n self._write_bytes(self._tag(b\"\", \"characteristics\"))\n\n def _write_data(self, records) -> None:\n self._update_map(\"data\")\n self._write_bytes(b\"<data>\")\n self._write_bytes(records.tobytes())\n self._write_bytes(b\"</data>\")\n\n def _write_strls(self) -> None:\n self._update_map(\"strls\")\n self._write_bytes(self._tag(self._strl_blob, \"strls\"))\n\n def _write_expansion_fields(self) -> None:\n \"\"\"No-op in dta 117+\"\"\"\n pass\n\n def _write_value_labels(self) -> None:\n self._update_map(\"value_labels\")\n bio = BytesIO()\n for vl in self._value_labels:\n lab = vl.generate_value_label(self._byteorder)\n lab = self._tag(lab, \"lbl\")\n bio.write(lab)\n bio.seek(0)\n self._write_bytes(self._tag(bio.read(), \"value_labels\"))\n\n def _write_file_close_tag(self) -> None:\n self._update_map(\"stata_data_close\")\n self._write_bytes(bytes(\"</stata_dta>\", \"utf-8\"))\n self._update_map(\"end-of-file\")\n\n def _update_strl_names(self) -> None:\n \"\"\"\n Update column names for conversion to strl if they might have been\n changed to comply with Stata naming rules\n \"\"\"\n # Update convert_strl if names changed\n for orig, new in self._converted_names.items():\n if orig in self._convert_strl:\n idx = self._convert_strl.index(orig)\n self._convert_strl[idx] = new\n\n def _convert_strls(self, data: DataFrame) -> DataFrame:\n \"\"\"\n Convert columns to StrLs if either very large or in the\n convert_strl variable\n \"\"\"\n convert_cols = [\n col\n for i, col in enumerate(data)\n if self.typlist[i] == 32768 or col in self._convert_strl\n ]\n\n if convert_cols:\n ssw = StataStrLWriter(data, convert_cols, version=self._dta_version)\n tab, new_data = ssw.generate_table()\n data = new_data\n self._strl_blob = ssw.generate_blob(tab)\n return data\n\n def _set_formats_and_types(self, dtypes: Series) -> None:\n self.typlist = []\n self.fmtlist = []\n for col, dtype in dtypes.items():\n force_strl = col in self._convert_strl\n fmt = _dtype_to_default_stata_fmt(\n dtype,\n self.data[col],\n dta_version=self._dta_version,\n force_strl=force_strl,\n )\n self.fmtlist.append(fmt)\n self.typlist.append(\n _dtype_to_stata_type_117(dtype, self.data[col], force_strl)\n )\n\n\nclass StataWriterUTF8(StataWriter117):\n \"\"\"\n Stata binary dta file writing in Stata 15 (118) and 16 (119) 
formats\n\n DTA 118 and 119 format files support unicode string data (both fixed\n and strL) format. Unicode is also supported in value labels, variable\n labels and the dataset label. Format 119 is automatically used if the\n file contains more than 32,767 variables.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n fname : path (string), buffer or path object\n string, path object (pathlib.Path or py._path.local.LocalPath) or\n object implementing a binary write() functions. If using a buffer\n then the buffer will not be automatically closed after the file\n is written.\n data : DataFrame\n Input to save\n convert_dates : dict, default None\n Dictionary mapping columns containing datetime types to stata internal\n format to use when writing the dates. Options are 'tc', 'td', 'tm',\n 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.\n Datetime columns that do not have a conversion type specified will be\n converted to 'tc'. Raises NotImplementedError if a datetime column has\n timezone information\n write_index : bool, default True\n Write the index to Stata dataset.\n byteorder : str, default None\n Can be \">\", \"<\", \"little\", or \"big\". default is `sys.byteorder`\n time_stamp : datetime, default None\n A datetime to use as file creation date. Default is the current time\n data_label : str, default None\n A label for the data set. Must be 80 characters or smaller.\n variable_labels : dict, default None\n Dictionary containing columns as keys and variable labels as values.\n Each label must be 80 characters or smaller.\n convert_strl : list, default None\n List of columns names to convert to Stata StrL format. Columns with\n more than 2045 characters are automatically written as StrL.\n Smaller columns can be converted by including the column name. Using\n StrLs can reduce output file size when strings are longer than 8\n characters, and either frequently repeated or sparse.\n version : int, default None\n The dta version to use. By default, uses the size of data to determine\n the version. 118 is used if data.shape[1] <= 32767, and 119 is used\n for storing larger DataFrames.\n\n Returns\n -------\n StataWriterUTF8\n The instance has a write_file method, which will write the file to the\n given `fname`.\n\n Raises\n ------\n NotImplementedError\n * If datetimes contain timezone information\n ValueError\n * Columns listed in convert_dates are neither datetime64[ns]\n or datetime.datetime\n * Column dtype is not representable in Stata\n * Column listed in convert_dates is not in DataFrame\n * Categorical label contains more than 32,000 characters\n\n Examples\n --------\n Using Unicode data and column names\n\n >>> from pandas.io.stata import StataWriterUTF8\n >>> data = pd.DataFrame([[1.0, 1, 'ᴬ']], columns=['a', 'β', 'ĉ'])\n >>> writer = StataWriterUTF8('./data_file.dta', data)\n >>> writer.write_file()\n\n Or with long strings stored in strl format\n\n >>> data = pd.DataFrame([['ᴀ relatively long ŝtring'], [''], ['']],\n ... columns=['strls'])\n >>> writer = StataWriterUTF8('./data_file_with_long_strings.dta', data,\n ... 
convert_strl=['strls'])\n >>> writer.write_file()\n \"\"\"\n\n _encoding = \"utf-8\"\n\n def __init__(\n self,\n fname: FilePathOrBuffer,\n data: DataFrame,\n convert_dates: Optional[Dict[Label, str]] = None,\n write_index: bool = True,\n byteorder: Optional[str] = None,\n time_stamp: Optional[datetime.datetime] = None,\n data_label: Optional[str] = None,\n variable_labels: Optional[Dict[Label, str]] = None,\n convert_strl: Optional[Sequence[Label]] = None,\n version: Optional[int] = None,\n ):\n if version is None:\n version = 118 if data.shape[1] <= 32767 else 119\n elif version not in (118, 119):\n raise ValueError(\"version must be either 118 or 119.\")\n elif version == 118 and data.shape[1] > 32767:\n raise ValueError(\n \"You must use version 119 for data sets containing more than\"\n \"32,767 variables\"\n )\n\n super().__init__(\n fname,\n data,\n convert_dates=convert_dates,\n write_index=write_index,\n byteorder=byteorder,\n time_stamp=time_stamp,\n data_label=data_label,\n variable_labels=variable_labels,\n convert_strl=convert_strl,\n )\n # Override version set in StataWriter117 init\n self._dta_version = version\n\n def _validate_variable_name(self, name: str) -> str:\n \"\"\"\n Validate variable names for Stata export.\n\n Parameters\n ----------\n name : str\n Variable name\n\n Returns\n -------\n str\n The validated name with invalid characters replaced with\n underscores.\n\n Notes\n -----\n Stata 118+ support most unicode characters. The only limitation is in\n the ascii range where the characters supported are a-z, A-Z, 0-9 and _.\n \"\"\"\n # High code points appear to be acceptable\n for c in name:\n if (\n ord(c) < 128\n and (c < \"A\" or c > \"Z\")\n and (c < \"a\" or c > \"z\")\n and (c < \"0\" or c > \"9\")\n and c != \"_\"\n ) or 128 <= ord(c) < 256:\n name = name.replace(c, \"_\")\n\n return name\n"
] | [
[
"pandas.Series",
"numpy.asarray",
"pandas._libs.lib.tuples_to_object_array",
"numpy.cumsum",
"numpy.dtype",
"pandas.core.indexes.base.Index",
"pandas.core.indexes.frozen.FrozenList",
"numpy.concatenate",
"pandas._config.get_option",
"numpy.all",
"numpy.any",
"pandas.core.sorting.get_group_index",
"pandas.core.missing.clean_reindex_fill_method",
"pandas.core.indexes.base._ensure_has_len",
"numpy.where",
"pandas.core.indexes.numeric.Int64Index",
"pandas.core.arrays.categorical.factorize_from_iterables",
"numpy.arange",
"pandas._libs.lib.fast_unique_multiple",
"pandas.core.algorithms.unique",
"numpy.lexsort",
"numpy.bitwise_or.reduce",
"pandas.core.algorithms.factorize",
"pandas.core.common.cast_scalar_indexer",
"pandas._libs.lib.fast_zip",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_iterator",
"pandas._libs.lib.infer_dtype",
"pandas._libs.algos.is_lexsorted",
"pandas.core.dtypes.cast.coerce_indexer_dtype",
"pandas.core.sorting.lexsort_indexer",
"numpy.zeros",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.io.formats.format._get_adjustment",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.indexes.base.ensure_index",
"pandas.core.dtypes.common.is_hashable",
"pandas.core.common.is_true_slices",
"numpy.delete",
"pandas.core.sorting.indexer_from_factorized",
"pandas._libs.hashtable.duplicated_int64",
"pandas.io.formats.printing.format_object_attrs",
"pandas.core.dtypes.common.ensure_platform_int",
"numpy.append",
"numpy.array",
"pandas.errors.UnsortedIndexError",
"pandas.core.algorithms.isin",
"pandas.core.common.is_bool_indexer",
"pandas.core.algorithms.take_1d",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.missing.array_equivalent",
"pandas._libs.lib.to_object_array_tuples",
"pandas.core.dtypes.common.is_integer",
"numpy.empty",
"pandas.core.common.is_null_slice",
"pandas.io.formats.printing.pprint_thing",
"pandas.core.dtypes.common.is_object_dtype",
"numpy.bincount",
"pandas.core.dtypes.missing.isna",
"pandas.core.reshape.util.cartesian_product",
"pandas.io.formats.printing.format_object_summary",
"pandas.core.common.index_labels_to_array",
"pandas.core.indexes.base.InvalidIndexError"
],
[
"pandas.to_datetime",
"numpy.dtype",
"pandas.core.indexes.base.Index",
"numpy.iinfo",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas.core.dtypes.common.ensure_object",
"pandas.isna",
"pandas.core.frame.DataFrame",
"numpy.where",
"numpy.ones_like",
"pandas.core.series.Series",
"numpy.unique",
"numpy.arange",
"pandas.DatetimeIndex",
"numpy.frombuffer",
"pandas.core.frame.DataFrame.from_records",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.util._decorators.Appender",
"pandas.io.common.stringify_path",
"numpy.nonzero",
"numpy.isnan",
"pandas.Categorical",
"numpy.logical_or",
"numpy.argsort",
"numpy.array",
"numpy.vectorize",
"pandas.io.common.get_filepath_or_buffer",
"pandas.to_timedelta",
"pandas._libs.lib.infer_dtype",
"numpy.isinf",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
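The `generate_table` docstring in the pandas row above describes how each strL's (v, o) position is packed into a single uint64. Below is a minimal standalone sketch of that packing, mirroring `_convert_key`: the offset is 2 ** (8 * (8 - o_size)), which agrees with the docstring's `o_size * 8` only for version 117, where v and o are both four bytes wide. `encode_vo` and `decode_vo` are hypothetical helper names, not pandas API.

O_SIZE = {117: 4, 118: 6, 119: 5}  # bytes used by o, per dta version

def encode_vo(v, o, version=117):
    # v occupies the low (8 - o_size) bytes, o the high o_size bytes
    shift = 8 * (8 - O_SIZE[version])
    return v + (o << shift)

def decode_vo(key, version=117):
    shift = 8 * (8 - O_SIZE[version])
    return key & ((1 << shift) - 1), key >> shift

# Round trip under dta 118: v in the low 2 bytes, o in the high 6
assert decode_vo(encode_vo(3, 7, version=118), version=118) == (3, 7)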
ihmeuw-msca/SLIME | [
"255dfc6fc1880545f1ca9a5062eff823571cc025"
] | [
"src/slime/core/utils.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n utils\n ~~~~~\n\"\"\"\nimport numpy as np\n\n\ndef sizes_to_indices(sizes):\n \"\"\"Converting sizes to corresponding indices.\n Args:\n sizes (numpy.dnarray):\n An array consist of non-negative number.\n Returns:\n list{range}:\n List the indices.\n \"\"\"\n u_id = np.cumsum(sizes)\n l_id = np.insert(u_id[:-1], 0, 0)\n\n return [\n np.arange(l, u) for l, u in zip(l_id, u_id)\n ]\n"
] | [
[
"numpy.arange",
"numpy.insert",
"numpy.cumsum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
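The `sizes_to_indices` helper in the SLIME row above turns a vector of block sizes into per-block index ranges by pairing a cumulative sum (the upper bounds) with a zero-prepended copy of it (the lower bounds). A small usage sketch, assuming the package is importable as its file path suggests:

import numpy as np
from slime.core.utils import sizes_to_indices  # path per the row above

indices = sizes_to_indices(np.array([2, 3, 1]))
# -> [array([0, 1]), array([2, 3, 4]), array([5])]
assert [list(ix) for ix in indices] == [[0, 1], [2, 3, 4], [5]]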
KANG91/Deep_Learning | [
"e3e9de769ab835215d0ebeee79ff869afbe64ebf",
"e3e9de769ab835215d0ebeee79ff869afbe64ebf",
"e3e9de769ab835215d0ebeee79ff869afbe64ebf"
] | [
"lab-12-2-char-seq-rnn.py",
"lab-05-1-logistic_regression.py",
"lab-13-3-mnist_save_restore.py"
] | [
"import tensorflow as tf\nimport numpy as np\ntf.set_random_seed(777) # reproducibility\n\nsample = \" if you want you\"\nidx2char = list(set(sample)) # index -> char\nchar2idx = {c: i for i, c in enumerate(idx2char)} # char -> idex\n\n# hyper parameters\ndic_size = len(char2idx) # RNN input size (one hot size)\nrnn_hidden_size = len(char2idx) # RNN output size\nnum_classes = len(char2idx) # final output size (RNN or softmax, etc.)\nbatch_size = 1 # one sample data, one batch\nsequence_length = len(sample) - 1 # number of lstm rollings (unit #)\n\nsample_idx = [char2idx[c] for c in sample] # char to index\nx_data = [sample_idx[:-1]] # X data sample (0 ~ n-1) hello: hell\ny_data = [sample_idx[1:]] # Y label sample (1 ~ n) hello: ello\n\nX = tf.placeholder(tf.int32, [None, sequence_length]) # X data\nY = tf.placeholder(tf.int32, [None, sequence_length]) # Y label\n\nx_one_hot = tf.one_hot(X, num_classes) # one hot: 1 -> 0 1 0 0 0 0 0 0 0 0\ncell = tf.contrib.rnn.BasicLSTMCell(\n num_units=rnn_hidden_size, state_is_tuple=True)\ninitial_state = cell.zero_state(batch_size, tf.float32)\noutputs, _states = tf.nn.dynamic_rnn(\n cell, x_one_hot, initial_state=initial_state, dtype=tf.float32)\n\nweights = tf.ones([batch_size, sequence_length])\nsequence_loss = tf.contrib.seq2seq.sequence_loss(\n logits=outputs, targets=Y, weights=weights)\nloss = tf.reduce_mean(sequence_loss)\ntrain = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)\n\nprediction = tf.argmax(outputs, axis=2)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(3000):\n l, _ = sess.run([loss, train], feed_dict={X: x_data, Y: y_data})\n result = sess.run(prediction, feed_dict={X: x_data})\n\n # print char using dic\n result_str = [idx2char[c] for c in np.squeeze(result)]\n\n print(i, \"loss:\", l, \"Prediction:\", ''.join(result_str))\n\n\n'''\n0 loss: 2.29895 Prediction: nnuffuunnuuuyuy\n1 loss: 2.29675 Prediction: nnuffuunnuuuyuy\n2 loss: 2.29459 Prediction: nnuffuunnuuuyuy\n3 loss: 2.29247 Prediction: nnuffuunnuuuyuy\n\n...\n\n1413 loss: 1.3745 Prediction: if you want you\n1414 loss: 1.3743 Prediction: if you want you\n1415 loss: 1.3741 Prediction: if you want you\n1416 loss: 1.3739 Prediction: if you want you\n1417 loss: 1.3737 Prediction: if you want you\n1418 loss: 1.37351 Prediction: if you want you\n1419 loss: 1.37331 Prediction: if you want you\n'''\n",
"# Lab 5 Logistic Regression Classifier\nimport tensorflow as tf\ntf.set_random_seed(777) # for reproducibility\n\nx_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]\ny_data = [[0], [0], [0], [1], [1], [1]]\n\n# placeholders for a tensor that will be always fed.\nX = tf.placeholder(tf.float32, shape=[None, 2])\nY = tf.placeholder(tf.float32, shape=[None, 1])\n\nW = tf.Variable(tf.random_normal([2, 1]), name='weight')\nb = tf.Variable(tf.random_normal([1]), name='bias')\n\n# Hypothesis using sigmoid: tf.div(1., 1. + tf.exp(tf.matmul(X, W)))\nhypothesis = tf.sigmoid(tf.matmul(X, W) + b)\n\n# Cost function\ncost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *\n tf.log(1 - hypothesis))\n\ntrain = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)\n\n# Accuracy computation\n# True if hypothesis>0.5 else False\npredicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)\naccuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))\n\n# Launch graph\nwith tf.Session() as sess:\n # Initialize TensorFlow variables\n sess.run(tf.global_variables_initializer())\n feed = {X: x_data, Y: y_data}\n\n for step in range(10001):\n sess.run(train, feed_dict=feed)\n if step % 200 == 0:\n print(step, sess.run(cost, feed_dict=feed), sess.run(W))\n\n # Accuracy report\n h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict=feed)\n print(\"\\nHypothesis: \", h, \"\\nCorrect (Y): \", c, \"\\nAccuracy: \", a)\n\n'''\nHypothesis: [[ 0.03074029]\n [ 0.15884677]\n [ 0.30486736]\n [ 0.78138196]\n [ 0.93957496]\n [ 0.98016882]]\nCorrect (Y): [[ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 1.]\n [ 1.]]\nAccuracy: 1.0\n'''\n",
"# Lab 7 Learning rate and Evaluation\nimport tensorflow as tf\nimport random\nimport matplotlib.pyplot as plt\nimport os\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ntf.set_random_seed(777) # reproducibility\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n# Check out https://www.tensorflow.org/get_started/mnist/beginners for\n# more information about the mnist dataset\n\n# parameters\nlearning_rate = 0.001\ntraining_epochs = 15\nbatch_size = 100\n\nCHECK_POINT_DIR = TB_SUMMARY_DIR = './tb/mnist2'\n\n\n# input place holders\nX = tf.placeholder(tf.float32, [None, 784])\nY = tf.placeholder(tf.float32, [None, 10])\n\n# Image input\nx_image = tf.reshape(X, [-1, 28, 28, 1])\ntf.summary.image('input', x_image, 3)\n\n# dropout (keep_prob) rate 0.7~0.5 on training, but should be 1 for testing\nkeep_prob = tf.placeholder(tf.float32)\n\n# weights & bias for nn layers\n# http://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow\nwith tf.variable_scope('layer1') as scope:\n W1 = tf.get_variable(\"W\", shape=[784, 512],\n initializer=tf.contrib.layers.xavier_initializer())\n b1 = tf.Variable(tf.random_normal([512]))\n L1 = tf.nn.relu(tf.matmul(X, W1) + b1)\n L1 = tf.nn.dropout(L1, keep_prob=keep_prob)\n\n tf.summary.histogram(\"X\", X)\n tf.summary.histogram(\"weights\", W1)\n tf.summary.histogram(\"bias\", b1)\n tf.summary.histogram(\"layer\", L1)\n\nwith tf.variable_scope('layer2') as scope:\n W2 = tf.get_variable(\"W\", shape=[512, 512],\n initializer=tf.contrib.layers.xavier_initializer())\n b2 = tf.Variable(tf.random_normal([512]))\n L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)\n L2 = tf.nn.dropout(L2, keep_prob=keep_prob)\n\n tf.summary.histogram(\"weights\", W2)\n tf.summary.histogram(\"bias\", b2)\n tf.summary.histogram(\"layer\", L2)\n\nwith tf.variable_scope('layer3') as scope:\n W3 = tf.get_variable(\"W\", shape=[512, 512],\n initializer=tf.contrib.layers.xavier_initializer())\n b3 = tf.Variable(tf.random_normal([512]))\n L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)\n L3 = tf.nn.dropout(L3, keep_prob=keep_prob)\n\n tf.summary.histogram(\"weights\", W3)\n tf.summary.histogram(\"bias\", b3)\n tf.summary.histogram(\"layer\", L3)\n\nwith tf.variable_scope('layer4') as scope:\n W4 = tf.get_variable(\"W\", shape=[512, 512],\n initializer=tf.contrib.layers.xavier_initializer())\n b4 = tf.Variable(tf.random_normal([512]))\n L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)\n L4 = tf.nn.dropout(L4, keep_prob=keep_prob)\n\n tf.summary.histogram(\"weights\", W4)\n tf.summary.histogram(\"bias\", b4)\n tf.summary.histogram(\"layer\", L4)\n\nwith tf.variable_scope('layer5') as scope:\n W5 = tf.get_variable(\"W\", shape=[512, 10],\n initializer=tf.contrib.layers.xavier_initializer())\n b5 = tf.Variable(tf.random_normal([10]))\n hypothesis = tf.matmul(L4, W5) + b5\n\n tf.summary.histogram(\"weights\", W5)\n tf.summary.histogram(\"bias\", b5)\n tf.summary.histogram(\"hypothesis\", hypothesis)\n\n\n# define cost & optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=hypothesis, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\ntf.summary.scalar(\"loss\", cost)\n\nlast_epoch = tf.Variable(0, name='last_epoch')\n\n# Summary\nsummary = tf.summary.merge_all()\n\n# initialize\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n# Create summary writer\nwriter = tf.summary.FileWriter(TB_SUMMARY_DIR)\nwriter.add_graph(sess.graph)\nglobal_step = 0\n\n# Savor and Restore\nsaver 
= tf.train.Saver()\ncheckpoint = tf.train.get_checkpoint_state(CHECK_POINT_DIR)\n\nif checkpoint and checkpoint.model_checkpoint_path:\n try:\n saver.restore(sess, checkpoint.model_checkpoint_path)\n print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n except:\n print(\"Error on loading old network weights\")\nelse:\n print(\"Could not find old network weights\")\n\nstart_from = sess.run(last_epoch)\n\n# train my model\nprint('Start learning from:', start_from)\n\nfor epoch in range(start_from, training_epochs):\n print('Start Epoch:', epoch)\n\n avg_cost = 0\n total_batch = int(mnist.train.num_examples / batch_size)\n\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n feed_dict = {X: batch_xs, Y: batch_ys, keep_prob: 0.7}\n s, _ = sess.run([summary, optimizer], feed_dict=feed_dict)\n writer.add_summary(s, global_step=global_step)\n global_step += 1\n\n avg_cost += sess.run(cost, feed_dict=feed_dict) / total_batch\n\n print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))\n\n print(\"Saving network...\")\n sess.run(last_epoch.assign(epoch + 1))\n if not os.path.exists(CHECK_POINT_DIR):\n os.makedirs(CHECK_POINT_DIR)\n saver.save(sess, CHECK_POINT_DIR + \"/model\", global_step=i)\n\nprint('Learning Finished!')\n\n# Test model and check accuracy\ncorrect_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint('Accuracy:', sess.run(accuracy, feed_dict={\n X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))\n\n# Get one and predict\nr = random.randint(0, mnist.test.num_examples - 1)\nprint(\"Label: \", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))\nprint(\"Prediction: \", sess.run(\n tf.argmax(hypothesis, 1), {X: mnist.test.images[r:r + 1], keep_prob: 1}))\n\n# plt.imshow(mnist.test.images[r:r + 1].\n# reshape(28, 28), cmap='Greys', interpolation='nearest')\n# plt.show()\n\n'''\n\n...\n\nSuccessfully loaded: ./tb/mnist/model-549\nStart learning from: 2\nEpoch: 2\n\n...\ntensorboard --logdir tb/\nStarting TensorBoard b'41' on port 6006\n(You can navigate to http://10.0.1.4:6006)\n\n'''\n"
] | [
[
"tensorflow.nn.dynamic_rnn",
"tensorflow.reduce_mean",
"tensorflow.contrib.seq2seq.sequence_loss",
"tensorflow.contrib.rnn.BasicLSTMCell",
"numpy.squeeze",
"tensorflow.placeholder",
"tensorflow.ones",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.one_hot",
"tensorflow.Session",
"tensorflow.set_random_seed",
"tensorflow.argmax"
],
[
"tensorflow.matmul",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.log",
"tensorflow.Session",
"tensorflow.set_random_seed",
"tensorflow.random_normal"
],
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.cast",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.Variable",
"tensorflow.summary.image",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.set_random_seed",
"tensorflow.summary.histogram",
"tensorflow.train.get_checkpoint_state",
"tensorflow.summary.FileWriter",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
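lab-05-1 in the row above builds the logistic-regression cost -mean(Y*log(h) + (1-Y)*log(1-h)) and lets `tf.train.GradientDescentOptimizer` differentiate it. Here is a NumPy-only sketch of the same fit with the gradient X.T @ (h - Y) / m written out by hand; the toy data and learning rate mirror the lab, though exact final numbers may differ slightly.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

X = np.array([[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]], dtype=float)
Y = np.array([[0], [0], [0], [1], [1], [1]], dtype=float)
W = np.zeros((2, 1))
b = 0.0

for _ in range(10001):
    h = sigmoid(X @ W + b)                 # hypothesis
    W -= 0.01 * (X.T @ (h - Y)) / len(X)   # d(cost)/dW
    b -= 0.01 * float(np.mean(h - Y))      # d(cost)/db

cost = -np.mean(Y * np.log(h) + (1 - Y) * np.log(1 - h))
print(cost, np.mean((h > 0.5) == Y))       # accuracy reaches 1.0 on this toy set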
zhulingchen/deep-reinforcement-learning | [
"193486659e17861208fa0a8703487e7be5868ff9",
"193486659e17861208fa0a8703487e7be5868ff9"
] | [
"p2_continuous-control/agent_ddpg.py",
"p1_navigation/agent_dqn_tf.py"
] | [
"import numpy as np\nimport random\nimport copy\nfrom collections import namedtuple, deque\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom model_ddpg import Actor, Critic\nfrom replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\n\nBUFFER_SIZE = int(1e6) # replay buffer size\nSTART_SIZE = 1024 # when to start training\nBATCH_SIZE = 512 # minibatch size\nGAMMA = 0.99 # discount factor\nTAU = 1e-3 # for soft update of target parameters\nLR_ACTOR = 1e-3 # learning rate of the actor\nLR_CRITIC = 1e-3 # learning rate of the critic\nWEIGHT_DECAY = 0 # L2 weight decay\nTRAIN_EVERY = 5 # how often to train a batch\nTRAIN_STEPS = 3 # how many training steps when a batch is trained\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Agent():\n \"\"\"Interacts with and learns from the environment.\"\"\"\n \n def __init__(self, num_agents, state_size, action_size, random_seed, use_per=False):\n \"\"\"Initialize an Agent object.\n \n Params\n ======\n num_agents (int): number of agents\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n random_seed (int): random seed\n use_per (bool): whether to use prioritized replay buffer\n \"\"\"\n self.num_agents = num_agents\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(random_seed)\n self.use_per = use_per\n\n # Actor Network (w/ Target Network)\n self.actor_local = Actor(state_size, action_size, random_seed).to(device)\n self.actor_target = Actor(state_size, action_size, random_seed).to(device)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)\n\n # Critic Network (w/ Target Network)\n self.critic_local = Critic(state_size, action_size, random_seed).to(device)\n self.critic_target = Critic(state_size, action_size, random_seed).to(device)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)\n\n # Noise process\n self.noise = OUNoise(action_size, random_seed)\n\n # Replay memory\n if use_per:\n self.memory = PrioritizedReplayBuffer(BUFFER_SIZE, BATCH_SIZE)\n else:\n self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, random_seed)\n\n # Initialize time step\n self.t_step = 0\n\n def get_critic_Q(self, states, actions, rewards, next_states, dones, gamma, is_train=True):\n # Get max predicted Q values (for next states) from target model\n if is_train:\n actions_next = self.actor_target(next_states)\n Q_targets_next = self.critic_target(next_states, actions_next)\n # Compute Q targets for current states (y_i)\n Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)\n Q_expected = self.critic_local(states, actions)\n else:\n self.actor_local.eval()\n self.actor_target.eval()\n self.critic_local.eval()\n self.critic_target.eval()\n with torch.no_grad():\n actions_next = self.actor_target(next_states)\n Q_targets_next = self.critic_target(next_states, actions_next)\n # Compute Q targets for current states (y_i)\n Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)\n Q_expected = self.critic_local(states, actions)\n self.actor_local.train()\n self.actor_target.train()\n self.critic_local.train()\n self.critic_target.train()\n return Q_expected, Q_targets\n\n def step(self, states, actions, rewards, next_states, dones):\n \"\"\"Save experience in replay memory, and use random sample from buffer to learn.\"\"\"\n # Save experience / reward\n if self.use_per:\n # Convert numpy array to torch tensor\n states = 
torch.from_numpy(states).float().to(device)\n actions = torch.from_numpy(actions).float().to(device)\n rewards = torch.from_numpy(np.array(rewards)).float().unsqueeze(1).to(device)\n next_states = torch.from_numpy(next_states).float().to(device)\n dones = torch.from_numpy(np.array(dones).astype(np.uint8)).float().unsqueeze(1).to(device)\n # Get max predicted Q values (for next states) from target model\n Q_expected, Q_targets = self.get_critic_Q(states, actions, rewards, next_states, dones, GAMMA, is_train=False)\n # Convert torch tensor to numpy array\n states = states.cpu().data.numpy()\n actions = actions.cpu().data.numpy()\n rewards = rewards.cpu().data.numpy().squeeze(1).tolist()\n next_states = next_states.cpu().data.numpy()\n dones = dones.cpu().data.numpy().squeeze(1).astype(np.bool).tolist()\n # Calculate error\n errors = Q_expected - Q_targets\n errors = errors.cpu().data.numpy().squeeze(1)\n for i in range(self.num_agents):\n self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i], errors[i])\n else:\n for i in range(self.num_agents):\n self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i])\n\n # Update time step\n self.t_step += 1\n\n # If enough samples are available in memory,\n if len(self.memory) >= START_SIZE:\n # Get random subset and learn every TRAIN_EVERY time steps,\n if self.t_step % TRAIN_EVERY == 0:\n for _ in range(TRAIN_STEPS):\n if self.use_per:\n experiences, idx_tree, is_weight = self.memory.sample()\n self.learn(experiences, GAMMA, idx_tree, is_weight)\n else:\n experiences = self.memory.sample()\n self.learn(experiences, GAMMA)\n\n def act(self, states, add_noise=True):\n \"\"\"Returns epsilon-greedy actions for given state as per current policy.\"\"\"\n states = torch.from_numpy(states).float().to(device)\n self.actor_local.eval()\n with torch.no_grad():\n actions = self.actor_local(states).cpu().data.numpy()\n self.actor_local.train()\n if add_noise:\n actions += np.concatenate([np.expand_dims(self.noise.sample(), axis=0) for _ in range(self.num_agents)], axis=0)\n return np.clip(actions, -1, 1)\n\n def reset(self):\n self.noise.reset()\n\n def learn(self, experiences, gamma, idx_tree=None, is_weight=None):\n \"\"\"Update policy and value parameters using given batch of experience tuples.\n Q_targets = r + γ * critic_target(next_state, actor_target(next_state))\n where:\n actor_target(state) -> action\n critic_target(state, action) -> Q-value\n\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples \n gamma (float): discount factor\n \"\"\"\n states, actions, rewards, next_states, dones = experiences\n\n # ---------------------------- update critic ---------------------------- #\n # Get predicted next-state actions and Q values from target models\n Q_expected, Q_targets = self.get_critic_Q(states, actions, rewards, next_states, dones, gamma, is_train=True)\n # Compute critic loss\n if self.use_per:\n assert ((is_weight is not None) and (is_weight.size > 0))\n is_weight = torch.from_numpy(is_weight).float().to(device)\n critic_loss = (is_weight * F.smooth_l1_loss(Q_expected, Q_targets, reduction='none').squeeze()).mean()\n else:\n critic_loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1) # use gradient norm clipping\n self.critic_optimizer.step()\n\n # ---------------------------- update actor ---------------------------- #\n # Compute 
actor loss\n actions_pred = self.actor_local(states)\n actor_loss = -self.critic_local(states, actions_pred).mean()\n # Minimize the loss\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n # ----------------------- update target networks ----------------------- #\n self.soft_update(self.critic_local, self.critic_target, TAU)\n self.soft_update(self.actor_local, self.actor_target, TAU)\n\n # update priority\n if self.use_per:\n assert((idx_tree is not None) and (len(idx_tree) > 0))\n errors = Q_expected - Q_targets\n errors = errors.cpu().data.numpy().squeeze()\n for i in range(self.memory.batch_size):\n self.memory.update(idx_tree[i], errors[i])\n \n\n def soft_update(self, local_model, target_model, tau):\n \"\"\"Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model: PyTorch model (weights will be copied from)\n target_model: PyTorch model (weights will be copied to)\n tau (float): interpolation parameter \n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n\n \nclass OUNoise:\n \"\"\"Ornstein-Uhlenbeck process.\"\"\"\n\n def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):\n \"\"\"Initialize parameters and noise process.\"\"\"\n self.mu = mu * np.ones(size)\n self.theta = theta\n self.sigma = sigma\n self.seed = random.seed(seed)\n self.reset()\n\n def reset(self):\n \"\"\"Reset the internal state (= noise) to mean (mu).\"\"\"\n self.state = copy.copy(self.mu)\n\n def sample(self):\n \"\"\"Update internal state and return it as a noise sample.\"\"\"\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state",
"import numpy as np\nimport random\nimport tensorflow as tf\n# tf.compat.v1.disable_eager_execution() # disable eager execution (enabled by TF2 by default)\n\nfrom model_dqn_tf import QNetwork\nfrom replay_buffer_tf import ReplayBuffer, PrioritizedReplayBuffer\n\n# constant values\nBUFFER_SIZE = int(1e5) # replay buffer size\nSTART_SIZE = int(1e3) # when to start training\nBATCH_SIZE = 128 # minibatch size\nGAMMA = 0.99 # discount factor\nTAU = 1e-3 # for soft update of target parameters\nLR = 5e-4 # learning rate\nTRAIN_EVERY = 4 # how often to train a batch\nTRAIN_STEPS = 2 # how many training steps when a batch is trained\n\n\n\nclass Agent():\n \"\"\"Interacts with and learns from the environment.\"\"\"\n\n def __init__(self, action_size, seed, use_double=False, use_dueling=False, use_per=False):\n \"\"\"Initialize an Agent object.\n\n Params\n ======\n action_size (int): dimension of each action\n seed (int): random seed\n use_double (bool): whether to use double deep Q-learning\n use_dueling (bool): whether to use the dueling network architecture\n use_per (bool): whether to use prioritized replay buffer\n \"\"\"\n self.action_size = action_size\n self.seed = random.seed(seed)\n self.use_double = use_double\n self.use_per = use_per\n\n # Q-Network\n self.qnetwork_local = QNetwork(action_size, seed, use_dueling=use_dueling)\n self.qnetwork_target = QNetwork(action_size, seed, use_dueling=use_dueling)\n self.optimizer = tf.keras.optimizers.Adam(lr=LR)\n\n # Replay memory\n if use_per:\n self.memory = PrioritizedReplayBuffer(BUFFER_SIZE, BATCH_SIZE)\n else:\n self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, seed)\n\n # Initialize time step\n self.t_step = 0\n\n def get_Q(self, state, action, reward, next_state, done, gamma, is_train=True):\n # Get max predicted Q values (for next states) from target model\n if is_train:\n if self.use_double:\n idx = tf.argmax(self.qnetwork_local(next_state), axis=-1, output_type=tf.int32)\n Q_target_next = tf.expand_dims(tf.gather_nd(self.qnetwork_target(next_state), tf.stack([tf.range(len(idx)), idx], axis=-1)), axis=-1)\n else:\n Q_target_next = tf.expand_dims(tf.reduce_max(self.qnetwork_target(next_state), axis=-1), axis=-1)\n # Compute Q targets for current states\n Q_target = reward + (gamma * (1 - done) * Q_target_next)\n # Get expected Q values from local model\n Q_expected = tf.expand_dims(tf.gather_nd(self.qnetwork_local(state), tf.stack([tf.range(len(action)), action[:, 0]], axis=-1)), axis=-1)\n else:\n if self.use_double:\n idx = np.argmax(self.qnetwork_local.predict(next_state), axis=-1).astype('int')\n Q_target_next = np.take_along_axis(self.qnetwork_target.predict(next_state), idx[:, np.newaxis], axis=-1)\n else:\n Q_target_next = self.qnetwork_target.predict(next_state).max(-1)[:, np.newaxis]\n # Compute Q targets for current states\n Q_target = reward + (gamma * (1 - done) * Q_target_next)\n # Get expected Q values from local model\n Q_expected = np.take_along_axis(self.qnetwork_local.predict(state), action, axis=-1)\n return Q_expected, Q_target\n\n def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory\n if self.use_per:\n state = state[np.newaxis]\n action = np.array([action])[np.newaxis]\n next_state = next_state[np.newaxis]\n done = int(done)\n # Get max predicted Q values (for next states) from target model\n Q_expected, Q_target = self.get_Q(state, action, reward, next_state, done, GAMMA, is_train=False)\n state = state.squeeze()\n action = action.item()\n next_state = 
next_state.squeeze()\n done = bool(done)\n # Calculate error\n error = Q_expected - Q_target\n error = error.item()\n self.memory.add(state, action, reward, next_state, done, error)\n else:\n self.memory.add(state, action, reward, next_state, done)\n\n # Update time step\n self.t_step += 1\n\n # If enough samples are available in memory,\n if len(self.memory) >= START_SIZE:\n # Get random subset and learn every TRAIN_EVERY time steps,\n if self.t_step % TRAIN_EVERY == 0:\n for _ in range(TRAIN_STEPS):\n if self.use_per:\n experiences, idx_tree, is_weight = self.memory.sample()\n self.learn(experiences, GAMMA, idx_tree, is_weight)\n else:\n experiences = self.memory.sample()\n self.learn(experiences, GAMMA)\n\n def act(self, state, eps=0.):\n \"\"\"Returns actions for given state as per current policy.\n\n Params\n ======\n state (array_like): current state\n eps (float): epsilon, for epsilon-greedy action selection\n \"\"\"\n action_values = self.qnetwork_local.predict(np.atleast_2d(state))\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n action = np.argmax(action_values.squeeze())\n else:\n action = random.choice(np.arange(self.action_size))\n return action.item()\n\n def learn(self, experiences, gamma, idx_tree=None, is_weight=None):\n \"\"\"Update value parameters using given batch of experience tuples.\n\n Params\n ======\n idx_tree\n experiences (Tuple[numpy.ndarray]): tuple of (s, a, r, s', done) tuples\n gamma (float): discount factor\n \"\"\"\n states, actions, rewards, next_states, dones = experiences\n\n # Compute loss\n with tf.GradientTape() as tape:\n # Get max predicted Q values (for next states) from target model\n Q_expected, Q_targets = self.get_Q(states, actions, rewards, next_states, dones, gamma, is_train=True)\n if self.use_per:\n assert (is_weight is not None) and (is_weight.size > 0)\n huber_loss = tf.losses.Huber(reduction=tf.losses.Reduction.NONE)\n loss = tf.reduce_mean(is_weight * tf.squeeze(huber_loss(Q_expected, Q_targets)))\n else:\n loss = tf.reduce_mean(tf.square(Q_targets - Q_expected))\n\n # One-step gradient descent for training network weights\n variables = self.qnetwork_local.trainable_variables\n gradients = tape.gradient(loss, variables)\n self.optimizer.apply_gradients(zip(gradients, variables))\n\n # Update target network\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)\n\n # update priority\n if self.use_per:\n assert (idx_tree is not None) and (len(idx_tree) > 0)\n errors = Q_expected - Q_targets\n if tf.executing_eagerly():\n errors = errors.numpy().squeeze()\n else:\n errors = tf.keras.backend.eval(errors).squeeze()\n for i in range(self.memory.batch_size):\n self.memory.update(idx_tree[i], errors[i])\n\n def soft_update(self, local_model, target_model, tau):\n \"\"\"\n Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model (TensorFlow model): weights will be copied from\n target_model (TensorFlow model): weights will be copied to\n tau (float): interpolation parameter\n \"\"\"\n for target_param, local_param in zip(target_model.trainable_variables, local_model.trainable_variables):\n target_param.assign(tau * local_param + (1.0 - tau) * target_param)"
] | [
[
"numpy.clip",
"torch.from_numpy",
"numpy.ones",
"torch.nn.functional.mse_loss",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.functional.smooth_l1_loss",
"numpy.array"
],
[
"tensorflow.executing_eagerly",
"numpy.arange",
"tensorflow.losses.Huber",
"numpy.atleast_2d",
"tensorflow.keras.backend.eval",
"tensorflow.keras.optimizers.Adam",
"tensorflow.square",
"numpy.array",
"tensorflow.GradientTape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
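Both agents in the row above (agent_ddpg.py and agent_dqn_tf.py) end with the same soft (Polyak) target update, θ_target = τ*θ_local + (1 - τ)*θ_target. A framework-agnostic sketch of just that step; `soft_update` here operates on plain NumPy arrays rather than torch parameters or TF variables:

import numpy as np

def soft_update(local_params, target_params, tau=1e-3):
    # tau = 1 is a hard copy; the small default (TAU in both agents)
    # makes the target network trail the local one slowly, which
    # stabilises the bootstrapped Q-targets.
    for local, target in zip(local_params, target_params):
        target[...] = tau * local + (1.0 - tau) * target

local = [np.ones((2, 2))]
target = [np.zeros((2, 2))]
soft_update(local, target, tau=0.5)
assert np.allclose(target[0], 0.5)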
KustomApe/nerdape | [
"aef6fb2d1f8c364b26d91bf8570b4487a24de69a",
"aef6fb2d1f8c364b26d91bf8570b4487a24de69a",
"aef6fb2d1f8c364b26d91bf8570b4487a24de69a"
] | [
".history/mercari/mercari_search_20201124185000.py",
".history/spider/car_spider_20201124003716.py",
".history/spider/car_spider_20201124004636.py"
] | [
"from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nimport pandas as pd\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport PyQt5\nimport time\n\"\"\"[Initial Settings]\n初期設定\n\"\"\"\noptions = webdriver.ChromeOptions()\noptions.add_argument('--headeless')\noptions.add_argument('--disable-gpu')\noptions.add_argument('--lang-ja')\nbrowser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')\n\n\"\"\"[CSS Selector Settings]\nCSSセレクターの設定\n\"\"\"\nPAGER = \"li.pager-next a\"\nword = input(\"検索したいキーワードを入力してください:\")\ndf_main = pd.DataFrame(columns=['在庫有無','タイトル','値段','URL'])\ndf_graf = pd.DataFrame(columns=['SOLD','PRICE'])\nn = 1\nres = browser.get(\"https://www.mercari.com/jp/search/?page=\" + str(n) +\"&keyword=\"+ word)\nres = browser.get(\"https://www.mercari.com/jp/search/?page=\"+str(num)+\"&keyword=\"+word)\n\nprint(res)\nbrowser.get(res)\nwhile True:\n if PAGER:\n item_boxlist = browser.find_elements_by_css_selector(\".items-box\")\n for item_box in item_boxlist:\n try:\n if len(item_box.find_elements_by_css_selector(\".item-sold-out-badge\")) > 0:\n sold = \"SOLD\"\n else:\n sold = \"NOT SOLD\"\n sub_title = item_box.find_element_by_class_name(\"items-box-body\")\n title = sub_title.find_element_by_tag_name(\"h3\").text\n item_price = item_box.find_element_by_css_selector(\".items-box-price\")\n price_text = item_price.text\n price_text = re.sub(r\",\", \"\", price_text).lstrip(\"¥ \")\n price_text_int = int(price_text)\n print(price_text_int)\n url = item_box.find_element_by_tag_name(\"a\").get_attribute(\"href\")\n data = pd.Series( [ sold,title,price_text_int,url ], index=df_main.columns )\n grdata = pd.Series( [ sold,price_text_int ], index=df_graf.columns )\n df_main = df_main.append( data, ignore_index=True )\n df_graf = df_graf.append( grdata, ignore_index=True )\n except Exception as e:\n print(e)\n btn = browser.find_element_by_css_selector(PAGER).get_attribute('href')\n n += 1\n print('next url:{}'.format(btn))\n time.sleep(3)\n browser.get(btn)\n print('Moving to next page...')\n else:\n print('No items anymore...')\n break\nprint(df_main)\nsns.stripplot(x='SOLD', y='PRICE', data=df_graf)\nplt.show()\nsns.pairplot(df_graf,hue=\"SOLD\")\nplt.show()\nprint('Writing out to CSV file...')\ndf_main.to_csv(\"pricedata.csv\", encoding=\"utf_8_sig\")\nprint(\"Done\")",
"from selenium import webdriver\nimport pandas as pd\nimport time\n\n\"\"\"[Initial setting]\n初期設定\n\"\"\"\noptions = webdriver.ChromeOptions()\noptions.add_argument('--headeless')\noptions.add_argument('--disable-gpu')\noptions.add_argument('--lang-ja')\nbrowser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')\ndf = pd.DataFrame(columns=['name', 'image', 'price', 'category', 'car'])\nurl = 'https://motorz-garage.com/parts/'",
"from selenium import webdriver\nimport pandas as pd\nimport time\n\n\"\"\"[Initial Setting]\n初期設定\n\"\"\"\noptions = webdriver.ChromeOptions()\noptions.add_argument('--headeless')\noptions.add_argument('--disable-gpu')\noptions.add_argument('--lang-ja')\nbrowser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')\ndf = pd.DataFrame(columns=['name', 'image', 'price', 'category', 'car'])\nurl = 'https://motorz-garage.com/parts/'\n\n\"\"\"[CSS Selector Setting]\nCSSセレクターの設定\n\"\"\"\nPAGER_NEXT = \"li.select-page.arrow a[rel='next']\"\nPOSTS = \".product-item-list__item\"\nPRODUCT_NAME = \".product-item-list__item-name\"\nIMAGE = \".product-item-list__item-image img\"\nPRICE = \".product-item-list__item-price\""
] | [
[
"matplotlib.pyplot.show",
"pandas.Series",
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
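The mercari script in the row above follows the pager with `while True: ... if PAGER:`, but PAGER is a non-empty string, so that test is always true and the loop can only end by raising on the last page. A minimal sketch of a cleanly terminating version of the same pattern, using the selenium 3 helpers the scripts use; `crawl` and `parse_page` are hypothetical names:

import time

def crawl(browser, start_url, next_selector, parse_page, delay=3):
    browser.get(start_url)
    while True:
        parse_page(browser)  # scrape the current page
        links = browser.find_elements_by_css_selector(next_selector)
        if not links:        # no "next" link left -> last page reached
            print('No items anymore...')
            break
        next_url = links[0].get_attribute('href')
        time.sleep(delay)    # stay polite between requests
        browser.get(next_url)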
sanghuynh1501/mlcollect | [
"e85fe6a08e14fa6502166c1a7bfffdcd8c3a25b2",
"e85fe6a08e14fa6502166c1a7bfffdcd8c3a25b2"
] | [
"mlcollect/cnn/lenet.py",
"mlcollect/cnn/minivggnet.py"
] | [
"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras import backend as K\n\n\nclass LeNet:\n @staticmethod\n def build(width, height, depth, classes, last_active=\"softmax\"):\n # Initialize the model\n model = Sequential()\n input_shape = (height, width, depth)\n\n # If we are using 'channels-first', update the input shape\n if K.image_data_format() == 'channels_first':\n input_shape = (depth, height, width)\n\n # First set of CONV => RELU => POOL layers\n model.add(Conv2D(20, (5, 5), padding='same', input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n # Second set of CONV => RELU => POOL layers\n model.add(Conv2D(50, (5, 5), padding='same'))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n # First (and only) set of FC => RELU layers\n model.add(Flatten())\n model.add(Dense(500))\n model.add(Activation('relu'))\n\n model.add(Dense(classes))\n model.add(Activation(last_active))\n \n # return the constructed network architecture\n return model\n",
"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras import backend as K\n\n\nclass MiniVGGNet:\n @staticmethod\n def build(width, height, depth, classes, last_active=\"solfmax\"):\n # Initialize the model, input shape and the channel dimension\n model = Sequential()\n input_shape = (height, width, depth)\n channel_dim = -1\n\n # If we are using 'channels_first', update the input shape and channels dimension\n if K.image_data_format() == 'channels_first':\n input_shape = (depth, height, width)\n channel_dim = 1\n\n # First CONV => RELU => CONV => RELU => POOL layer set\n model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(BatchNormalization(axis=channel_dim))\n model.add(Conv2D(32, (3, 3), padding='same'))\n model.add(Activation('relu'))\n model.add(BatchNormalization(axis=channel_dim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # Second CONV => RELU => CONV => RELU => POOL layer set\n model.add(Conv2D(64, (3, 3), padding='same'))\n model.add(Activation('relu'))\n # model.add(BatchNormalization(axis=channel_dim))\n model.add(Conv2D(64, (3, 3), padding='same'))\n model.add(Activation('relu'))\n # model.add(BatchNormalization(axis=channel_dim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # First (and only) set of FC => RELU layers\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n\n # Softmax classifier\n model.add(Dense(classes))\n model.add(Activation(last_active))\n\n # Return the constructed network architecture\n return model\n"
] | [
[
"tensorflow.keras.layers.Activation",
"tensorflow.keras.backend.image_data_format",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
],
[
"tensorflow.keras.layers.Activation",
"tensorflow.keras.backend.image_data_format",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
keisukefukuda/optuna | [
"ac4ea8d0c74726f8a603ba2cb0bfb7f4112f736e"
] | [
"optuna/visualization/matplotlib/_contour.py"
] | [
"from typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport numpy as np\nimport scipy\n\nfrom optuna._experimental import experimental\nfrom optuna.logging import get_logger\nfrom optuna.study import Study\nfrom optuna.study import StudyDirection\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\nfrom optuna.visualization._utils import _check_plot_args\nfrom optuna.visualization._utils import _get_param_values\nfrom optuna.visualization.matplotlib._matplotlib_imports import _imports\nfrom optuna.visualization.matplotlib._utils import _is_log_scale\nfrom optuna.visualization.matplotlib._utils import _is_numerical\n\n\nif _imports.is_successful():\n from optuna.visualization.matplotlib._matplotlib_imports import Axes\n from optuna.visualization.matplotlib._matplotlib_imports import Colormap\n from optuna.visualization.matplotlib._matplotlib_imports import ContourSet\n from optuna.visualization.matplotlib._matplotlib_imports import plt\n\n_logger = get_logger(__name__)\n\n\nAXES_PADDING_RATIO = 5e-2\n\n\n@experimental(\"2.2.0\")\ndef plot_contour(\n study: Study,\n params: Optional[List[str]] = None,\n *,\n target: Optional[Callable[[FrozenTrial], float]] = None,\n target_name: str = \"Objective Value\",\n) -> \"Axes\":\n \"\"\"Plot the parameter relationship as contour plot in a study with Matplotlib.\n\n Note that, if a parameter contains missing values, a trial with missing values is not plotted.\n\n .. seealso::\n Please refer to :func:`optuna.visualization.plot_contour` for an example.\n\n Warnings:\n Output figures of this Matplotlib-based\n :func:`~optuna.visualization.matplotlib.plot_contour` function would be different from\n those of the Plotly-based :func:`~optuna.visualization.plot_contour`.\n\n Example:\n\n The following code snippet shows how to plot the parameter relationship as contour plot.\n\n .. plot::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_categorical(\"y\", [-1, 0, 1])\n return x ** 2 + y\n\n\n sampler = optuna.samplers.TPESampler(seed=10)\n study = optuna.create_study(sampler=sampler)\n study.optimize(objective, n_trials=30)\n\n optuna.visualization.matplotlib.plot_contour(study, params=[\"x\", \"y\"])\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their target values.\n params:\n Parameter list to visualize. The default is all parameters.\n target:\n A function to specify the value to display. If it is :obj:`None` and ``study`` is being\n used for single-objective optimization, the objective values are plotted.\n\n .. 
note::\n Specify this argument if ``study`` is being used for multi-objective optimization.\n target_name:\n Target's name to display on the color bar.\n\n Returns:\n A :class:`matplotlib.axes.Axes` object.\n\n Raises:\n :exc:`ValueError`:\n If ``target`` is :obj:`None` and ``study`` is being used for multi-objective\n optimization.\n \"\"\"\n\n _imports.check()\n _check_plot_args(study, target, target_name)\n _logger.warning(\n \"Output figures of this Matplotlib-based `plot_contour` function would be different from \"\n \"those of the Plotly-based `plot_contour`.\"\n )\n return _get_contour_plot(study, params, target, target_name)\n\n\ndef _get_contour_plot(\n study: Study,\n params: Optional[List[str]] = None,\n target: Optional[Callable[[FrozenTrial], float]] = None,\n target_name: str = \"Objective Value\",\n) -> \"Axes\":\n # Calculate basic numbers for plotting.\n trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]\n\n if len(trials) == 0:\n _logger.warning(\"Your study does not have any completed trials.\")\n _, ax = plt.subplots()\n return ax\n\n all_params = {p_name for t in trials for p_name in t.params.keys()}\n\n if params is None:\n sorted_params = sorted(all_params)\n elif len(params) <= 1:\n _logger.warning(\"The length of params must be greater than 1.\")\n _, ax = plt.subplots()\n return ax\n else:\n for input_p_name in params:\n if input_p_name not in all_params:\n raise ValueError(\"Parameter {} does not exist in your study.\".format(input_p_name))\n sorted_params = sorted(set(params))\n n_params = len(sorted_params)\n\n plt.style.use(\"ggplot\") # Use ggplot style sheet for similar outputs to plotly.\n if n_params == 2:\n # Set up the graph style.\n fig, axs = plt.subplots()\n axs.set_title(\"Contour Plot\")\n cmap = _set_cmap(study, target)\n contour_point_num = 100\n\n # Prepare data and draw contour plots.\n if params:\n x_param = params[0]\n y_param = params[1]\n else:\n x_param = sorted_params[0]\n y_param = sorted_params[1]\n cs = _generate_contour_subplot(\n trials, x_param, y_param, axs, cmap, contour_point_num, target\n )\n if isinstance(cs, ContourSet):\n axcb = fig.colorbar(cs)\n axcb.set_label(target_name)\n else:\n # Set up the graph style.\n fig, axs = plt.subplots(n_params, n_params)\n fig.suptitle(\"Contour Plot\")\n cmap = _set_cmap(study, target)\n contour_point_num = 100\n\n # Prepare data and draw contour plots.\n cs_list = []\n for x_i, x_param in enumerate(sorted_params):\n for y_i, y_param in enumerate(sorted_params):\n ax = axs[y_i, x_i]\n cs = _generate_contour_subplot(\n trials, x_param, y_param, ax, cmap, contour_point_num, target\n )\n if isinstance(cs, ContourSet):\n cs_list.append(cs)\n if cs_list:\n axcb = fig.colorbar(cs_list[0], ax=axs)\n axcb.set_label(target_name)\n\n return axs\n\n\ndef _set_cmap(study: Study, target: Optional[Callable[[FrozenTrial], float]]) -> \"Colormap\":\n cmap = \"Blues_r\" if target is None and study.direction == StudyDirection.MAXIMIZE else \"Blues\"\n return plt.get_cmap(cmap)\n\n\nclass _LabelEncoder:\n def __init__(self) -> None:\n self.labels: List[str] = []\n\n def fit(self, labels: List[str]) -> \"_LabelEncoder\":\n self.labels = sorted(set(labels))\n return self\n\n def transform(self, labels: List[str]) -> List[int]:\n return [self.labels.index(label) for label in labels]\n\n def fit_transform(self, labels: List[str]) -> List[int]:\n return self.fit(labels).transform(labels)\n\n def get_labels(self) -> List[str]:\n return self.labels\n\n def get_indices(self) -> 
List[int]:\n return list(range(len(self.labels)))\n\n\ndef _calculate_griddata(\n trials: List[FrozenTrial],\n x_param: str,\n x_indices: List[Union[str, int, float]],\n y_param: str,\n y_indices: List[Union[str, int, float]],\n contour_point_num: int,\n target: Optional[Callable[[FrozenTrial], float]],\n) -> Tuple[\n np.ndarray,\n np.ndarray,\n np.ndarray,\n List[Union[int, float]],\n List[Union[int, float]],\n List[Union[int, float]],\n List[Union[int, float]],\n List[int],\n List[str],\n List[int],\n List[str],\n int,\n int,\n]:\n\n # Extract values for x, y, z axes from each trail.\n x_values = []\n y_values = []\n z_values = []\n x_range_values = []\n y_range_values = []\n for trial in trials:\n contains_x_param = x_param in trial.params\n if contains_x_param:\n x_range_values.append(trial.params[x_param])\n\n contains_y_param = y_param in trial.params\n if contains_y_param:\n y_range_values.append(trial.params[y_param])\n\n if not contains_x_param or not contains_y_param:\n continue\n x_values.append(trial.params[x_param])\n y_values.append(trial.params[y_param])\n\n if target is None:\n value = trial.value\n else:\n value = target(trial)\n\n if isinstance(value, int):\n value = float(value)\n elif not isinstance(value, float):\n raise ValueError(\n \"Trial{} has COMPLETE state, but its target value is non-numeric.\".format(\n trial.number\n )\n )\n z_values.append(value)\n\n # Return empty values when x or y has no value.\n if len(x_values) == 0 or len(y_values) == 0:\n return (\n np.array([]),\n np.array([]),\n np.array([]),\n x_values,\n y_values,\n [],\n [],\n [],\n [],\n [],\n [],\n 0,\n 0,\n )\n\n # Add dummy values for grid data calculation when a parameter has one unique value.\n x_values_dummy = []\n y_values_dummy = []\n if len(set(x_values)) == 1:\n x_values_dummy = [x for x in x_indices if x not in x_values]\n x_values = x_values + x_values_dummy * len(x_values)\n y_values = y_values + (y_values * len(x_values_dummy))\n z_values = z_values + (z_values * len(x_values_dummy))\n if len(set(y_values)) == 1:\n y_values_dummy = [y for y in y_indices if y not in y_values]\n y_values = y_values + y_values_dummy * len(y_values)\n x_values = x_values + (x_values * len(y_values_dummy))\n z_values = z_values + (z_values * len(y_values_dummy))\n\n # Convert categorical values to int.\n cat_param_labels_x = [] # type: List[str]\n cat_param_pos_x = [] # type: List[int]\n cat_param_labels_y = [] # type: List[str]\n cat_param_pos_y = [] # type: List[int]\n if not _is_numerical(trials, x_param):\n enc = _LabelEncoder()\n x_range_values = enc.fit_transform(list(map(str, x_range_values)))\n x_values = enc.transform(list(map(str, x_values)))\n cat_param_labels_x = enc.get_labels()\n cat_param_pos_x = enc.get_indices()\n if not _is_numerical(trials, y_param):\n enc = _LabelEncoder()\n y_range_values = enc.fit_transform(list(map(str, y_range_values)))\n y_values = enc.transform(list(map(str, y_values)))\n cat_param_labels_y = enc.get_labels()\n cat_param_pos_y = enc.get_indices()\n\n # Calculate min and max of x and y.\n x_values_min = min(x_range_values)\n x_values_max = max(x_range_values)\n y_values_min = min(y_range_values)\n y_values_max = max(y_range_values)\n\n # Calculate grid data points.\n # For x and y, create 1-D array of evenly spaced coordinates on linear or log scale.\n xi = np.array([])\n yi = np.array([])\n zi = np.array([])\n\n if _is_log_scale(trials, x_param):\n padding_x = (np.log10(x_values_max) - np.log10(x_values_min)) * AXES_PADDING_RATIO\n x_values_min = np.power(10, 
np.log10(x_values_min) - padding_x)\n x_values_max = np.power(10, np.log10(x_values_max) + padding_x)\n xi = np.logspace(np.log10(x_values_min), np.log10(x_values_max), contour_point_num)\n else:\n padding_x = (x_values_max - x_values_min) * AXES_PADDING_RATIO\n x_values_min -= padding_x\n x_values_max += padding_x\n xi = np.linspace(x_values_min, x_values_max, contour_point_num)\n\n if _is_log_scale(trials, y_param):\n padding_y = (np.log10(y_values_max) - np.log10(y_values_min)) * AXES_PADDING_RATIO\n y_values_min = np.power(10, np.log10(y_values_min) - padding_y)\n y_values_max = np.power(10, np.log10(y_values_max) + padding_y)\n yi = np.logspace(np.log10(y_values_min), np.log10(y_values_max), contour_point_num)\n else:\n padding_y = (y_values_max - y_values_min) * AXES_PADDING_RATIO\n y_values_min -= padding_y\n y_values_max += padding_y\n yi = np.linspace(y_values_min, y_values_max, contour_point_num)\n\n # create irregularly spaced map of trial values\n # and interpolate it with Plotly's interpolation formulation\n if x_param != y_param:\n zmap = _create_zmap(x_values, y_values, z_values, xi, yi)\n zi = _interpolate_zmap(zmap, contour_point_num)\n\n return (\n xi,\n yi,\n zi,\n x_values,\n y_values,\n [x_values_min, x_values_max],\n [y_values_min, y_values_max],\n cat_param_pos_x,\n cat_param_labels_x,\n cat_param_pos_y,\n cat_param_labels_y,\n len(x_values_dummy),\n len(y_values_dummy),\n )\n\n\ndef _generate_contour_subplot(\n trials: List[FrozenTrial],\n x_param: str,\n y_param: str,\n ax: \"Axes\",\n cmap: \"Colormap\",\n contour_point_num: int,\n target: Optional[Callable[[FrozenTrial], float]],\n) -> \"ContourSet\":\n\n x_indices = sorted(set(_get_param_values(trials, x_param)))\n y_indices = sorted(set(_get_param_values(trials, y_param)))\n if len(x_indices) < 2:\n _logger.warning(\"Param {} unique value length is less than 2.\".format(x_param))\n return ax\n if len(y_indices) < 2:\n _logger.warning(\"Param {} unique value length is less than 2.\".format(y_param))\n return ax\n\n (\n xi,\n yi,\n zi,\n x_values,\n y_values,\n x_values_range,\n y_values_range,\n x_cat_param_pos,\n x_cat_param_label,\n y_cat_param_pos,\n y_cat_param_label,\n x_values_dummy_count,\n y_values_dummy_count,\n ) = _calculate_griddata(\n trials, x_param, x_indices, y_param, y_indices, contour_point_num, target\n )\n cs = None\n ax.set(xlabel=x_param, ylabel=y_param)\n ax.set_xlim(x_values_range[0], x_values_range[1])\n ax.set_ylim(y_values_range[0], y_values_range[1])\n if len(zi) > 0:\n if _is_log_scale(trials, x_param):\n ax.set_xscale(\"log\")\n if _is_log_scale(trials, y_param):\n ax.set_yscale(\"log\")\n if x_param != y_param:\n # Contour the gridded data.\n ax.contour(xi, yi, zi, 15, linewidths=0.5, colors=\"k\")\n cs = ax.contourf(xi, yi, zi, 15, cmap=cmap.reversed())\n # Plot data points.\n if x_values_dummy_count > 0:\n x_org_len = int(len(x_values) / (x_values_dummy_count + 1))\n y_org_len = int(len(y_values) / (x_values_dummy_count + 1))\n elif y_values_dummy_count > 0:\n x_org_len = int(len(x_values) / (y_values_dummy_count + 1))\n y_org_len = int(len(y_values) / (y_values_dummy_count + 1))\n else:\n x_org_len = len(x_values)\n y_org_len = len(x_values)\n ax.scatter(\n x_values[:x_org_len],\n y_values[:y_org_len],\n marker=\"o\",\n c=\"black\",\n s=20,\n edgecolors=\"grey\",\n linewidth=2.0,\n )\n if x_cat_param_pos:\n ax.set_xticks(x_cat_param_pos)\n ax.set_xticklabels(x_cat_param_label)\n if y_cat_param_pos:\n ax.set_yticks(y_cat_param_pos)\n ax.set_yticklabels(y_cat_param_label)\n 
ax.label_outer()\n return cs\n\n\ndef _create_zmap(\n x_values: List[Union[int, float]],\n y_values: List[Union[int, float]],\n z_values: List[float],\n xi: np.ndarray,\n yi: np.ndarray,\n) -> Dict[Tuple[int, int], float]:\n\n # creates z-map from trial values and params.\n # z-map is represented by hashmap of coordinate and trial value pairs\n #\n # coordinates are represented by tuple of integers, where the first item\n # indicates x-axis index and the second item indicates y-axis index\n # and refer to a position of trial value on irregular param grid\n #\n # since params were resampled either with linspace or logspace\n # original params might not be on the x and y axes anymore\n # so we are going with close approximations of trial value positions\n zmap = dict()\n for x, y, z in zip(x_values, y_values, z_values):\n xindex = int(np.argmin(np.abs(xi - x)))\n yindex = int(np.argmin(np.abs(yi - y)))\n zmap[(xindex, yindex)] = z\n\n return zmap\n\n\ndef _interpolate_zmap(zmap: Dict[Tuple[int, int], float], contour_plot_num: int) -> np.ndarray:\n\n # implements interpolation formulation used in Plotly\n # to interpolate heatmaps and contour plots\n # https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/interp2d.js#L30\n # citing their doc:\n #\n # > Fill in missing data from a 2D array using an iterative\n # > poisson equation solver with zero-derivative BC at edges.\n # > Amazingly, this just amounts to repeatedly averaging all the existing\n # > nearest neighbors\n #\n # Plotly's algorithm is equivalent to solve the following linear simultaneous equation.\n # It is discretization form of the Poisson equation.\n #\n # z[x, y] = zmap[(x, y)] (if zmap[(x, y)] is given)\n # 4 * z[x, y] = z[x-1, y] + z[x+1, y] + z[x, y-1] + z[x, y+1] (if zmap[(x, y)] is not given)\n\n a_data = []\n a_row = []\n a_col = []\n b = np.zeros(contour_plot_num**2)\n for x in range(contour_plot_num):\n for y in range(contour_plot_num):\n grid_index = y * contour_plot_num + x\n if (x, y) in zmap:\n a_data.append(1)\n a_row.append(grid_index)\n a_col.append(grid_index)\n b[grid_index] = zmap[(x, y)]\n else:\n for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n if 0 <= x + dx < contour_plot_num and 0 <= y + dy < contour_plot_num:\n a_data.append(1)\n a_row.append(grid_index)\n a_col.append(grid_index)\n a_data.append(-1)\n a_row.append(grid_index)\n a_col.append(grid_index + dy * contour_plot_num + dx)\n\n z = scipy.sparse.linalg.spsolve(scipy.sparse.csc_matrix((a_data, (a_row, a_col))), b)\n\n return z.reshape((contour_plot_num, contour_plot_num))\n"
] | [
[
"scipy.sparse.csc_matrix",
"numpy.abs",
"numpy.linspace",
"numpy.log10",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
kotori-y/kotori_work | [
"51ebfdf49571ae34c246dc5b37cc86e25f4ccf3d"
] | [
"py_work/AI/ML/FeatureSelection.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 24 21:46:41 2019\n\nYou are not expected to understand my codes!\n\n@Author: Kotori_Y\n@Blog: blog.moyule.me\n@Weibo: Kotori-Y\n@Mail: [email protected]\n\nI love Megumi forerver!\n\"\"\"\n\nprint(__doc__)\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split,KFold\nfrom sklearn.metrics import accuracy_score,precision_score,recall_score\nimport pandas as pd\nimport time\nimport os\nfrom tqdm import tqdm\n\nkf = KFold(n_splits=5)#kfold\n\n\nstart = time.clock()\n\n#os.chdir(r'E:\\student\\yzy\\Importance')\n#files = os.listdir()\n#os.makedirs('FeatureAna')\n\n#df = df.sample(frac=1).reset_index(drop=True)\n#df.drop('SMILES',axis=1,inplace=True)\n#y = df.pop('Label')\n\n#fold = 0\n\n\n####################################### 5-Fold #######################################\n\n#df_i = pd.DataFrame()#creat a dataframe for importance\n#df_m = pd.DataFrame()#creat a dataframe for metrics\n\n#for train_index, test_index in kf.split(df):\n# col = list(df.columns)\n# fold += 1\n# X_train, x_test = df.iloc[train_index], df.iloc[test_index]\n# Y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n# X = X_train.copy()\n# x = x_test.copy()\n# \n# for _ in tqdm(range(len(df.columns))):\n# \n# rfc = RandomForestClassifier(n_estimators=500,n_jobs=-1)\n## print('----------------Fitting----------------')\n# rfc.fit(X,Y_train)\n# \n# fea = pd.DataFrame(\n# {\n# 'Feature':col,\n# 'Importance':rfc.feature_importances_,\n# 'Fold':'fold_{}'.format(fold),\n# 'Class':len(col)\n# }\n# )\n# fea.sort_values('Importance',ascending=False,inplace=True)\n# df_i = pd.concat([df_i,fea],ignore_index=True)\n# \n# #cal correlate metrics\n# acc = accuracy_score(y_test,rfc.predict(x))\n# pre = precision_score(y_test,rfc.predict(x))\n# rec = recall_score(y_test,rfc.predict(x))\n# \n# me = pd.DataFrame(\n# {\n# 'Precision':[pre],\n# 'Recall':[rec],\n# 'Accuracy':[acc],\n# 'Fold':['fold_{}'.format(fold)],\n# 'Class':[len(col)]\n# }\n# ) \n# df_m = pd.concat([df_m,me],ignore_index=True)\n# \n# #drop the most unimportant feature\n# drop = list(fea['Feature'])[-1]\n# \n# X.drop(drop,axis=1,inplace=True)\n# x.drop(drop,axis=1,inplace=True)\n# col.remove(drop)\n# \n# del rfc,fea,me\n# \n# \n#end = time.clock()\n#\n#print(end-start)\n#\n#df_i.to_csv('Importances.csv')\n#df_m.to_csv('Metrics.csv')\n\n###########################################################################################\n\n\n\n\n\n\n\n\n\n\n\n\n####################################### ONCE #######################################\ndef Selection(file,filepath):\n os.chdir(filepath)\n print('-----{} start-----'.format(file.replace('.csv','')))\n df_i = pd.DataFrame()#creat a dataframe for importance\n df_m = pd.DataFrame()#creat a dataframe for metrics\n \n #df_1 = pd.read_csv(r'E:\\student\\kotori\\Lemon\\backup\\2C9_In_MACCS-1.csv')\n #df_0 = pd.read_csv(r'E:\\student\\kotori\\Lemon\\backup\\2C9_In_MACCS-0.csv')\n #df_1 = df_1.sample(len(df_0),replace=True)\n #df = pd.concat([df_1,df_0],ignore_index=True,sort=False)\n \n df = pd.read_csv(file)\n df = df.sample(frac=1).reset_index(drop=True)\n# df = df.iloc[:,3:]\n# try:\n# df.drop('SMILES',axis=1,inplace=True)\n# except:\n# df.drop('Smiles',axis=1,inplace=True)\n y = df.pop('grades')\n \n col = list(df.columns)\n X_train,x_test,Y_train,y_test = train_test_split(df,y,test_size=0.2)\n X = X_train.copy()\n x = x_test.copy()\n \n for _ in tqdm(range(len(df.columns))):\n \n rfc = 
RandomForestClassifier(n_estimators=500,n_jobs=-1)\n # print('----------------Fitting----------------')\n rfc.fit(X,Y_train)\n \n fea = pd.DataFrame(\n {\n 'Feature':col\n ,'Importance':rfc.feature_importances_\n \n ,'Class':len(col)\n }\n )\n fea.sort_values('Importance',ascending=False,inplace=True)\n df_i = pd.concat([df_i,fea],ignore_index=True,sort=False)\n \n #cal correlate metrics\n acc = accuracy_score(y_test,rfc.predict(x))\n pre = precision_score(y_test,rfc.predict(x))\n rec = recall_score(y_test,rfc.predict(x))\n \n me = pd.DataFrame(\n {\n 'Precision':[pre]\n ,'Recall':[rec]\n ,'Accuracy':[acc]\n #,'Fold':['fold_{}'.format(fold)]\n ,'Class':[len(col)]\n }\n ) \n df_m = pd.concat([df_m,me],ignore_index=True,sort=False)\n \n #drop the most unimportant feature\n drop = list(fea['Feature'])[-1]\n \n X.drop(drop,axis=1,inplace=True)\n x.drop(drop,axis=1,inplace=True)\n col.remove(drop)\n \n del rfc,fea,me\n #file = '2C9_In_MACCS'\n #df_i.to_csv('FeatureAna/{}_Importances_oversampling.csv'.format(file),index=False)\n #df_m.to_csv('FeatureAna/{}_Metrics_oversampling.csv'.format(file),index=False)\n return df_i,df_m\n \ndef main():\n tempt = print(\"Input the absolute path of your file locate and ensure the file only contain 'SMILES', 'Label' and the features vector\\n\")\n filepath = input(\"The absolute path: \")\n files = os.listdir(filepath)\n for file in files: \n df_i, df_m = Selection(file,filepath)\n# os.chdir(r'E:\\student\\yzy\\All')\n# \n# part_1_class = list(range(1000,1717))\n# \n# df_i_a = df_i[df_i['Class'].isin(part_1_class)]\n# df_i_b = df_i[~df_i['Class'].isin(part_1_class)]\n# df_i.iloc[:,:].to_csv(file.replace('.csv','') + '_Importances.csv',index=False)\n# df_m.to_csv(file.replace('.csv','') + '_Metrics.csv',index=False)\n df_i.to_csv('{}_Importances.csv'.format(file.replace('.csv','')))\n\nif '__main__' == __name__: \n main()\n #,'Fold':'fold_{}'.format(fold)\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.KFold",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jni/vispy | [
"8b61cd439076aa3f50ac5f6dacb4c0af8c1d0684",
"8b61cd439076aa3f50ac5f6dacb4c0af8c1d0684"
] | [
"vispy/visuals/line/line.py",
"vispy/visuals/surface_plot.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"\nLine visual implementing Agg- and GL-based drawing modes.\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\n\nfrom ... import gloo, glsl\nfrom ...color import Color, ColorArray, get_colormap\nfrom ...ext.six import string_types\nfrom ..shaders import Function\nfrom ..visual import Visual, CompoundVisual\nfrom ...util.profiler import Profiler\n\nfrom .dash_atlas import DashAtlas\n\n\nvec2to4 = Function(\"\"\"\n vec4 vec2to4(vec2 inp) {\n return vec4(inp, 0, 1);\n }\n\"\"\")\n\nvec3to4 = Function(\"\"\"\n vec4 vec3to4(vec3 inp) {\n return vec4(inp, 1);\n }\n\"\"\")\n\n\n\"\"\"\nTODO:\n\n* Agg support is very minimal; needs attention.\n* Optimization--avoid creating new buffers, avoid triggering program\n recompile.\n\"\"\"\n\n\njoins = {'miter': 0, 'round': 1, 'bevel': 2}\n\ncaps = {'': 0, 'none': 0, '.': 0,\n 'round': 1, ')': 1, '(': 1, 'o': 1,\n 'triangle in': 2, '<': 2,\n 'triangle out': 3, '>': 3,\n 'square': 4, '=': 4, 'butt': 4,\n '|': 5}\n\n\nclass LineVisual(CompoundVisual):\n \"\"\"Line visual\n\n Parameters\n ----------\n pos : array\n Array of shape (..., 2) or (..., 3) specifying vertex coordinates.\n color : Color, tuple, or array\n The color to use when drawing the line. If an array is given, it\n must be of shape (..., 4) and provide one rgba color per vertex.\n Can also be a colormap name, or appropriate `Function`.\n width:\n The width of the line in px. Line widths > 1px are only\n guaranteed to work when using 'agg' method.\n connect : str or array\n Determines which vertices are connected by lines.\n\n * \"strip\" causes the line to be drawn with each vertex\n connected to the next.\n * \"segments\" causes each pair of vertices to draw an\n independent line segment\n * numpy arrays specify the exact set of segment pairs to\n connect.\n\n method : str\n Mode to use for drawing.\n\n * \"agg\" uses anti-grain geometry to draw nicely antialiased lines\n with proper joins and endcaps.\n * \"gl\" uses OpenGL's built-in line rendering. 
This is much faster,\n but produces much lower-quality results and is not guaranteed to\n obey the requested line width or join/endcap styles.\n\n antialias : bool\n Enables or disables antialiasing.\n For method='gl', this specifies whether to use GL's line smoothing,\n which may be unavailable or inconsistent on some platforms.\n \"\"\"\n def __init__(self, pos=None, color=(0.5, 0.5, 0.5, 1), width=1,\n connect='strip', method='gl', antialias=False):\n self._line_visual = None\n\n self._changed = {'pos': False, 'color': False, 'width': False,\n 'connect': False}\n\n self._pos = None\n self._color = None\n self._width = None\n self._connect = None\n self._bounds = None\n self._antialias = None\n self._method = 'none'\n\n CompoundVisual.__init__(self, [])\n\n # don't call subclass set_data; these often have different\n # signatures.\n LineVisual.set_data(self, pos=pos, color=color, width=width,\n connect=connect)\n self.antialias = antialias\n self.method = method\n\n @property\n def antialias(self):\n return self._antialias\n\n @antialias.setter\n def antialias(self, aa):\n self._antialias = bool(aa)\n self.update()\n\n @property\n def method(self):\n \"\"\"The current drawing method\"\"\"\n return self._method\n\n @method.setter\n def method(self, method):\n if method not in ('agg', 'gl'):\n raise ValueError('method argument must be \"agg\" or \"gl\".')\n if method == self._method:\n return\n\n self._method = method\n if self._line_visual is not None:\n self.remove_subvisual(self._line_visual)\n\n if method == 'gl':\n self._line_visual = _GLLineVisual(self)\n elif method == 'agg':\n self._line_visual = _AggLineVisual(self)\n self.add_subvisual(self._line_visual)\n\n for k in self._changed:\n self._changed[k] = True\n\n def set_data(self, pos=None, color=None, width=None, connect=None):\n \"\"\"Set the data used to draw this visual.\n\n Parameters\n ----------\n pos : array\n Array of shape (..., 2) or (..., 3) specifying vertex coordinates.\n color : Color, tuple, or array\n The color to use when drawing the line. If an array is given, it\n must be of shape (..., 4) and provide one rgba color per vertex.\n width:\n The width of the line in px. 
Line widths < 1 px will be rounded up\n to 1 px when using the 'gl' method.\n connect : str or array\n Determines which vertices are connected by lines.\n\n * \"strip\" causes the line to be drawn with each vertex\n connected to the next.\n * \"segments\" causes each pair of vertices to draw an\n independent line segment\n * int numpy arrays specify the exact set of segment pairs to\n connect.\n * bool numpy arrays specify which _adjacent_ pairs to connect.\n\n \"\"\"\n if pos is not None:\n self._bounds = None\n self._pos = pos\n self._changed['pos'] = True\n\n if color is not None:\n self._color = color\n self._changed['color'] = True\n\n if width is not None:\n self._width = width\n self._changed['width'] = True\n\n if connect is not None:\n self._connect = connect\n self._changed['connect'] = True\n\n self.update()\n\n @property\n def color(self):\n return self._color\n\n @property\n def width(self):\n return self._width\n\n @property\n def connect(self):\n return self._connect\n\n @property\n def pos(self):\n return self._pos\n\n def _interpret_connect(self):\n if isinstance(self._connect, np.ndarray):\n # Convert a boolean connection array to a vertex index array\n if self._connect.ndim == 1 and self._connect.dtype == bool:\n index = np.empty((len(self._connect), 2), dtype=np.uint32)\n index[:] = np.arange(len(self._connect))[:, np.newaxis]\n index[:, 1] += 1\n return index[self._connect]\n elif self._connect.ndim == 2 and self._connect.shape[1] == 2:\n return self._connect.astype(np.uint32)\n else:\n raise TypeError(\"Got invalid connect array of shape %r and \"\n \"dtype %r\" % (self._connect.shape,\n self._connect.dtype))\n else:\n return self._connect\n\n def _interpret_color(self, color_in=None):\n color_in = self._color if color_in is None else color_in\n colormap = None\n if isinstance(color_in, string_types):\n try:\n colormap = get_colormap(color_in)\n color = Function(colormap.glsl_map)\n except KeyError:\n color = Color(color_in).rgba\n elif isinstance(color_in, Function):\n color = Function(color_in)\n else:\n color = ColorArray(color_in).rgba\n if len(color) == 1:\n color = color[0]\n return color, colormap\n\n def _compute_bounds(self, axis, view):\n \"\"\"Get the bounds\n\n Parameters\n ----------\n mode : str\n Describes the type of boundary requested. 
Can be \"visual\", \"data\",\n or \"mouse\".\n axis : 0, 1, 2\n The axis along which to measure the bounding values, in\n x-y-z order.\n \"\"\"\n # Can and should we calculate bounds?\n if (self._bounds is None) and self._pos is not None:\n pos = self._pos\n self._bounds = [(pos[:, d].min(), pos[:, d].max())\n for d in range(pos.shape[1])]\n # Return what we can\n if self._bounds is None:\n return\n else:\n if axis < len(self._bounds):\n return self._bounds[axis]\n else:\n return (0, 0)\n\n def _prepare_draw(self, view):\n if self._width == 0:\n return False\n CompoundVisual._prepare_draw(self, view)\n\n\nclass _GLLineVisual(Visual):\n VERTEX_SHADER = \"\"\"\n varying vec4 v_color;\n\n void main(void) {\n gl_Position = $transform($to_vec4($position));\n v_color = $color;\n }\n \"\"\"\n\n FRAGMENT_SHADER = \"\"\"\n varying vec4 v_color;\n void main() {\n gl_FragColor = v_color;\n }\n \"\"\"\n\n def __init__(self, parent):\n self._parent = parent\n self._pos_vbo = gloo.VertexBuffer()\n self._color_vbo = gloo.VertexBuffer()\n self._connect_ibo = gloo.IndexBuffer()\n self._connect = None\n\n Visual.__init__(self, vcode=self.VERTEX_SHADER,\n fcode=self.FRAGMENT_SHADER)\n self.set_gl_state('translucent')\n\n def _prepare_transforms(self, view):\n xform = view.transforms.get_transform()\n view.view_program.vert['transform'] = xform\n\n def _prepare_draw(self, view):\n prof = Profiler()\n\n if self._parent._changed['pos']:\n if self._parent._pos is None:\n return False\n # todo: does this result in unnecessary copies?\n pos = np.ascontiguousarray(self._parent._pos.astype(np.float32))\n self._pos_vbo.set_data(pos)\n self._program.vert['position'] = self._pos_vbo\n if pos.shape[-1] == 2:\n self._program.vert['to_vec4'] = vec2to4\n elif pos.shape[-1] == 3:\n self._program.vert['to_vec4'] = vec3to4\n else:\n raise TypeError(\"Got bad position array shape: %r\"\n % (pos.shape,))\n\n if self._parent._changed['color']:\n color, cmap = self._parent._interpret_color()\n # If color is not visible, just quit now\n if isinstance(color, Color) and color.is_blank:\n return False\n if isinstance(color, Function):\n # TODO: Change to the parametric coordinate once that is done\n self._program.vert['color'] = color(\n '(gl_Position.x + 1.0) / 2.0')\n else:\n if color.ndim == 1:\n self._program.vert['color'] = color\n else:\n self._color_vbo.set_data(color)\n self._program.vert['color'] = self._color_vbo\n\n self.shared_program['texture2D_LUT'] = cmap.texture_lut() \\\n if (hasattr(cmap, 'texture_lut')) else None\n\n # Do we want to use OpenGL, and can we?\n GL = None\n from ...app._default_app import default_app\n if default_app is not None and \\\n default_app.backend_name != 'ipynb_webgl':\n try:\n import OpenGL.GL as GL\n except Exception: # can be other than ImportError sometimes\n pass\n\n # Turn on line smooth and/or line width\n if GL:\n if self._parent._antialias:\n GL.glEnable(GL.GL_LINE_SMOOTH)\n else:\n GL.glDisable(GL.GL_LINE_SMOOTH)\n px_scale = self.transforms.pixel_scale\n width = px_scale * self._parent._width\n GL.glLineWidth(max(width, 1.))\n\n if self._parent._changed['connect']:\n self._connect = self._parent._interpret_connect()\n if isinstance(self._connect, np.ndarray):\n self._connect_ibo.set_data(self._connect)\n if self._connect is None:\n return False\n\n prof('prepare')\n\n # Draw\n if isinstance(self._connect, string_types) and \\\n self._connect == 'strip':\n self._draw_mode = 'line_strip'\n self._index_buffer = None\n elif isinstance(self._connect, string_types) and \\\n 
self._connect == 'segments':\n self._draw_mode = 'lines'\n self._index_buffer = None\n elif isinstance(self._connect, np.ndarray):\n self._draw_mode = 'lines'\n self._index_buffer = self._connect_ibo\n else:\n raise ValueError(\"Invalid line connect mode: %r\" % self._connect)\n\n prof('draw')\n\n\nclass _AggLineVisual(Visual):\n _agg_vtype = np.dtype([('a_position', np.float32, (2,)),\n ('a_tangents', np.float32, (4,)),\n ('a_segment', np.float32, (2,)),\n ('a_angles', np.float32, (2,)),\n ('a_texcoord', np.float32, (2,)),\n ('alength', np.float32),\n ('color', np.float32, (4,))])\n\n VERTEX_SHADER = glsl.get('lines/agg.vert')\n FRAGMENT_SHADER = glsl.get('lines/agg.frag')\n\n def __init__(self, parent):\n self._parent = parent\n self._vbo = gloo.VertexBuffer()\n\n self._pos = None\n self._color = None\n\n self._da = DashAtlas()\n dash_index, dash_period = self._da['solid']\n self._U = dict(dash_index=dash_index, dash_period=dash_period,\n linejoin=joins['round'],\n linecaps=(caps['round'], caps['round']),\n dash_caps=(caps['round'], caps['round']),\n antialias=1.0)\n self._dash_atlas = gloo.Texture2D(self._da._data)\n\n Visual.__init__(self, vcode=self.VERTEX_SHADER,\n fcode=self.FRAGMENT_SHADER)\n self._index_buffer = gloo.IndexBuffer()\n self.set_gl_state('translucent', depth_test=False)\n self._draw_mode = 'triangles'\n\n def _prepare_transforms(self, view):\n data_doc = view.get_transform('visual', 'document')\n doc_px = view.get_transform('document', 'framebuffer')\n px_ndc = view.get_transform('framebuffer', 'render')\n\n vert = view.view_program.vert\n vert['transform'] = data_doc\n vert['doc_px_transform'] = doc_px\n vert['px_ndc_transform'] = px_ndc\n\n def _prepare_draw(self, view):\n bake = False\n if self._parent._changed['pos']:\n if self._parent._pos is None:\n return False\n # todo: does this result in unnecessary copies?\n self._pos = np.ascontiguousarray(\n self._parent._pos.astype(np.float32))\n bake = True\n\n if self._parent._changed['color']:\n color, cmap = self._parent._interpret_color()\n self._color = color\n bake = True\n\n if self._parent._changed['connect']:\n if self._parent._connect not in [None, 'strip']:\n raise NotImplementedError(\"Only 'strip' connection mode \"\n \"allowed for agg-method lines.\")\n\n if bake:\n V, idxs = self._agg_bake(self._pos, self._color)\n self._vbo.set_data(V)\n self._index_buffer.set_data(idxs)\n\n # self._program.prepare()\n self.shared_program.bind(self._vbo)\n uniforms = dict(closed=False, miter_limit=4.0, dash_phase=0.0,\n linewidth=self._parent._width)\n for n, v in uniforms.items():\n self.shared_program[n] = v\n for n, v in self._U.items():\n self.shared_program[n] = v\n self.shared_program['u_dash_atlas'] = self._dash_atlas\n\n @classmethod\n def _agg_bake(cls, vertices, color, closed=False):\n \"\"\"\n Bake a list of 2D vertices for rendering them as thick line. 
Each line\n segment must have its own vertices because of antialias (this means no\n vertex sharing between two adjacent line segments).\n \"\"\"\n\n n = len(vertices)\n P = np.array(vertices).reshape(n, 2).astype(float)\n idx = np.arange(n) # used to eventually tile the color array\n\n dx, dy = P[0] - P[-1]\n d = np.sqrt(dx*dx+dy*dy)\n\n # If closed, make sure first vertex = last vertex (+/- epsilon=1e-10)\n if closed and d > 1e-10:\n P = np.append(P, P[0]).reshape(n+1, 2)\n idx = np.append(idx, idx[-1])\n n += 1\n\n V = np.zeros(len(P), dtype=cls._agg_vtype)\n V['a_position'] = P\n\n # Tangents & norms\n T = P[1:] - P[:-1]\n\n N = np.sqrt(T[:, 0]**2 + T[:, 1]**2)\n # T /= N.reshape(len(T),1)\n V['a_tangents'][+1:, :2] = T\n V['a_tangents'][0, :2] = T[-1] if closed else T[0]\n V['a_tangents'][:-1, 2:] = T\n V['a_tangents'][-1, 2:] = T[0] if closed else T[-1]\n\n # Angles\n T1 = V['a_tangents'][:, :2]\n T2 = V['a_tangents'][:, 2:]\n A = np.arctan2(T1[:, 0]*T2[:, 1]-T1[:, 1]*T2[:, 0],\n T1[:, 0]*T2[:, 0]+T1[:, 1]*T2[:, 1])\n V['a_angles'][:-1, 0] = A[:-1]\n V['a_angles'][:-1, 1] = A[+1:]\n\n # Segment\n L = np.cumsum(N)\n V['a_segment'][+1:, 0] = L\n V['a_segment'][:-1, 1] = L\n # V['a_lengths'][:,2] = L[-1]\n\n # Step 1: A -- B -- C => A -- B, B' -- C\n V = np.repeat(V, 2, axis=0)[1:-1]\n V['a_segment'][1:] = V['a_segment'][:-1]\n V['a_angles'][1:] = V['a_angles'][:-1]\n V['a_texcoord'][0::2] = -1\n V['a_texcoord'][1::2] = +1\n idx = np.repeat(idx, 2)[1:-1]\n\n # Step 2: A -- B, B' -- C -> A0/A1 -- B0/B1, B'0/B'1 -- C0/C1\n V = np.repeat(V, 2, axis=0)\n V['a_texcoord'][0::2, 1] = -1\n V['a_texcoord'][1::2, 1] = +1\n idx = np.repeat(idx, 2)\n\n idxs = np.resize(np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32),\n (n-1)*(2*3))\n idxs += np.repeat(4*np.arange(n-1, dtype=np.uint32), 6)\n\n # Length\n V['alength'] = L[-1] * np.ones(len(V))\n\n # Color\n if color.ndim == 1:\n color = np.tile(color, (len(V), 1))\n elif color.ndim == 2 and len(color) == n:\n color = color[idx]\n else:\n raise ValueError('Color length %s does not match number of '\n 'vertices %s' % (len(color), n))\n V['color'] = color\n\n return V, idxs\n",
"# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\nfrom __future__ import division\n\nimport numpy as np\n\nfrom .mesh import MeshVisual\nfrom ..geometry import MeshData\n\n\nclass SurfacePlotVisual(MeshVisual):\n \"\"\"Displays a surface plot on a regular x,y grid\n\n Parameters\n ----------\n x : ndarray | None\n 1D array of values specifying the x positions of vertices in the\n grid. If None, values will be assumed to be integers.\n y : ndarray | None\n 1D array of values specifying the x positions of vertices in the\n grid. If None, values will be assumed to be integers.\n z : ndarray\n 2D array of height values for each grid vertex.\n colors : ndarray\n (width, height, 4) array of vertex colors.\n\n Notes\n -----\n All arguments are optional.\n\n Note that if vertex positions are updated, the normal vectors for each\n triangle must be recomputed. This is somewhat expensive if the surface\n was initialized with smooth=False and very expensive if smooth=True.\n For faster performance, initialize with compute_normals=False and use\n per-vertex colors or a material that does not require normals.\n \"\"\"\n def __init__(self, x=None, y=None, z=None, colors=None, **kwargs):\n # The x, y, z, and colors arguments are passed to set_data().\n # All other keyword arguments are passed to MeshVisual.__init__().\n self._x = None\n self._y = None\n self._z = None\n self.__vertices = None\n self.__faces = None\n self.__meshdata = MeshData()\n kwargs.setdefault('shading', 'smooth')\n MeshVisual.__init__(self, **kwargs)\n self.set_data(x, y, z, colors)\n\n def set_data(self, x=None, y=None, z=None, colors=None):\n \"\"\"Update the data in this surface plot.\n\n Parameters\n ----------\n x : ndarray | None\n 1D array of values specifying the x positions of vertices in the\n grid. If None, values will be assumed to be integers.\n y : ndarray | None\n 1D array of values specifying the x positions of vertices in the\n grid. 
If None, values will be assumed to be integers.\n z : ndarray\n 2D array of height values for each grid vertex.\n colors : ndarray\n (width, height, 4) array of vertex colors.\n \"\"\"\n if x is not None:\n if self._x is None or len(x) != len(self._x):\n self.__vertices = None\n self._x = x\n\n if y is not None:\n if self._y is None or len(y) != len(self._y):\n self.__vertices = None\n self._y = y\n\n if z is not None:\n if self._x is not None and z.shape[0] != len(self._x):\n raise TypeError('Z values must have shape (len(x), len(y))')\n if self._y is not None and z.shape[1] != len(self._y):\n raise TypeError('Z values must have shape (len(x), len(y))')\n self._z = z\n if (self.__vertices is not None and\n self._z.shape != self.__vertices.shape[:2]):\n self.__vertices = None\n\n if self._z is None:\n return\n\n update_mesh = False\n new_vertices = False\n\n # Generate vertex and face array\n if self.__vertices is None:\n new_vertices = True\n self.__vertices = np.empty((self._z.shape[0], self._z.shape[1], 3),\n dtype=np.float32)\n self.generate_faces()\n self.__meshdata.set_faces(self.__faces)\n update_mesh = True\n\n # Copy x, y, z data into vertex array\n if new_vertices or x is not None:\n if x is None:\n if self._x is None:\n x = np.arange(self._z.shape[0])\n else:\n x = self._x\n self.__vertices[:, :, 0] = x.reshape(len(x), 1)\n update_mesh = True\n\n if new_vertices or y is not None:\n if y is None:\n if self._y is None:\n y = np.arange(self._z.shape[1])\n else:\n y = self._y\n self.__vertices[:, :, 1] = y.reshape(1, len(y))\n update_mesh = True\n\n if new_vertices or z is not None:\n self.__vertices[..., 2] = self._z\n update_mesh = True\n\n if colors is not None:\n self.__meshdata.set_vertex_colors(colors)\n update_mesh = True\n\n # Update MeshData\n if update_mesh:\n self.__meshdata.set_vertices(\n self.__vertices.reshape(self.__vertices.shape[0] *\n self.__vertices.shape[1], 3))\n MeshVisual.set_data(self, meshdata=self.__meshdata)\n\n def generate_faces(self):\n cols = self._z.shape[1]-1\n rows = self._z.shape[0]-1\n faces = np.empty((cols*rows*2, 3), dtype=np.uint)\n rowtemplate1 = (np.arange(cols).reshape(cols, 1) +\n np.array([[0, 1, cols+1]]))\n rowtemplate2 = (np.arange(cols).reshape(cols, 1) +\n np.array([[cols+1, 1, cols+2]]))\n for row in range(rows):\n start = row * cols * 2\n faces[start:start+cols] = rowtemplate1 + row * (cols+1)\n faces[start+cols:start+(cols*2)] = rowtemplate2 + row * (cols+1)\n self.__faces = faces\n"
] | [
[
"numpy.sqrt",
"numpy.arange",
"numpy.cumsum",
"numpy.dtype",
"numpy.arctan2",
"numpy.append",
"numpy.repeat",
"numpy.array"
],
[
"numpy.arange",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nghugo88/tf-pose-estimation | [
"0df660feeb52957f40f4a5e18920adc317af3653"
] | [
"src/slim/nets/nets_factory_test.py"
] | [
"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for slim.inception.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport tensorflow as tf\n\nfrom nets import nets_factory\n\n\nclass NetworksTest(tf.test.TestCase):\n\n def testGetNetworkFnFirstHalf(self):\n batch_size = 5\n num_classes = 1000\n for net in nets_factory.networks_map.keys()[:10]:\n with tf.Graph().as_default() as g, self.test_session(g):\n net_fn = nets_factory.get_network_fn(net, num_classes)\n # Most networks use 224 as their default_image_size\n image_size = getattr(net_fn, 'default_image_size', 224)\n inputs = tf.random_uniform((batch_size, image_size, image_size, 3))\n logits, end_points = net_fn(inputs)\n self.assertTrue(isinstance(logits, tf.Tensor))\n self.assertTrue(isinstance(end_points, dict))\n self.assertEqual(logits.get_shape().as_list()[0], batch_size)\n self.assertEqual(logits.get_shape().as_list()[-1], num_classes)\n\n def testGetNetworkFnSecondHalf(self):\n batch_size = 5\n num_classes = 1000\n for net in nets_factory.networks_map.keys()[10:]:\n with tf.Graph().as_default() as g, self.test_session(g):\n net_fn = nets_factory.get_network_fn(net, num_classes)\n # Most networks use 224 as their default_image_size\n image_size = getattr(net_fn, 'default_image_size', 224)\n inputs = tf.random_uniform((batch_size, image_size, image_size, 3))\n logits, end_points = net_fn(inputs)\n self.assertTrue(isinstance(logits, tf.Tensor))\n self.assertTrue(isinstance(end_points, dict))\n self.assertEqual(logits.get_shape().as_list()[0], batch_size)\n self.assertEqual(logits.get_shape().as_list()[-1], num_classes)\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.Graph",
"tensorflow.random_uniform",
"tensorflow.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JacopoTeneggi/InnerEye-DeepLearning | [
"988d9fa318a19cfd435370248970d976ee2e78b0",
"988d9fa318a19cfd435370248970d976ee2e78b0",
"988d9fa318a19cfd435370248970d976ee2e78b0",
"988d9fa318a19cfd435370248970d976ee2e78b0"
] | [
"InnerEye/ML/pipelines/inference.py",
"InnerEye/ML/models/architectures/classification/image_encoder_with_mlp.py",
"InnerEye/ML/utils/lr_scheduler.py",
"InnerEye/ML/models/layers/weight_standardization.py"
] | [
"# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom __future__ import annotations\n\nimport logging\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nimport numpy as np\nimport torch\nfrom radio import CTImagesMaskedBatch\nfrom radio.batchflow import Dataset, action, inbatch_parallel\n\nfrom InnerEye.Common.type_annotations import TupleFloat3\nfrom InnerEye.ML import config\nfrom InnerEye.ML.common import ModelExecutionMode\nfrom InnerEye.ML.config import SegmentationModelBase\nfrom InnerEye.ML.lightning_helpers import load_from_checkpoint_and_adjust_for_inference\nfrom InnerEye.ML.lightning_models import SegmentationLightning\nfrom InnerEye.ML.model_config_base import ModelConfigBase\nfrom InnerEye.ML.models.architectures.base_model import BaseSegmentationModel\nfrom InnerEye.ML.utils import image_util, ml_util\nfrom InnerEye.ML.utils.image_util import compute_uncertainty_map_from_posteriors, gaussian_smooth_posteriors, \\\n posteriors_to_segmentation\n\n\nclass InferencePipelineBase:\n \"\"\"Base class for all inference pipelines.\"\"\"\n\n def __init__(self, model_config: ModelConfigBase):\n self.model_config = model_config\n\n\nclass FullImageInferencePipelineBase(InferencePipelineBase):\n \"\"\"\n Base Class for full image inference intended to be inherited by inference pipelines\n that can perform full image prediction\n \"\"\"\n\n def __init__(self, model_config: SegmentationModelBase):\n super().__init__(model_config)\n\n def predict_and_post_process_whole_image(self, image_channels: np.ndarray,\n voxel_spacing_mm: TupleFloat3,\n mask: np.ndarray = None,\n patient_id: int = 0) -> InferencePipeline.Result:\n return self.post_process(self.predict_whole_image(image_channels, voxel_spacing_mm, mask, patient_id))\n\n def predict_whole_image(self, image_channels: np.ndarray,\n voxel_spacing_mm: TupleFloat3,\n mask: np.ndarray = None,\n patient_id: int = 0) -> InferencePipeline.Result:\n raise NotImplementedError(\"Full image inference capability must be implemented by concrete classes\")\n\n def post_process(self, results: InferencePipeline.Result) -> InferencePipeline.Result:\n \"\"\"\n Perform connected component analysis to update segmentation with largest\n connected component based on the configurations\n :param results: inference results to post-process\n :return: post-processed version of results\n \"\"\"\n if self.model_config.posterior_smoothing_mm:\n posteriors = gaussian_smooth_posteriors(\n posteriors=results.posteriors,\n kernel_size_mm=self.model_config.posterior_smoothing_mm,\n voxel_spacing_mm=results.voxel_spacing_mm\n )\n\n results = InferencePipeline.Result(\n patient_id=results.patient_id,\n posteriors=posteriors,\n segmentation=posteriors_to_segmentation(posteriors),\n voxel_spacing_mm=results.voxel_spacing_mm\n )\n\n if self.model_config.summed_probability_rules and not self.model_config.disable_extra_postprocessing:\n assert isinstance(self.model_config, SegmentationModelBase)\n results = results.with_new_segmentation(\n image_util.apply_summed_probability_rules(self.model_config, results.posteriors, results.segmentation))\n\n if self.model_config.largest_connected_component_foreground_classes is not None:\n # get indices for 
classes to restrict\n restrict_class_indices_and_thresholds = []\n for name, idx in self.model_config.class_and_index_with_background().items():\n for name2, threshold in self.model_config.largest_connected_component_foreground_classes:\n if name2 == name:\n restrict_class_indices_and_thresholds.append((idx, threshold))\n results = results.with_new_segmentation(\n image_util.extract_largest_foreground_connected_component(\n multi_label_array=results.segmentation,\n # mypy gets confused below because List is invariant. Sequence is covariant\n # but does not allow \"append\".\n restrictions=restrict_class_indices_and_thresholds)) # type: ignore\n\n if self.model_config.slice_exclusion_rules and not self.model_config.disable_extra_postprocessing:\n results = results.with_new_segmentation(\n image_util.apply_slice_exclusion_rules(self.model_config, results.segmentation))\n\n return results\n\n\nclass InferencePipeline(FullImageInferencePipelineBase):\n \"\"\"\n Pipeline class for model for whole image inference on ct-images.\n \"\"\"\n\n # the model output is expected to be a valid probability distribution\n MODEL_OUTPUT_POSTERIOR_RANGE = (0, 1)\n\n class Variables(Enum):\n \"\"\"\n Variables associated with the inference pipeline\n \"\"\"\n\n # an instantiated model to use for inference.\n Model = 'model'\n # the configuration associated with the model.\n ModelConfig = 'model_config'\n # the shape of the image required as output from the pipeline.\n OutputImageShape = 'output_image_shape'\n # A Tuple[int,int,int] with the crop size that should be used. For large images, this will be\n # the test_crop_size from the model config, but for smaller images, it will be the componentwise\n # minimum of test_crop_size and image_size\n CropSize = 'crop_size'\n # The stride size to use, possibly adjusted for small images (see above for crop_size)\n Stride = 'stride'\n # The size of the output tensor that the model will produce when fed with an input tensor that\n # has the given crop_size.\n OutputSize = 'output_size'\n\n class Result:\n \"\"\"\n Contains the inference results from a single pass of the inference pipeline\n \"\"\"\n\n def __init__(self,\n patient_id: int,\n segmentation: np.ndarray,\n posteriors: np.ndarray,\n voxel_spacing_mm: TupleFloat3):\n \"\"\"\n :param patient_id: The id of the patient instance for with inference is being performed on.\n :param segmentation: Z x Y x X (argmaxed over the posteriors in the class dimension)\n :param voxel_spacing_mm: Voxel spacing to use for each dimension in (Z x Y x X) order\n :param posteriors: Class x Z x Y x X\n \"\"\"\n self.patient_id = patient_id\n self.segmentation = segmentation\n self.posteriors = posteriors\n self.voxel_spacing_mm = voxel_spacing_mm\n\n if len(self.voxel_spacing_mm) != 3:\n raise ValueError(f\"voxel_spacing_mm must have length 3, found: {voxel_spacing_mm}\")\n if any(np.array(self.voxel_spacing_mm) <= 0):\n raise ValueError(f\"voxel_spacing_mm must have values > 0 in each dimension, found: {voxel_spacing_mm}\")\n\n ml_util.check_size_matches(self.segmentation,\n self.posteriors,\n dim1=3,\n dim2=4,\n matching_dimensions=[-3, -2, -1],\n arg1_name=\"segmentation\",\n arg2_name=\"posteriors\")\n\n segmentation_value_range = np.unique(self.segmentation)\n if not np.all([x in range(self.posteriors.shape[0]) for x in segmentation_value_range]):\n raise Exception(\"values in the segmentation map must be in range [0, classes), \"\n \"found classes:{}, segmentation range:{}\"\n .format(self.posteriors.shape[0], 
segmentation_value_range))\n\n self._uncertainty = compute_uncertainty_map_from_posteriors(self.posteriors)\n\n @property\n def uncertainty(self) -> np.ndarray:\n return self._uncertainty\n\n def with_new_segmentation(self, segmentation: np.ndarray) -> InferencePipeline.Result:\n if segmentation.shape != self.segmentation.shape:\n raise ValueError(f\"Attempt to replace segmentation of shape {self.segmentation.shape} \"\n f\"with one of shape {segmentation.shape}\")\n return InferencePipeline.Result(\n patient_id=self.patient_id,\n segmentation=segmentation,\n posteriors=self.posteriors,\n voxel_spacing_mm=self.voxel_spacing_mm)\n\n def __init__(self, model: SegmentationLightning, model_config: config.SegmentationModelBase,\n pipeline_id: int = 0):\n super().__init__(model_config)\n self.model = model\n self.model.model.eval()\n self.pipeline_id = pipeline_id\n\n @staticmethod\n def create_from_checkpoint(path_to_checkpoint: Path,\n model_config: SegmentationModelBase,\n pipeline_id: int = 0) -> Optional[InferencePipeline]:\n \"\"\"\n Creates an instance of the inference pipeline for a given epoch from a stored checkpoint.\n After loading, the model parameters are checked for NaN and Infinity values.\n If there is no checkpoint file for the given epoch, return None.\n :param path_to_checkpoint: The path to the checkpoint that we want to load\n model_config.checkpoint_folder\n :param model_config: Model related configurations.\n :param pipeline_id: Numeric identifier for the pipeline (useful for logging when ensembling)\n :return InferencePipeline: an instantiated inference pipeline instance, or None if there was no checkpoint\n file for this epoch.\n \"\"\"\n if not path_to_checkpoint.is_file():\n # not raising a value error here: This is used to create individual pipelines for ensembles,\n # possible one model cannot be created but others can\n logging.warning(f\"Could not recover model from checkpoint path {path_to_checkpoint}\")\n return None\n lightning_model = load_from_checkpoint_and_adjust_for_inference(model_config, path_to_checkpoint)\n assert isinstance(lightning_model, SegmentationLightning)\n return InferencePipeline(model=lightning_model, model_config=model_config, pipeline_id=pipeline_id)\n\n def predict_whole_image(self, image_channels: np.ndarray,\n voxel_spacing_mm: TupleFloat3,\n mask: np.ndarray = None,\n patient_id: int = 0) -> InferencePipeline.Result:\n \"\"\"\n Performs a single inference pass through the pipeline for the provided image\n :param image_channels: The input image channels to perform inference on in format: Channels x Z x Y x X.\n :param voxel_spacing_mm: Voxel spacing to use for each dimension in (Z x Y x X) order\n :param mask: A binary image used to ignore results outside it in format: Z x Y x X.\n :param patient_id: The identifier of the patient this image belongs to (defaults to 0 if None provided).\n :return InferenceResult: that contains Segmentation for each of the classes and their posterior probabilities.\n \"\"\"\n if image_channels is None:\n raise Exception(\"image_channels cannot be None\")\n if image_channels.ndim != 4:\n raise NotImplementedError(\"image_channels must be in shape: Channels x Z x Y x X\"\n \"found image_channels shape: {}\".format(image_channels.shape))\n if mask is not None:\n ml_util.check_size_matches(image_channels, mask, 4, 3, [-1, -2, -3])\n self.model.eval()\n # create the dataset for the batch\n batch_dataset = Dataset(index=[patient_id], batch_class=InferenceBatch)\n # setup the pipeline\n pipeline = 
(batch_dataset.p\n # define pipeline variables\n .init_variables([InferencePipeline.Variables.Model,\n InferencePipeline.Variables.ModelConfig,\n InferencePipeline.Variables.CropSize,\n InferencePipeline.Variables.OutputSize,\n InferencePipeline.Variables.OutputImageShape,\n InferencePipeline.Variables.Stride])\n # update the variables for the batch actions\n .update_variable(name=InferencePipeline.Variables.Model, value=self.model)\n .update_variable(name=InferencePipeline.Variables.ModelConfig, value=self.model_config)\n # perform cascaded batch actions\n .load(image_channels=image_channels, mask=mask)\n .pre_process()\n .predict()\n .post_process()\n )\n # run the batch through the pipeline\n logging.info(f\"Inference pipeline ({self.pipeline_id}), Predicting patient: {patient_id}\")\n processed_batch: InferenceBatch = pipeline.next_batch(batch_size=1)\n posteriors = processed_batch.get_component(InferenceBatch.Components.Posteriors)\n image_util.check_array_range(posteriors, error_prefix=\"Whole image posteriors\")\n # prepare pipeline results from the processed batch\n return InferencePipeline.Result(\n patient_id=patient_id,\n segmentation=processed_batch.get_component(InferenceBatch.Components.Segmentation),\n posteriors=posteriors,\n voxel_spacing_mm=voxel_spacing_mm\n )\n\n\nclass InferenceBatch(CTImagesMaskedBatch):\n \"\"\"\n Batch class for IO with the inference pipeline. One instance of a batch will load the image\n into the 'images' component of the pipeline, and store the results of the full pass\n of the pipeline into the 'segmentation' and 'posteriors' components.\n \"\"\"\n\n class Components(Enum):\n \"\"\"\n Components associated with the inference batch class\n \"\"\"\n\n # the input image channels in Channels x Z x Y x X format.\n ImageChannels = 'channels'\n # a set of 2D image slices (ie: a 3D image channel), stacked in Z x Y x X format.\n Images = 'images'\n # a binary mask used to ignore predictions in Z x Y x X format.\n Mask = 'mask'\n # a numpy.ndarray in Z x Y x X format with class labels for each voxel in the original image.\n Segmentation = 'segmentation'\n # a numpy.ndarray with the first dimension indexing each class in C x Z x Y x X format\n # with each Z x Y x X being the same shape as the Images component, and consisting of\n # [0, 1] values representing the model confidence for each voxel.\n Posteriors = 'posteriors'\n\n def __init__(self, index: int, *args: Any, **kwargs: Any):\n super().__init__(index, *args, **kwargs)\n self.components = [x.value for x in InferenceBatch.Components]\n\n @action\n def load(self, image_channels: np.ndarray, mask: np.ndarray) -> InferenceBatch:\n \"\"\"\n Load image channels and mask into their respective pipeline components.\n \"\"\"\n self.set_component(component=InferenceBatch.Components.ImageChannels, data=image_channels)\n model_config = self.get_configs()\n if model_config is None:\n raise ValueError(\"model_config is None\")\n if model_config.test_crop_size is None:\n raise ValueError(\"model_config.test_crop_size is None\")\n if model_config.inference_stride_size is None:\n raise ValueError(\"model_config.inference_stride_size is None\")\n\n # fetch the image channels from the batch\n image_channels = self.get_component(InferenceBatch.Components.ImageChannels)\n self.pipeline.set_variable(name=InferencePipeline.Variables.OutputImageShape, value=image_channels[0].shape)\n # There may be cases where the test image is smaller than the test_crop_size. Adjust crop_size\n # to always fit into image. 
If test_crop_size is smaller than the image, crop will remain unchanged.\n image_size = image_channels.shape[1:]\n model: BaseSegmentationModel = self.pipeline.get_variable(InferencePipeline.Variables.Model).model\n effective_crop, effective_stride = \\\n model.crop_size_constraints.restrict_crop_size_to_image(image_size,\n model_config.test_crop_size,\n model_config.inference_stride_size)\n self.pipeline.set_variable(name=InferencePipeline.Variables.CropSize, value=effective_crop)\n self.pipeline.set_variable(name=InferencePipeline.Variables.Stride, value=effective_stride)\n logging.debug(\n f\"Inference on image size {image_size} will run \"\n f\"with crop size {effective_crop} and stride {effective_stride}\")\n # In most cases, we will be able to read the output size from the pre-computed values\n # via get_output_size. Only if we have a non-standard (smaller) crop size do we re-compute the output size.\n output_size = model_config.get_output_size(execution_mode=ModelExecutionMode.TEST)\n if effective_crop != model_config.test_crop_size:\n output_size = model.get_output_shape(input_shape=effective_crop) # type: ignore\n self.pipeline.set_variable(name=InferencePipeline.Variables.OutputSize, value=output_size)\n\n if mask is not None:\n self.set_component(component=InferenceBatch.Components.Mask, data=mask)\n\n return self\n\n @action\n def pre_process(self) -> InferenceBatch:\n \"\"\"\n Prepare the input components of the batch for further processing.\n \"\"\"\n model_config = self.get_configs()\n\n # fetch the image channels from the batch\n image_channels = self.get_component(InferenceBatch.Components.ImageChannels)\n\n crop_size = self.pipeline.get_variable(InferencePipeline.Variables.CropSize)\n output_size = self.pipeline.get_variable(InferencePipeline.Variables.OutputSize)\n image_channels = image_util.pad_images_for_inference(\n images=image_channels,\n crop_size=crop_size,\n output_size=output_size,\n padding_mode=model_config.padding_mode\n )\n\n # update the post-processed components\n self.set_component(component=InferenceBatch.Components.ImageChannels, data=image_channels)\n\n return self\n\n @action\n def predict(self) -> InferenceBatch:\n \"\"\"\n Perform a forward pass of the model on the provided image; this generates\n a set of posterior maps for each class, as well as a segmentation output\n stored in the respective 'posteriors' and 'segmentation' components.\n \"\"\"\n model_config = self.get_configs()\n\n # extract patches for each image channel: Num patches x Channels x Z x Y x X\n patches = self._extract_patches_for_image_channels()\n\n # split the generated patches into batches and perform forward passes\n predictions = []\n batch_size = model_config.inference_batch_size\n\n for batch_idx in range(0, len(patches), batch_size):\n # slice over the batches to prepare batch\n batch = torch.tensor(patches[batch_idx: batch_idx + batch_size, ...]).float()\n if model_config.use_gpu:\n batch = batch.cuda()\n # perform the forward pass\n batch_predictions = self._model_fn(batch).detach().cpu().numpy()\n # collect the predictions over each of the batches\n predictions.append(batch_predictions)\n\n # map the batched predictions back to the original batch shape,\n # but with an added class dimension: Num patches x Class x Z x Y x X\n predictions = np.concatenate(predictions, axis=0)\n\n # create posterior output for each class with the shape: Class x Z x Y x X.
We use float32 as these\n # arrays can be big.\n output_image_shape = self.pipeline.get_variable(InferencePipeline.Variables.OutputImageShape)\n posteriors = np.zeros(shape=[model_config.number_of_classes] + list(output_image_shape), dtype=np.float32)\n stride = self.pipeline.get_variable(InferencePipeline.Variables.Stride)\n\n for c in range(len(posteriors)):\n # stitch the patches for each posterior class\n self.load_from_patches(predictions[:, c, ...], # type: ignore\n stride=stride,\n scan_shape=output_image_shape,\n data_attr=InferenceBatch.Components.Posteriors.value)\n # extract computed output from the component so the pipeline buffer can be reused\n posteriors[c] = self.get_component(InferenceBatch.Components.Posteriors)\n\n # store the stitched up results for the batch\n self.set_component(component=InferenceBatch.Components.Posteriors, data=posteriors)\n\n return self\n\n @action\n def post_process(self) -> InferenceBatch:\n \"\"\"\n Perform post-processing on the computed outputs of a single pass of the pipeline.\n Currently the following operations are performed:\n -------------------------------------------------------------------------------------\n 1) the mask is applied to the posteriors (if required).\n 2) the final posteriors are used to perform an argmax to generate a multi-label segmentation.\n 3) extract the largest foreground connected component in the segmentation if required\n \"\"\"\n mask = self.get_component(InferenceBatch.Components.Mask)\n posteriors = self.get_component(InferenceBatch.Components.Posteriors)\n if mask is not None:\n posteriors = image_util.apply_mask_to_posteriors(posteriors=posteriors, mask=mask)\n\n # create segmentation using an argmax over the posterior probabilities\n segmentation = image_util.posteriors_to_segmentation(posteriors)\n\n # update the post-processed posteriors and save the segmentation\n self.set_component(component=InferenceBatch.Components.Posteriors, data=posteriors)\n self.set_component(component=InferenceBatch.Components.Segmentation, data=segmentation)\n\n return self\n\n def get_configs(self) -> config.SegmentationModelBase:\n return self.pipeline.get_variable(InferencePipeline.Variables.ModelConfig)\n\n def get_component(self, component: InferenceBatch.Components) -> np.ndarray:\n return getattr(self, component.value) if hasattr(self, component.value) else None\n\n @inbatch_parallel(init='indices', post='_post_custom_components', target='threads')\n def set_component(self, batch_idx: int, component: InferenceBatch.Components, data: np.ndarray) \\\n -> Dict[str, Any]:\n logging.debug(\"Updated data in pipeline component: {}, for batch: {}.\".format(component.value, batch_idx))\n return {\n component.value: {'type': component.value, 'data': data}\n }\n\n def _extract_patches_for_image_channels(self) -> np.ndarray:\n \"\"\"\n Deterministically extracts patches from each image channel.\n :return: Patches for each image channel in format: Num patches x Channels x Z x Y x X\n \"\"\"\n model_config = self.get_configs()\n image_channels = self.get_component(InferenceBatch.Components.ImageChannels)\n # There may be cases where the test image is smaller than the test_crop_size. Adjust crop_size\n # to always fit into image, and adjust stride accordingly.
If test_crop_size is smaller than the\n # image, crop and stride will remain unchanged.\n crop_size = self.pipeline.get_variable(InferencePipeline.Variables.CropSize)\n stride = self.pipeline.get_variable(InferencePipeline.Variables.Stride)\n patches = []\n for channel_index, channel in enumerate(image_channels):\n # set the current image channel component to process\n self.set_component(component=InferenceBatch.Components.Images, data=channel)\n channel_patches = self.get_patches(patch_shape=crop_size,\n stride=stride,\n padding=model_config.padding_mode.value,\n data_attr=InferenceBatch.Components.Images.value)\n logging.debug(\n f\"Image channel {channel_index}: Tensor with extracted patches has size {channel_patches.shape}\")\n patches.append(channel_patches)\n # reset the images component\n self.set_component(component=InferenceBatch.Components.Images, data=[])\n\n return np.stack(patches, axis=1)\n\n def _model_fn(self, patches: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Wrapper function to handle the model forward pass\n :param patches: Image patches to be passed to the model in format Patches x Channels x Z x Y x X\n :return posteriors: Confidence maps [0,1] for each patch per class\n in format: Patches x Channels x Class x Z x Y x X\n \"\"\"\n model = self.pipeline.get_variable(InferencePipeline.Variables.Model)\n # Model forward pass returns posteriors\n with torch.no_grad():\n return model(patches)\n",
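The predict() action above is a textbook batched sliding-window inference loop: patches are grouped into fixed-size batches, pushed through the model with gradients disabled, and the per-patch posteriors are concatenated back together. A minimal self-contained sketch of that loop, assuming a generic PyTorch model (the helper name run_inference_on_patches is illustrative, not an InnerEye API):

import numpy as np
import torch

def run_inference_on_patches(model: torch.nn.Module,
                             patches: np.ndarray,
                             batch_size: int,
                             use_gpu: bool = False) -> np.ndarray:
    # patches: Num patches x Channels x Z x Y x X; returns posteriors of shape
    # Num patches x Class x Z x Y x X, mirroring InferenceBatch.predict().
    predictions = []
    with torch.no_grad():
        for start in range(0, len(patches), batch_size):
            batch = torch.tensor(patches[start:start + batch_size]).float()
            if use_gpu:
                batch = batch.cuda()
            predictions.append(model(batch).detach().cpu().numpy())
    return np.concatenate(predictions, axis=0)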
"# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom enum import Enum\nfrom typing import Any, Callable, List, Optional, Union\n\nimport numpy as np\nimport torch\nfrom torch.nn import ModuleList, Sequential\n\nfrom InnerEye.Common.type_annotations import TupleInt3\nfrom InnerEye.ML.config import PaddingMode\nfrom InnerEye.ML.dataset.scalar_sample import ScalarItem\nfrom InnerEye.ML.models.architectures.base_model import DeviceAwareModule\nfrom InnerEye.ML.models.architectures.mlp import MLP\nfrom InnerEye.ML.models.architectures.unet_3d import UNet3D\nfrom InnerEye.ML.models.layers.basic import BasicLayer\nfrom InnerEye.ML.models.layers.identity import Identity\nfrom InnerEye.ML.models.layers.pooling_layers import AveragePooling, Gated3dPoolingLayer, \\\n MaxPooling, MixPooling, ZAdaptive3dAvgLayer\nfrom InnerEye.ML.scalar_config import AggregationType\nfrom InnerEye.ML.utils.image_util import HDF5_NUM_SEGMENTATION_CLASSES, segmentation_to_one_hot\n\n\nclass ImagingFeatureType(Enum):\n Segmentation = \"Segmentation\"\n Image = \"Image\"\n ImageAndSegmentation = \"ImageAndSegmentation\"\n\n\nclass ImageAndNonImageFeaturesAggregator(torch.nn.Module):\n \"\"\"\n Aggregator module to combine imaging and non imaging features by concatenating.\n \"\"\"\n\n def forward(self, *item: torch.Tensor, **kwargs: Any) -> torch.Tensor:\n image_features, non_image_features = item[0], item[1]\n x = torch.cat([image_features.flatten(1), non_image_features], dim=1)\n return x\n\n\nclass ImageEncoder(DeviceAwareModule[ScalarItem, torch.Tensor]):\n \"\"\"\n An architecture for an image encoder that encodes the image with several UNet encoder blocks, and\n optionally appends non-imaging features to the encoder image features. This module hence creates the\n features to be used as an input for a classification or a regression module.\n \"\"\"\n\n def __init__(self,\n imaging_feature_type: ImagingFeatureType = ImagingFeatureType.Image,\n encode_channels_jointly: bool = False,\n num_image_channels: int = 1,\n num_encoder_blocks: int = 5,\n initial_feature_channels: int = 32,\n num_non_image_features: int = 0,\n padding_mode: PaddingMode = PaddingMode.NoPadding,\n kernel_size_per_encoding_block: Union[TupleInt3, List[TupleInt3]] = (1, 3, 3),\n stride_size_per_encoding_block: Union[TupleInt3, List[TupleInt3]] = (1, 2, 2),\n encoder_dimensionality_reduction_factor: float = 0.8,\n aggregation_type: AggregationType = AggregationType.Average,\n scan_size: Optional[TupleInt3] = None,\n ) -> None:\n \"\"\"\n Creates an image classifier that has UNet encoders sections for each image channel. The encoder output\n is fed through average pooling and an MLP.\n :param encode_channels_jointly: If False, create a UNet encoder structure separately for each channel. If True,\n encode all channels jointly (convolution will run over all channels).\n :param num_encoder_blocks: Number of UNet encoder blocks.\n :param initial_feature_channels: Number of feature channels in the first UNet encoder.\n :param num_image_channels: Number of channels of the input. 
Input is expected to be of size\n B x num_image_channels x Z x Y x X, where B is the batch dimension.\n :param num_non_image_features: Number of non imaging features will be used in the model.\n :param kernel_size_per_encoding_block: The size of the kernels per encoding block, assumed to be the same\n if a single tuple is provided. Otherwise the list of tuples must match num_encoder_blocks. Default\n performs convolutions only in X and Y.\n :param stride_size_per_encoding_block: The stride size for the encoding block, assumed to be the same\n if a single tuple is provided. Otherwise the list of tuples must match num_encoder_blocks. Default\n reduces spatial dimensions only in X and Y.\n :param encoder_dimensionality_reduction_factor: how to reduce the dimensionality of the image features in the\n combined model to balance with non imaging features.\n :param scan_size: should be a tuple representing 3D tensor shape and if specified it's usedd in initializing\n gated pooling or z-adaptive. The first element should be representing the z-direction for classification images\n \"\"\"\n super().__init__()\n self.num_non_image_features = num_non_image_features\n self.imaging_feature_type = imaging_feature_type\n if isinstance(kernel_size_per_encoding_block, list):\n if len(kernel_size_per_encoding_block) != num_encoder_blocks:\n raise ValueError(f\"expected kernel_size_per_encoding_block to be of \"\n f\"length {num_encoder_blocks} found {len(kernel_size_per_encoding_block)}\")\n self.kernel_size_per_encoding_block = kernel_size_per_encoding_block\n else:\n self.kernel_size_per_encoding_block = [kernel_size_per_encoding_block] * num_encoder_blocks\n\n if isinstance(stride_size_per_encoding_block, list):\n if len(stride_size_per_encoding_block) != num_encoder_blocks:\n raise ValueError(f\"expected stride_size_per_encoding_block to be of \"\n f\"length {num_encoder_blocks} found {len(stride_size_per_encoding_block)}\")\n self.stride_size_per_encoding_block = stride_size_per_encoding_block\n else:\n self.stride_size_per_encoding_block = [stride_size_per_encoding_block] * num_encoder_blocks\n self.conv_in_3d = np.any([k[0] != 1 for k in self.kernel_size_per_encoding_block]) \\\n or np.any([s[0] != 1 for s in self.stride_size_per_encoding_block])\n self.padding_mode = padding_mode\n self.encode_channels_jointly = encode_channels_jointly\n self.num_image_channels = num_image_channels\n self.image_and_non_image_features_aggregator = None\n fcn_channels = [initial_feature_channels * i for i in range(1, num_encoder_blocks)]\n if encode_channels_jointly:\n # Segmentations are encoded as one-hot tensors, separately for each of the input channels.\n # 10 classes for 2 image input channels would create a tensor of size [10*2, Z, Y, X]\n if self.imaging_feature_type == ImagingFeatureType.Segmentation:\n self.encoder_input_channels = num_image_channels * HDF5_NUM_SEGMENTATION_CLASSES\n elif self.imaging_feature_type == ImagingFeatureType.ImageAndSegmentation:\n self.encoder_input_channels = num_image_channels * (HDF5_NUM_SEGMENTATION_CLASSES + 1)\n elif self.imaging_feature_type == ImagingFeatureType.Image:\n self.encoder_input_channels = num_image_channels\n else:\n raise NotImplementedError(f\"Image feature type {self.imaging_feature_type} is not supported yet.\")\n _encoder: ModuleList = self.create_encoder([self.encoder_input_channels] + fcn_channels)\n final_num_feature_channels = fcn_channels[-1]\n else:\n # When working with segmentations as inputs: Feed every group of 10 per-class channels through the 
encoder\n # When working with normal images, each image input channel is treated separately.\n if self.imaging_feature_type == ImagingFeatureType.Segmentation:\n self.encoder_input_channels = HDF5_NUM_SEGMENTATION_CLASSES\n elif self.imaging_feature_type == ImagingFeatureType.ImageAndSegmentation:\n self.encoder_input_channels = HDF5_NUM_SEGMENTATION_CLASSES + 1\n elif self.imaging_feature_type == ImagingFeatureType.Image:\n self.encoder_input_channels = 1\n else:\n raise NotImplementedError(f\"Image feature type {self.imaging_feature_type} is not supported yet.\")\n _encoder = self.create_encoder([self.encoder_input_channels] + fcn_channels)\n final_num_feature_channels = fcn_channels[-1] * num_image_channels\n\n # Name of the last layer of the encoder to use for GradCam computation\n self.last_encoder_layer: List[str] = [\"encoder\", f\"{len([self.encoder_input_channels] + fcn_channels) - 2}\",\n \"block2\"]\n\n if num_non_image_features > 0:\n self.image_and_non_image_features_aggregator = self.create_non_image_and_image_aggregator()\n if encoder_dimensionality_reduction_factor < 1:\n # reduce the dimensionality of the image features to be the same as the non-image features\n # so that we can balance the input representation\n reduced_num_img_features = max(int(encoder_dimensionality_reduction_factor * fcn_channels[-1]), 1)\n _encoder.append(BasicLayer(\n channels=(fcn_channels[-1], reduced_num_img_features),\n kernel_size=(1, 3, 3),\n stride=(1, 2, 2),\n activation=None,\n padding=padding_mode\n ))\n self.last_encoder_layer = [\"encoder\", f\"{len([self.encoder_input_channels] + fcn_channels) - 1}\", \"bn1\"]\n if encode_channels_jointly:\n final_num_feature_channels = reduced_num_img_features\n else:\n final_num_feature_channels = (reduced_num_img_features * num_image_channels)\n final_num_feature_channels += num_non_image_features\n self.final_num_feature_channels = final_num_feature_channels\n self.encoder = Sequential(*_encoder) # type: ignore\n\n self.aggregation_layer = self._get_aggregation_layer(aggregation_type, scan_size)\n\n def get_last_encoder_layer_names(self) -> List[str]:\n return self.last_encoder_layer\n\n def _get_aggregation_layer(self, aggregation_type: AggregationType, scan_size: Optional[TupleInt3]) -> Any:\n \"\"\"\n Returns the aggregation layer as specified by the config\n :param aggregation_type: name of the aggregation\n :param scan_size: [Z, Y, X] size of the scans\n \"\"\"\n if aggregation_type == AggregationType.Average:\n return AveragePooling()\n elif aggregation_type == AggregationType.MixPooling:\n return MixPooling()\n elif aggregation_type == AggregationType.MaxPooling:\n return MaxPooling()\n else:\n assert scan_size is not None\n input_size = [1, self.encoder_input_channels, *scan_size]\n output = self.encoder(torch.ones(input_size))\n if aggregation_type == AggregationType.GatedPooling:\n return Gated3dPoolingLayer(output.shape[2] * output.shape[3] * output.shape[4])\n elif aggregation_type == AggregationType.ZAdaptive3dAvg:\n return ZAdaptive3dAvgLayer(output.shape[2])\n else:\n raise ValueError(f\"The aggregation type {aggregation_type} is not recognized\")\n\n def get_input_tensors(self, item: ScalarItem) -> List[torch.Tensor]:\n \"\"\"\n Transforms a classification item into a torch.Tensor that the forward pass can consume\n :param item: ClassificationItem\n :return: Tensor\n \"\"\"\n use_gpu = self.is_model_on_gpu()\n if self.imaging_feature_type == ImagingFeatureType.Segmentation \\\n or self.imaging_feature_type == 
ImagingFeatureType.ImageAndSegmentation:\n if item.segmentations is None:\n raise ValueError(\"Expected item.segmentations to not be None\")\n # Special case need for the loading of individual positions in the sequence model,\n # the images are loaded as [C, Z, X, Y] but the segmentation_to_one_hot expects [B, C, Z, X, Y]\n segmentation_multilabel = item.segmentations\n is_4dim = segmentation_multilabel.ndimension() == 4\n if is_4dim:\n segmentation_multilabel = segmentation_multilabel.unsqueeze(dim=0)\n segmentation_one_hot = segmentation_to_one_hot(segmentation_multilabel,\n use_gpu=use_gpu,\n result_dtype=torch.float32)\n if is_4dim:\n segmentation_one_hot = segmentation_one_hot.squeeze(dim=0)\n input_tensors = [segmentation_one_hot]\n\n if self.imaging_feature_type == ImagingFeatureType.ImageAndSegmentation:\n input_tensors.append(item.images)\n _dim = 0 if item.images.ndimension() == 4 else 1\n input_tensors = [torch.cat(input_tensors, dim=_dim)]\n else:\n input_tensors = [item.images]\n\n if self.image_and_non_image_features_aggregator:\n input_tensors.append(item.get_all_non_imaging_features())\n return input_tensors\n\n def forward(self, *item: torch.Tensor, **kwargs: Any) -> torch.Tensor:\n x = item[0]\n x = self.encode_and_aggregate(x)\n # combine non image features if required\n if self.image_and_non_image_features_aggregator:\n x = self.image_and_non_image_features_aggregator(x, item[1].float())\n return x\n\n def encode_and_aggregate(self, x: torch.Tensor) -> torch.Tensor:\n return encode_and_aggregate(encoder=self.encoder,\n num_encoder_input_channels=self.encoder_input_channels,\n num_image_channels=self.num_image_channels,\n encode_channels_jointly=self.encode_channels_jointly,\n aggregation_layer=self.aggregation_layer,\n input_tensor=x)\n\n def create_non_image_and_image_aggregator(self) -> ImageAndNonImageFeaturesAggregator:\n return ImageAndNonImageFeaturesAggregator()\n\n def create_encoder(self, channels: List[int]) -> ModuleList:\n \"\"\"\n Create an image encoder network.\n \"\"\"\n layers = []\n for i in range(len(channels) - 1):\n layers.append(\n UNet3D.UNetEncodeBlock(\n channels=(channels[i], channels[i + 1]),\n kernel_size=self.kernel_size_per_encoding_block[i],\n downsampling_stride=self.stride_size_per_encoding_block[i],\n padding_mode=self.padding_mode,\n use_residual=False,\n depth=i,\n )\n )\n return ModuleList(layers)\n\n\nclass ImageEncoderWithMlp(ImageEncoder):\n \"\"\"\n An architecture for an image classifier that first encodes the image with several UNet encoder blocks,\n and then feeds the resulting features through a multi layer perceptron (MLP). The architecture can handle\n multiple input channels. 
Each input channel is fed either through a separate UNet encoder pathway (if\n the argument encode_channels_jointly is False) or together with all other channels (if encode_channels_jointly is\n True). The latter makes the implicit assumption that the channels are spatially aligned.\n \"\"\"\n\n def __init__(self,\n mlp_dropout: float = 0.5,\n final_activation: torch.nn.Module = Identity(),\n imaging_feature_type: ImagingFeatureType = ImagingFeatureType.Image,\n encode_channels_jointly: bool = False,\n num_image_channels: int = 1,\n num_encoder_blocks: int = 5,\n initial_feature_channels: int = 32,\n num_non_image_features: int = 0,\n padding_mode: PaddingMode = PaddingMode.NoPadding,\n kernel_size_per_encoding_block: Union[TupleInt3, List[TupleInt3]] = (1, 3, 3),\n stride_size_per_encoding_block: Union[TupleInt3, List[TupleInt3]] = (1, 2, 2),\n encoder_dimensionality_reduction_factor: float = 0.8,\n aggregation_type: AggregationType = AggregationType.Average,\n scan_size: Optional[TupleInt3] = None,\n ) -> None:\n \"\"\"\n Creates an image classifier that has UNet encoder sections for each image channel. The encoder output\n is fed through average pooling and an MLP. Extension of the ImageEncoder class using an MLP as classification\n layer.\n :param encode_channels_jointly: If False, create a UNet encoder structure separately for each channel. If True,\n encode all channels jointly (convolution will run over all channels).\n :param num_encoder_blocks: Number of UNet encoder blocks.\n :param initial_feature_channels: Number of feature channels in the first UNet encoder.\n :param num_image_channels: Number of channels of the input. Input is expected to be of size\n B x num_image_channels x Z x Y x X, where B is the batch dimension.\n :param mlp_dropout: The amount of dropout that should be applied between the two layers of the classifier MLP.\n :param final_activation: Activation function to normalize the logits; default is Identity.\n :param num_non_image_features: Number of non-imaging features that will be used in the model.\n :param kernel_size_per_encoding_block: The size of the kernels per encoding block, assumed to be the same\n if a single tuple is provided. Otherwise the list of tuples must match num_encoder_blocks. Default\n performs convolutions only in X and Y.\n :param stride_size_per_encoding_block: The stride size for the encoding block, assumed to be the same\n if a single tuple is provided. Otherwise the list of tuples must match num_encoder_blocks. Default\n reduces spatial dimensions only in X and Y.\n :param encoder_dimensionality_reduction_factor: how to reduce the dimensionality of the image features in the\n combined model to balance with non-imaging features.\n :param scan_size: should be a tuple representing 3D tensor shape and if specified it's used in initializing\n gated pooling or z-adaptive.
The first element should be representing the z-direction for classification images\n \"\"\"\n super().__init__(imaging_feature_type=imaging_feature_type,\n encode_channels_jointly=encode_channels_jointly,\n num_image_channels=num_image_channels,\n num_encoder_blocks=num_encoder_blocks,\n initial_feature_channels=initial_feature_channels,\n num_non_image_features=num_non_image_features,\n padding_mode=padding_mode,\n kernel_size_per_encoding_block=kernel_size_per_encoding_block,\n stride_size_per_encoding_block=stride_size_per_encoding_block,\n encoder_dimensionality_reduction_factor=encoder_dimensionality_reduction_factor,\n aggregation_type=aggregation_type,\n scan_size=scan_size)\n self.classification_layer = create_mlp(self.final_num_feature_channels, mlp_dropout)\n self.final_activation = final_activation\n\n def forward(self, *item: torch.Tensor, **kwargs: Any) -> torch.Tensor:\n x = super(ImageEncoderWithMlp, self).forward(*item)\n # pass all the features to the MLP\n x = self.classification_layer(x.view(-1, x.shape[1]))\n return self.final_activation(x)\n\n\ndef encode_and_aggregate(input_tensor: torch.Tensor,\n encoder: torch.nn.Module,\n num_encoder_input_channels: int,\n num_image_channels: int,\n encode_channels_jointly: bool,\n aggregation_layer: Callable) -> torch.Tensor:\n \"\"\"\n Function that encodes a given input tensor either jointly using the encoder or separately for each channel\n in a sequential manner. Features obtained at the output encoder are then aggregated with the pooling function\n defined by `aggregation layer`.\n \"\"\"\n if encode_channels_jointly:\n input_tensor = encoder(input_tensor)\n input_tensor = aggregation_layer(input_tensor)\n else:\n shape = input_tensor.shape\n channel_shape = (shape[0], num_encoder_input_channels, shape[2], shape[3], shape[4])\n encode_and_aggregate = []\n # When using multiple encoders, it is more memory efficient to aggregate the individual\n # encoder outputs and then stack those smaller results, rather than stack huge outputs and aggregate.\n for i in range(num_image_channels):\n start_index = i * num_encoder_input_channels\n end_index = start_index + num_encoder_input_channels\n encoder_output = encoder(input_tensor[:, start_index:end_index].view(channel_shape))\n aggregated = aggregation_layer(encoder_output)\n encode_and_aggregate.append(aggregated)\n input_tensor = torch.cat(encode_and_aggregate, dim=1)\n return input_tensor\n\n\ndef create_mlp(input_num_feature_channels: int,\n dropout: float,\n final_output_channels: int = 1,\n final_layer: Optional[torch.nn.Module] = None,\n hidden_layer_num_feature_channels: Optional[int] = None) -> MLP:\n \"\"\"\n Create an MLP with 1 hidden layer.\n :param input_num_feature_channels: The number of input channels to the first MLP layer.\n :param dropout: The drop out factor that should be applied between the first and second MLP layer.\n :param final_output_channels: if provided, the final number of output channels.\n :param final_layer: if provided, the final (activation) layer to apply\n :param hidden_layer_num_feature_channels: if provided, will be used to create hidden layers, If None then\n input_num_feature_channels // 2 will be used to create the hidden layer.\n :return:\n \"\"\"\n layers: List[torch.nn.Module] = []\n hidden_layer_num_feature_channels = hidden_layer_num_feature_channels \\\n if hidden_layer_num_feature_channels else input_num_feature_channels // 2\n channels: List[int] = [input_num_feature_channels, hidden_layer_num_feature_channels, 
final_output_channels]\n dropouts: List[float] = [dropout, 0.0]\n use_layer_normalisation: List[bool] = [True, False]\n activation: List[torch.nn.Module] = [torch.nn.Tanh(), Identity()]\n\n for i in range(len(channels) - 1):\n layers.append(\n MLP.HiddenLayer(\n channels=(channels[i], channels[i + 1]),\n dropout=dropouts[i],\n use_layer_normalisation=use_layer_normalisation[i],\n activation=activation[i]\n )\n )\n\n if final_layer:\n layers.append(final_layer)\n\n return MLP(layers) # type: ignore\n",
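When encode_channels_jointly is False, the encode_and_aggregate function above deliberately pools each channel group's encoder output before concatenating, so peak memory stays proportional to a single channel's feature map rather than to all channels stacked. A toy sketch of that pattern, with stand-in encoder and pooling modules in place of the UNet blocks (all shapes here are assumptions for illustration):

import torch

encoder = torch.nn.Conv3d(1, 4, kernel_size=3)             # stand-in for the UNet encoder
aggregate = lambda t: t.mean(dim=(2, 3, 4), keepdim=True)  # stand-in average pooling

x = torch.randn(2, 3, 16, 32, 32)  # B x num_image_channels x Z x Y x X
per_channel = []
for i in range(x.shape[1]):
    encoded = encoder(x[:, i:i + 1])        # encode one channel group at a time
    per_channel.append(aggregate(encoded))  # pool immediately to keep memory low
features = torch.cat(per_channel, dim=1)    # B x (4 * num_image_channels) x 1 x 1 x 1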
"# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom __future__ import annotations\n\nfrom typing import Dict, List\n\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, ExponentialLR, LambdaLR, MultiStepLR, StepLR, _LRScheduler\nfrom torch.optim.optimizer import Optimizer\n\nfrom InnerEye.ML.deep_learning_config import LRSchedulerType, LRWarmUpType, OptimizerParams\n\n\ndef get_current_learning_rates(optimizer: Optimizer) -> List[float]:\n \"\"\"\n Reads the current values of the learning rate(s) for all parameter groups from the optimizer.\n \"\"\"\n return [group['lr'] for group in optimizer.param_groups]\n\n\nclass LinearWarmUp(_LRScheduler):\n \"\"\"\n Implements linear warmup up to a given initial learning rate.\n \"\"\"\n\n def __init__(self, optimizer: Optimizer, warmup_epochs: int, final_lr: float, last_epoch: int = -1):\n if warmup_epochs < 0:\n raise ValueError(\"The number of warmup epochs must be >= 0.\")\n self.warmup_epochs = warmup_epochs\n self.final_lr = final_lr\n self.last_epoch = last_epoch\n super().__init__(optimizer, last_epoch)\n\n def warmup_multiplier(self) -> float:\n if self.warmup_epochs <= 0:\n return 1.0\n if self.last_epoch >= self.warmup_epochs:\n return 1.0\n return (self.last_epoch + 1) / (self.warmup_epochs + 1)\n\n def get_lr(self) -> List[float]: # type: ignore\n return [self.final_lr * self.warmup_multiplier()]\n\n\nclass PolynomialLR:\n def __init__(self, gamma: float, l_rate: float, min_l_rate: float, epochs_after_warmup: int) -> None:\n self.gamma = gamma\n self.l_rate = l_rate\n self.min_l_rate = min_l_rate\n self.epochs_after_warmup = epochs_after_warmup\n\n def get_lr(self, epoch: int) -> float:\n x = self.min_l_rate / self.l_rate\n return (1 - x) * ((1. 
- float(epoch) / self.epochs_after_warmup) ** self.gamma) + x\n\n\nclass SchedulerWithWarmUp(_LRScheduler):\n \"\"\"\n LR Scheduler which runs a warmup schedule (linear ramp-up) for a few iterations, and then switches to one\n of the normal schedulers.\n \"\"\"\n\n def __init__(self, args: OptimizerParams, optimizer: Optimizer, num_epochs: int, last_epoch: int = -1):\n self.optimizer = optimizer\n self.last_epoch = last_epoch\n self.num_epochs = num_epochs\n self.warmup_epochs = 0 if args.l_rate_warmup == LRWarmUpType.NoWarmUp else args.l_rate_warmup_epochs\n self._scheduler = self.get_scheduler(args)\n # This must be called after self.get_scheduler, because we want the optimizer to have the learning rate\n # guided by the warmup schedule\n self._warmup = LinearWarmUp(optimizer,\n warmup_epochs=self.warmup_epochs,\n final_lr=args.l_rate,\n last_epoch=last_epoch)\n self._last_lr = get_current_learning_rates(optimizer)\n self.min_l_rate = args.min_l_rate\n super().__init__(optimizer, last_epoch)\n\n def get_scheduler(self, args: OptimizerParams) -> _LRScheduler:\n \"\"\"\n Create the LR scheduler that will be used after warmup, based on the config params.\n \"\"\"\n scheduler: _LRScheduler\n epochs_after_warmup = self.num_epochs - self.warmup_epochs\n if args.l_rate_scheduler == LRSchedulerType.Exponential:\n scheduler = ExponentialLR(optimizer=self.optimizer,\n gamma=args.l_rate_exponential_gamma,\n last_epoch=self.last_epoch)\n elif args.l_rate_scheduler == LRSchedulerType.Step:\n scheduler = StepLR(optimizer=self.optimizer,\n step_size=args.l_rate_step_step_size,\n gamma=args.l_rate_step_gamma,\n last_epoch=self.last_epoch)\n elif args.l_rate_scheduler == LRSchedulerType.MultiStep:\n assert args.l_rate_multi_step_milestones is not None\n scheduler = MultiStepLR(optimizer=self.optimizer,\n milestones=args.l_rate_multi_step_milestones,\n gamma=args.l_rate_multi_step_gamma,\n last_epoch=self.last_epoch)\n elif args.l_rate_scheduler == LRSchedulerType.Polynomial:\n polynomial_lr = PolynomialLR(gamma=args.l_rate_polynomial_gamma,\n l_rate=args.l_rate,\n min_l_rate=args.min_l_rate,\n epochs_after_warmup=epochs_after_warmup)\n scheduler = LambdaLR(optimizer=self.optimizer,\n lr_lambda=polynomial_lr.get_lr,\n last_epoch=self.last_epoch)\n elif args.l_rate_scheduler == LRSchedulerType.Cosine:\n scheduler = CosineAnnealingLR(optimizer=self.optimizer,\n T_max=epochs_after_warmup,\n eta_min=args.min_l_rate,\n last_epoch=self.last_epoch)\n else:\n raise ValueError(\"Unknown learning rate scheduler {}\".format(args.l_rate_scheduler))\n return scheduler\n\n def state_dict(self) -> Dict:\n \"\"\"\n Returns a dictionary with all the values in this objects __dict__.\n It creates the dictionary entry for variables \"_scheduler\" and \"_warmup_scheduler\" separately, by calling\n state_dict for these variables.\n The state dict does not include the state of the optimizer.\n \"\"\"\n state_dict = {key: val for key, val in self.__dict__.items()\n if key != \"_scheduler\" and key != \"optimizer\" and key != \"_warmup\"}\n\n state_dict['_scheduler'] = self._scheduler.state_dict()\n state_dict['_warmup'] = self._warmup.state_dict()\n return state_dict\n\n def load_state_dict(self, state_dict: Dict) -> None:\n \"\"\"\n Initializes the current object with values from state_dict.\n Initializes variables \"_scheduler\" and \"_warmup_scheduler\" separately, by calling load_state_dict\n for these variables.\n \"\"\"\n top_level = {key: val for key, val in state_dict.items() if key != \"_scheduler\" and key != 
\"_warmup\"}\n self.__dict__.update(top_level)\n self._scheduler.load_state_dict(state_dict[\"_scheduler\"])\n self._warmup.load_state_dict(state_dict[\"_warmup\"])\n\n def step(self, epoch: int = None) -> None:\n # self.step() is called in the _LRScheduler.__init__, as the very last operation, when self.last_epoch == -1\n # Inside of the default implementation of self.step, it calls\n # self.last_epoch += 1\n # values = self.get_lr()\n # The values are then set in the optimizer, and stored in self._last_lr\n if epoch is not None:\n raise ValueError(\"Calling scheduler.step with an epoch argument will be deprecated.\")\n # self.step is called from within the base class constructor, _LRScheduler.__init__\n # The scheduler itself has already been initialized, and scheduler.step has also been called already in\n # the respective constructor. Avoid calling it again here.\n if self.last_epoch != -1:\n if self.last_epoch < self._warmup.warmup_epochs:\n self._warmup.step()\n else:\n self._scheduler.step()\n self.last_epoch += 1\n self._last_lr = get_current_learning_rates(self.optimizer)\n\n def get_last_lr(self) -> List[float]:\n return self._last_lr\n",
"# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nimport torch\n\nfrom typing import Union, Tuple\nfrom torch import nn\n\n# To use weights from a pretrained model, we need eps to match\n# https://github.com/google-research/big_transfer/blob/0bb237d6e34ab770b56502c90424d262e565a7f3/bit_pytorch/models.py#L30\neps = 1e-10\n\n\nclass WeightStandardizedConv2d(nn.Conv2d):\n \"\"\"\n Weight Standardization\n https://arxiv.org/pdf/1903.10520.pdf\n \"\"\"\n\n def __init__(self,\n in_channels: int,\n out_channels: int,\n kernel_size: Union[int, Tuple[int, int]],\n stride: Union[int, Tuple[int, int]] = 1,\n padding: Union[int, Tuple[int, int]] = 0,\n dilation: Union[int, Tuple[int, int]] = 1,\n groups: int = 1,\n bias: bool = True,\n padding_mode: str = 'zeros'):\n super().__init__(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias,\n padding_mode=padding_mode)\n\n @staticmethod\n def standardize(weights: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Normalize weights on a per-kernel basis for all kernels.\n \"\"\"\n assert weights.ndim == 4 # type: ignore\n mean = torch.mean(weights, dim=(1, 2, 3), keepdim=True)\n variance = torch.var(weights, dim=(1, 2, 3), keepdim=True, unbiased=False)\n standardized_weights = (weights - mean) / torch.sqrt(variance + eps)\n return standardized_weights\n\n def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore\n standardized_weights = WeightStandardizedConv2d.standardize(self.weight)\n return self._conv_forward(input, standardized_weights, bias=None) # type: ignore\n"
] | [
[
"numpy.unique",
"numpy.stack",
"torch.tensor",
"numpy.concatenate",
"torch.no_grad",
"numpy.array"
],
[
"torch.nn.Sequential",
"torch.ones",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Tanh",
"numpy.any"
],
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.optim.lr_scheduler.LambdaLR",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.optim.lr_scheduler.StepLR"
],
[
"torch.var",
"torch.mean",
"torch.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
smartdolphin/variational-autoencoder | [
"999e00c1f630d1e3b6b433c965f87d236ba18668",
"999e00c1f630d1e3b6b433c965f87d236ba18668"
] | [
"util/metric.py",
"model/base.py"
] | [
"from collections import Counter\nimport numpy as np\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\n\ndef __majority(arr):\n counter = Counter(arr)\n value, _ = counter.most_common(1)[0]\n return value\n\n\ndef clustering_accuracy(y_true, y_clustering):\n clustering_labels = list(set(y_clustering))\n new_labels = np.zeros_like(y_clustering)\n for clustering_label in clustering_labels:\n locator = y_clustering == clustering_label\n locations = np.argwhere(locator)\n real_labels = y_true[locations].ravel()\n major_label = __majority(real_labels)\n new_labels[locator] = major_label\n return accuracy_score(y_true, new_labels)\n\n\ndef confusion_matrix_majority(y_true, y_clustering):\n clustering_labels = list(set(y_clustering))\n new_labels = np.zeros_like(y_clustering)\n for clustering_label in clustering_labels:\n locator = y_clustering == clustering_label\n locations = np.argwhere(locator)\n real_labels = y_true[locations].ravel()\n major_label = __majority(real_labels)\n new_labels[locator] = major_label\n return confusion_matrix(y_true, new_labels)\n",
"from abc import abstractmethod, ABC\nfrom sklearn.metrics import log_loss, mean_squared_error\n\nclass Encoder(ABC):\n @abstractmethod\n def encode(self, x):\n pass\n\n @abstractmethod\n def decode(self, encoded_x):\n pass\n\n def recon_error(self, x, metric='cross_entropy'):\n encoded_x = self.encode(x)\n decoded_x = self.decode(encoded_x)\n if metric == 'cross_entropy':\n error = log_loss(x, decoded_x)\n elif metric == 'mean_square_error':\n error = mean_squared_error(x, decoded_x)\n else:\n raise ValueError('%s metric is not supported' % metric)\n return error"
] | [
[
"numpy.argwhere",
"numpy.zeros_like",
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.accuracy_score"
],
[
"sklearn.metrics.log_loss",
"sklearn.metrics.mean_squared_error"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
patelshival/ga-dsmp | [
"c355d28daf50c51b1610930f963dcd17b770e17a"
] | [
"numpy-arrays/code.py"
] | [
"# --------------\n# Importing header files\r\nimport numpy as np\r\n\r\n# Path of the file has been stored in variable called 'path'\r\n\r\n#New record\r\nnew_record=[[50, 9, 4, 1, 0, 0, 40, 0]]\r\n\r\ndata = np.genfromtxt(path, delimiter=\",\", skip_header=1)\r\n\r\nprint(\"\\nData: \\n\\n\", data)\r\n\r\nprint(\"\\nType of data: \\n\\n\", type(data))\r\n\r\ncensus = np.concatenate((data, new_record), axis=0)\r\n\r\nprint(census)\r\n#Code starts here\r\n\n\n\n# --------------\n#Code starts here\r\nage=census[:,0]\r\n\r\nprint(age)\r\n\r\nmax_age = np.max(age)\r\nmin_age = np.min(age)\r\nage_mean = np.mean(age)\r\nage_std = np.std(age)\r\n\r\nprint(\"max of age : \", max_age)\r\nprint(\"min of age : \", min_age)\r\nprint(\"mean of age : \", age_mean)\r\nprint(\"standard deviation of age : \", age_std)\n\n\n# --------------\n#Code starts here\r\nrace_0 = census[census[:,2] == 0]\r\nrace_1 = census[census[:,2] == 1]\r\nrace_2 = census[census[:,2] == 2]\r\nrace_3 = census[census[:,2] == 3]\r\nrace_4 = census[census[:,2] == 4]\r\n\r\nlen_0 = len(race_0)\r\nlen_1 = len(race_1)\r\nlen_2 = len(race_2)\r\nlen_3 = len(race_3)\r\nlen_4 = len(race_4)\r\n\r\nminority_race = 3\r\nprint(race_0)\r\n\n\n\n# --------------\n#Code starts here\r\nsenior_citizens = census[census[:, 0] > 60]\r\nworking_hours = senior_citizens[:,6]\r\nworking_hours_sum = working_hours.sum()\r\nsenior_citizens_len = len(senior_citizens)\r\navg_working_hours = working_hours_sum / senior_citizens_len\r\nprint(avg_working_hours)\n\n\n# --------------\n#Code starts here\r\nhigh = census[census[:,1] > 10]\r\nlow = census[census[:,1] <= 10]\r\n\r\navg_pay_high = high[:, 7].mean()\r\navg_pay_low = low[:, 7].mean()\r\n\r\nif avg_pay_high > avg_pay_low:\r\n print(\"Better education leads to better pay\")\r\nelse:\r\n print(\"Better education does not lead to better pay\") \n\n\n"
] | [
[
"numpy.min",
"numpy.genfromtxt",
"numpy.concatenate",
"numpy.max",
"numpy.std",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jloveric/high-order-layers-torch | [
"a50ccf0cf82c21fdda4c20c671e7d233a0b6f793"
] | [
"high_order_layers_torch/FunctionalConvolution.py"
] | [
"from .LagrangePolynomial import LagrangeExpand\nfrom pytorch_lightning import LightningModule, Trainer\n\nfrom high_order_layers_torch.PolynomialLayers import *\nfrom torch.nn import Conv2d\nimport torch.nn as nn\nimport torch\nfrom .utils import *\n\n\ndef conv2d_wrapper(\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n stride: int = 1,\n padding: int = 0,\n dilation: int = 1,\n groups: int = 1,\n padding_mode: str = 'zeros',\n weight_magnitude: float = 1.0,\n rescale_output: bool = False,\n verbose: bool = False,\n ** kwargs\n):\n \"\"\"\n Inputs need to be an exact clone of those in torch conv2d including\n defaults. Function allows you to pass extra arguments without braking\n conv2d.\n \"\"\"\n\n conv = Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n # Bias should always be false as the bias is already included in these methods.\n bias=False,\n padding_mode=padding_mode,\n )\n in_features = in_channels*kernel_size*kernel_size\n\n if verbose is True:\n print('in_channels', in_channels, 'out_channels', out_channels)\n print('conv.weight.shape', conv.weight.shape)\n\n # We don't want to use the standard conv initialization\n # since this is a bit different.\n if rescale_output is False:\n conv.weight.data.uniform_(-weight_magnitude/in_features,\n weight_magnitude/in_features)\n elif rescale_output is True:\n conv.weight.data.uniform_(-weight_magnitude, weight_magnitude)\n else:\n print('Using kaiming for weight initialization')\n\n return conv\n\n\nclass Expansion2d(nn.Module):\n def __init__(self, basis=None):\n \"\"\"\n Expand an input by a function defined by basis.\n\n Args :\n - basis: function to expand input by.\n \"\"\"\n super().__init__()\n if basis == None:\n raise Exception(\n 'You must define the basis function in ExpansionLayer2D')\n self.basis = basis\n\n def build(self, input_shape):\n pass\n\n def __call__(self, inputs):\n \"\"\"\n Expand input\n Args :\n inputs : Tensor of shape [batches, channels, height, width]\n Return :\n Tensor of shape [batches, channels*(basis size), height, width]\n \"\"\"\n res = self.basis(\n inputs) # outputs [basis_size, batches, channels, height, width]\n res = res.permute(1, 3, 4, 2, 0)\n res = torch.reshape(\n res, [res.shape[0], res.shape[1],\n res.shape[2], res.shape[3]*res.shape[4]]\n )\n res = res.permute(0, 3, 1, 2)\n return res\n\n\nclass Expansion1d(nn.Module):\n def __init__(self, basis=None):\n \"\"\"\n Expand an input by a function defined by basis.\n\n Args :\n - basis: function to expand input by.\n \"\"\"\n super().__init__()\n if basis == None:\n raise Exception(\n 'You must define the basis function in ExpansionLayer2D')\n self.basis = basis\n\n def build(self, input_shape):\n pass\n\n def __call__(self, inputs):\n \"\"\"\n Expand input\n Args :\n inputs : Tensor of shape [batches, channels, width]\n Return :\n Tensor of shape [batches, channels*(basis size), width]\n \"\"\"\n res = self.basis(\n inputs) # outputs [basis_size, batches, channels, width]\n res = res.permute(1, 3, 2, 0)\n res = torch.reshape(\n res, [res.shape[0], res.shape[1], res.shape[2]*res.shape[3]]\n )\n res = res.permute(0, 2, 1) # batches, basis_size*channels, width\n return res\n\n\nclass FourierConvolution2d(nn.Module):\n\n def __init__(self, n: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output=False, *args, **kwargs):\n \"\"\"\n Fourier series convolutional layer.\n\n Args :\n - n : number 
of fourier series components. n=1 is a constant, n=3 contains both the first sine and cosine components.\n - in_channels : number of input channels\n - kernel_size : size of the kernel\n - length : Range of the polynomial interpolation points. length = 2 implies [-1, 1] so the interpolation points\n are in that range. Anything outside that range could grow.\n - rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,\n in effect taking the average. This is generally not necessary for the fourier series.\n \"\"\"\n super().__init__()\n self.poly = Expansion2d(FourierExpand(n, length))\n self._channels = n*in_channels\n self.conv = conv2d_wrapper(in_channels=self._channels,\n kernel_size=kernel_size, **kwargs)\n self._total_in = in_channels*kernel_size*kernel_size\n self._rescale = 1.0\n if rescale_output is True:\n self._rescale = 1.0/self._total_in\n\n def forward(self, x):\n x = self.poly(x)\n out = self.conv(x)\n return out*self._rescale\n\n\nclass PolynomialConvolution2d(nn.Module):\n def __init__(self, n: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output=False, periodicity: float = None, *args, **kwargs):\n \"\"\"\n Polynomial convolutional layer.\n\n Args :\n - n : number of weights or nodes. Polynomial order is n-1 so quadratic would be n=3.\n - in_channels : number of input channels\n - kernel_size : size of the kernel\n - length : Range of the polynomial interpolation points. length = 2 implies [-1, 1] so the interpolation points\n are in that range. Anything outside that range could grow.\n - rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,\n in effect taking the average.\n \"\"\"\n super().__init__()\n self.poly = Expansion2d(LagrangeExpand(n, length=length))\n self._channels = n*in_channels\n self.periodicity = periodicity\n self.conv = conv2d_wrapper(in_channels=self._channels,\n kernel_size=kernel_size, **kwargs)\n self._total_in = in_channels*kernel_size*kernel_size\n self._rescale = 1.0\n if rescale_output is True:\n self._rescale = 1.0/self._total_in\n\n def forward(self, x):\n periodicity = self.periodicity\n if periodicity is not None:\n x = make_periodic(x, periodicity)\n x = self.poly(x)\n out = self.conv(x)\n return out*self._rescale\n\n\nclass PiecewisePolynomialConvolution2d(nn.Module):\n def __init__(self, n: int, segments: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output: bool = False, periodicity: float = None, *args, **kwargs):\n \"\"\"\n Piecewise continuous polynomial convolutional layer. The boundaries between the polynomial segments are continuous.\n\n Args :\n - n : number of weights or nodes. Polynomial order is n-1 so quadratic would be n=3.\n - segments: The number of segments in the piecewise polynomial.\n - in_channels : number of input channels\n - kernel_size : size of the kernel\n - length : Range of the piecewise polynomial interpolation points.
length = 2 implies [-1, 1] so the interpolation points\n are in that range.\n - rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,\n in effect taking the average.\n \"\"\"\n super().__init__()\n self.poly = Expansion2d(\n PiecewisePolynomialExpand(n=n, segments=segments, length=length))\n self._channels = ((n-1)*segments+1)*in_channels\n self.periodicity = periodicity\n self.conv = conv2d_wrapper(in_channels=self._channels,\n kernel_size=kernel_size, **kwargs)\n self._total_in = in_channels*kernel_size*kernel_size\n self._rescale = 1.0\n if rescale_output is True:\n self._rescale = 1.0/self._total_in\n\n def forward(self, x):\n periodicity = self.periodicity\n if periodicity is not None:\n x = make_periodic(x, periodicity)\n x = self.poly(x)\n out = self.conv(x)\n return out*self._rescale\n\n\nclass PiecewiseDiscontinuousPolynomialConvolution2d(nn.Module):\n def __init__(self, n: int, segments: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output: bool = False, periodicity: float = None, *args, **kwargs):\n \"\"\"\n Discontinuous piecewise polynomial convolutional layer. The boundary between each polynomial can be discontinuous. \n Args :\n - n : number of weights or nodes. Polynomial order is n-1 so quadratic would be n=3.\n - segments: The number of segments in the piecewise polynomial.\n - in_channels : number of input channels\n - kernel_size : size of the kernel\n - length : Range of the piecewise polynomial interpolation points. length = 2 implies [-1, 1] so the interpolation points\n are in that range.\n - rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,\n in effect taking the average.\n \"\"\"\n super().__init__()\n self.poly = Expansion2d(\n PiecewiseDiscontinuousPolynomialExpand(n=n, segments=segments, length=length))\n self._channels = n*segments*in_channels\n self.periodicity = periodicity\n self.conv = conv2d_wrapper(in_channels=self._channels,\n kernel_size=kernel_size, **kwargs)\n self._total_in = in_channels*kernel_size*kernel_size\n self._rescale = 1.0\n if rescale_output is True:\n self._rescale = 1.0/self._total_in\n\n def forward(self, x):\n periodicity = self.periodicity\n if periodicity is not None:\n x = make_periodic(x, periodicity)\n x = self.poly(x)\n out = self.conv(x)\n return out*self._rescale\n"
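The core of Expansion2d above is a pure shape manipulation: the basis maps [B, C, H, W] to [basis_size, B, C, H, W], and the permute/reshape folds the basis dimension into the channel dimension so a plain Conv2d can consume the result. A walk-through with a stand-in monomial basis in place of LagrangeExpand (the toy basis is an assumption for illustration only):

import torch

def toy_basis(x: torch.Tensor) -> torch.Tensor:     # stand-in for LagrangeExpand
    return torch.stack([x, x ** 2, x ** 3], dim=0)  # basis_size = 3

x = torch.randn(4, 2, 8, 8)                            # B, C, H, W
res = toy_basis(x)                                     # basis, B, C, H, W
res = res.permute(1, 3, 4, 2, 0)                       # B, H, W, C, basis
res = res.reshape(4, 8, 8, 2 * 3).permute(0, 3, 1, 2)  # B, C*basis, H, W
assert res.shape == (4, 6, 8, 8)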
] | [
[
"torch.reshape",
"torch.nn.Conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KnockerPulsar/pytorch-image-models | [
"893f5dde27ae6b17389f738bd6e37160e2868c72"
] | [
"timm/models/byobnet.py"
] | [
"\"\"\" Bring-Your-Own-Blocks Network\r\n\r\nA flexible network w/ dataclass based config for stacking those NN blocks.\r\n\r\nThis model is currently used to implement the following networks:\r\n\r\nGPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)).\r\nPaper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090\r\nCode and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0\r\n\r\nRepVGG - repvgg_*\r\nPaper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697\r\nCode and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT\r\n\r\nIn all cases the models have been modified to fit within the design of ByobNet. I've remapped\r\nthe original weights and verified accuracies.\r\n\r\nFor GPU Efficient nets, I used the original names for the blocks since they were for the most part\r\nthe same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some\r\nchanges introduced in RegNet were also present in the stem and bottleneck blocks for this model.\r\n\r\nA significant number of different network archs can be implemented here, including variants of the\r\nabove nets that include attention.\r\n\r\nHacked together by / copyright Ross Wightman, 2021.\r\n\"\"\"\r\nimport math\r\nfrom dataclasses import dataclass, field, replace\r\nfrom typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence\r\nfrom functools import partial\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\r\nfrom .helpers import build_model_with_cfg, named_apply\r\nfrom .layers import ClassifierHead, ConvBnAct, BatchNormAct2d, DropPath, AvgPool2dSame, \\\r\n create_conv2d, get_act_layer, convert_norm_act, get_attn, make_divisible, to_2tuple\r\nfrom .registry import register_model\r\n\r\n__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block']\r\n\r\n\r\ndef _cfg(url='', **kwargs):\r\n return {\r\n 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\r\n 'crop_pct': 0.875, 'interpolation': 'bilinear',\r\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\r\n 'first_conv': 'stem.conv', 'classifier': 'head.fc',\r\n **kwargs\r\n }\r\n\r\n\r\ndefault_cfgs = {\r\n # GPU-Efficient (ResNet) weights\r\n 'gernet_s': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_s-756b4751.pth'),\r\n 'gernet_m': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_m-0873c53a.pth'),\r\n 'gernet_l': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_l-f31e2e8d.pth',\r\n input_size=(3, 256, 256), pool_size=(8, 8)),\r\n\r\n # RepVGG weights\r\n 'repvgg_a2': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_a2-c1ee6d2b.pth',\r\n first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),\r\n 'repvgg_b0': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b0-80ac3f1b.pth',\r\n first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),\r\n 'repvgg_b1': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1-77ca2989.pth',\r\n first_conv=('stem.conv_kxk.conv', 
'stem.conv_1x1.conv')),\r\n 'repvgg_b1g4': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1g4-abde5d92.pth',\r\n first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),\r\n 'repvgg_b2': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2-25b7494e.pth',\r\n first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),\r\n 'repvgg_b2g4': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2g4-165a85f2.pth',\r\n first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),\r\n 'repvgg_b3': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3-199bc50d.pth',\r\n first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),\r\n 'repvgg_b3g4': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3g4-73c370bf.pth',\r\n first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),\r\n\r\n # experimental configs\r\n 'resnet51q': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth',\r\n first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8),\r\n test_input_size=(3, 288, 288), crop_pct=1.0),\r\n 'resnet61q': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8),\r\n test_input_size=(3, 288, 288), crop_pct=1.0, interpolation='bicubic'),\r\n\r\n 'resnext26ts': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),\r\n 'gcresnext26ts': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),\r\n 'seresnext26ts': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),\r\n 'eca_resnext26ts': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),\r\n 'bat_resnext26ts': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic',\r\n min_input_size=(3, 256, 256)),\r\n\r\n 'resnet32ts': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),\r\n 'resnet33ts': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),\r\n 'gcresnet33ts': _cfg(\r\n 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),\r\n 'seresnet33ts': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),\r\n 'eca_resnet33ts': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),\r\n\r\n 'gcresnet50t': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),\r\n\r\n 'gcresnext50ts': _cfg(\r\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth',\r\n first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),\r\n}\r\n\r\n\r\n@dataclass\r\nclass ByoBlockCfg:\r\n type: Union[str, nn.Module]\r\n d: int # block depth (number of block repeats in stage)\r\n c: int # number of output channels for each block in stage\r\n s: int = 2 # stride of stage (first block)\r\n gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1\r\n br: float = 1. # bottleneck-ratio of blocks in stage\r\n\r\n # NOTE: these config items override the model cfgs that are applied to all blocks by default\r\n attn_layer: Optional[str] = None\r\n attn_kwargs: Optional[Dict[str, Any]] = None\r\n self_attn_layer: Optional[str] = None\r\n self_attn_kwargs: Optional[Dict[str, Any]] = None\r\n block_kwargs: Optional[Dict[str, Any]] = None\r\n\r\n\r\n@dataclass\r\nclass ByoModelCfg:\r\n blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...]\r\n downsample: str = 'conv1x1'\r\n stem_type: str = '3x3'\r\n stem_pool: Optional[str] = 'maxpool'\r\n stem_chs: int = 32\r\n width_factor: float = 1.0\r\n num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0\r\n zero_init_last: bool = True # zero init last weight (usually bn) in residual path\r\n fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation\r\n\r\n act_layer: str = 'relu'\r\n norm_layer: str = 'batchnorm'\r\n\r\n # NOTE: these config items will be overridden by the block cfg (per-block) if they are set there\r\n attn_layer: Optional[str] = None\r\n attn_kwargs: dict = field(default_factory=lambda: dict())\r\n self_attn_layer: Optional[str] = None\r\n self_attn_kwargs: dict = field(default_factory=lambda: dict())\r\n block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict())\r\n\r\n\r\ndef _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0):\r\n c = (64, 128, 256, 512)\r\n group_size = 0\r\n if groups > 0:\r\n group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0\r\n bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)])\r\n return bcfg\r\n\r\n\r\ndef interleave_blocks(\r\n types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs\r\n) -> Tuple[ByoBlockCfg]:\r\n \"\"\" interleave 2 block types 
in stack\r\n \"\"\"\r\n assert len(types) == 2\r\n if isinstance(every, int):\r\n every = list(range(0 if first else every, d, every + 1))\r\n if not every:\r\n every = [d - 1]\r\n every = set(every)\r\n blocks = []\r\n for i in range(d):\r\n block_type = types[1] if i in every else types[0]\r\n blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)]\r\n return tuple(blocks)\r\n\r\n\r\nmodel_cfgs = dict(\r\n gernet_l=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),\r\n ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),\r\n ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),\r\n ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.),\r\n ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.),\r\n ),\r\n stem_chs=32,\r\n stem_pool=None,\r\n num_features=2560,\r\n ),\r\n gernet_m=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),\r\n ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),\r\n ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),\r\n ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.),\r\n ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.),\r\n ),\r\n stem_chs=32,\r\n stem_pool=None,\r\n num_features=2560,\r\n ),\r\n gernet_s=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.),\r\n ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.),\r\n ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4),\r\n ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.),\r\n ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.),\r\n ),\r\n stem_chs=13,\r\n stem_pool=None,\r\n num_features=1920,\r\n ),\r\n\r\n repvgg_a2=ByoModelCfg(\r\n blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)),\r\n stem_type='rep',\r\n stem_chs=64,\r\n ),\r\n repvgg_b0=ByoModelCfg(\r\n blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)),\r\n stem_type='rep',\r\n stem_chs=64,\r\n ),\r\n repvgg_b1=ByoModelCfg(\r\n blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)),\r\n stem_type='rep',\r\n stem_chs=64,\r\n ),\r\n repvgg_b1g4=ByoModelCfg(\r\n blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4),\r\n stem_type='rep',\r\n stem_chs=64,\r\n ),\r\n repvgg_b2=ByoModelCfg(\r\n blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)),\r\n stem_type='rep',\r\n stem_chs=64,\r\n ),\r\n repvgg_b2g4=ByoModelCfg(\r\n blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4),\r\n stem_type='rep',\r\n stem_chs=64,\r\n ),\r\n repvgg_b3=ByoModelCfg(\r\n blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)),\r\n stem_type='rep',\r\n stem_chs=64,\r\n ),\r\n repvgg_b3g4=ByoModelCfg(\r\n blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4),\r\n stem_type='rep',\r\n stem_chs=64,\r\n ),\r\n\r\n # 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks\r\n # DW convs in last block, 2048 pre-FC, silu act \r\n resnet51q=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),\r\n ),\r\n stem_chs=128,\r\n stem_type='quad2',\r\n stem_pool=None,\r\n num_features=2048,\r\n act_layer='silu',\r\n ),\r\n\r\n # 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks\r\n # DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act \r\n resnet61q=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0,
block_kwargs=dict()),\r\n ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),\r\n ),\r\n stem_chs=128,\r\n stem_type='quad',\r\n stem_pool=None,\r\n num_features=2048,\r\n act_layer='silu',\r\n block_kwargs=dict(extra_conv=True),\r\n ),\r\n\r\n # A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act,\r\n # and a tiered stem w/ maxpool\r\n resnext26ts=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='maxpool',\r\n act_layer='silu',\r\n ),\r\n gcresnext26ts=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='maxpool',\r\n act_layer='silu',\r\n attn_layer='gca',\r\n ),\r\n seresnext26ts=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='maxpool',\r\n act_layer='silu',\r\n attn_layer='se',\r\n ),\r\n eca_resnext26ts=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='maxpool',\r\n act_layer='silu',\r\n attn_layer='eca',\r\n ),\r\n bat_resnext26ts=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='maxpool',\r\n act_layer='silu',\r\n attn_layer='bat',\r\n attn_kwargs=dict(block_size=8)\r\n ),\r\n\r\n # ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool\r\n resnet32ts=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='',\r\n num_features=0,\r\n act_layer='silu',\r\n ),\r\n\r\n # ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool\r\n resnet33ts=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, 
br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='',\r\n num_features=1280,\r\n act_layer='silu',\r\n ),\r\n\r\n # A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat \r\n # and a tiered stem w/ no maxpool\r\n gcresnet33ts=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='',\r\n num_features=1280,\r\n act_layer='silu',\r\n attn_layer='gca',\r\n ),\r\n seresnet33ts=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='',\r\n num_features=1280,\r\n act_layer='silu',\r\n attn_layer='se',\r\n ),\r\n eca_resnet33ts=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),\r\n ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='',\r\n num_features=1280,\r\n act_layer='silu',\r\n attn_layer='eca',\r\n ),\r\n\r\n gcresnet50t=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),\r\n ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25),\r\n ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='',\r\n attn_layer='gca',\r\n ),\r\n\r\n gcresnext50ts=ByoModelCfg(\r\n blocks=(\r\n ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25),\r\n ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25),\r\n ),\r\n stem_chs=64,\r\n stem_type='tiered',\r\n stem_pool='maxpool',\r\n # stem_pool=None,\r\n act_layer='silu',\r\n attn_layer='gca',\r\n ),\r\n)\r\n\r\n\r\n@register_model\r\ndef gernet_l(pretrained=False, **kwargs):\r\n \"\"\" GEResNet-Large (GENet-Large from official impl)\r\n `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090\r\n \"\"\"\r\n return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef gernet_m(pretrained=False, **kwargs):\r\n \"\"\" GEResNet-Medium (GENet-Normal from official impl)\r\n `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090\r\n \"\"\"\r\n return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef gernet_s(pretrained=False, **kwargs):\r\n \"\"\" GEResNet-Small (GENet-Small from official impl)\r\n `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090\r\n \"\"\"\r\n return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef repvgg_a2(pretrained=False, **kwargs):\r\n \"\"\" RepVGG-A2\r\n `Making VGG-style
ConvNets Great Again` - https://arxiv.org/abs/2101.03697\r\n \"\"\"\r\n return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef repvgg_b0(pretrained=False, **kwargs):\r\n \"\"\" RepVGG-B0\r\n `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697\r\n \"\"\"\r\n return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef repvgg_b1(pretrained=False, **kwargs):\r\n \"\"\" RepVGG-B1\r\n `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697\r\n \"\"\"\r\n return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef repvgg_b1g4(pretrained=False, **kwargs):\r\n \"\"\" RepVGG-B1g4\r\n `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697\r\n \"\"\"\r\n return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef repvgg_b2(pretrained=False, **kwargs):\r\n \"\"\" RepVGG-B2\r\n `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697\r\n \"\"\"\r\n return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef repvgg_b2g4(pretrained=False, **kwargs):\r\n \"\"\" RepVGG-B2g4\r\n `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697\r\n \"\"\"\r\n return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef repvgg_b3(pretrained=False, **kwargs):\r\n \"\"\" RepVGG-B3\r\n `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697\r\n \"\"\"\r\n return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef repvgg_b3g4(pretrained=False, **kwargs):\r\n \"\"\" RepVGG-B3g4\r\n `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697\r\n \"\"\"\r\n return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef resnet51q(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef resnet61q(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef resnext26ts(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef gcresnext26ts(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef seresnext26ts(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef eca_resnext26ts(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef bat_resnext26ts(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef resnet32ts(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef resnet33ts(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef 
gcresnet33ts(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef seresnet33ts(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef eca_resnet33ts(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef gcresnet50t(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs)\r\n\r\n\r\n@register_model\r\ndef gcresnext50ts(pretrained=False, **kwargs):\r\n \"\"\"\r\n \"\"\"\r\n return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs)\r\n\r\n\r\ndef expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]:\r\n if not isinstance(stage_blocks_cfg, Sequence):\r\n stage_blocks_cfg = (stage_blocks_cfg,)\r\n block_cfgs = []\r\n for i, cfg in enumerate(stage_blocks_cfg):\r\n block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)]\r\n return block_cfgs\r\n\r\n\r\ndef num_groups(group_size, channels):\r\n if not group_size: # 0 or None\r\n return 1 # normal conv with 1 group\r\n else:\r\n # NOTE group_size == 1 -> depthwise conv\r\n assert channels % group_size == 0\r\n return channels // group_size\r\n\r\n\r\n@dataclass\r\nclass LayerFn:\r\n conv_norm_act: Callable = ConvBnAct\r\n norm_act: Callable = BatchNormAct2d\r\n act: Callable = nn.ReLU\r\n attn: Optional[Callable] = None\r\n self_attn: Optional[Callable] = None\r\n\r\n\r\nclass DownsampleAvg(nn.Module):\r\n def __init__(self, in_chs, out_chs, stride=1, dilation=1, apply_act=False, layers: LayerFn = None):\r\n \"\"\" AvgPool Downsampling as in 'D' ResNet variants.\"\"\"\r\n super(DownsampleAvg, self).__init__()\r\n layers = layers or LayerFn()\r\n avg_stride = stride if dilation == 1 else 1\r\n if stride > 1 or dilation > 1:\r\n avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d\r\n self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)\r\n else:\r\n self.pool = nn.Identity()\r\n self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act)\r\n\r\n def forward(self, x):\r\n return self.conv(self.pool(x))\r\n\r\n\r\ndef create_downsample(downsample_type, layers: LayerFn, **kwargs):\r\n if downsample_type == 'avg':\r\n return DownsampleAvg(**kwargs)\r\n else:\r\n return layers.conv_norm_act(kwargs.pop('in_chs'), kwargs.pop('out_chs'), kernel_size=1, **kwargs)\r\n\r\n\r\nclass BasicBlock(nn.Module):\r\n \"\"\" ResNet Basic Block - kxk + kxk\r\n \"\"\"\r\n\r\n def __init__(\r\n self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), group_size=None, bottle_ratio=1.0,\r\n downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,\r\n drop_path_rate=0.):\r\n super(BasicBlock, self).__init__()\r\n layers = layers or LayerFn()\r\n mid_chs = make_divisible(out_chs * bottle_ratio)\r\n groups = num_groups(group_size, mid_chs)\r\n\r\n if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:\r\n self.shortcut = create_downsample(\r\n downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],\r\n apply_act=False, layers=layers)\r\n else:\r\n self.shortcut = nn.Identity()\r\n\r\n self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, 
dilation=dilation[0])\r\n self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)\r\n self.conv2_kxk = layers.conv_norm_act(\r\n mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block, apply_act=False)\r\n self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)\r\n self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()\r\n self.act = nn.Identity() if linear_out else layers.act(inplace=True)\r\n\r\n def init_weights(self, zero_init_last: bool = False):\r\n if zero_init_last:\r\n nn.init.zeros_(self.conv2_kxk.bn.weight)\r\n for attn in (self.attn, self.attn_last):\r\n if hasattr(attn, 'reset_parameters'):\r\n attn.reset_parameters()\r\n\r\n def forward(self, x):\r\n shortcut = self.shortcut(x)\r\n\r\n # residual path\r\n x = self.conv1_kxk(x)\r\n x = self.conv2_kxk(x)\r\n x = self.attn(x)\r\n x = self.drop_path(x)\r\n\r\n x = self.act(x + shortcut)\r\n return x\r\n\r\n\r\nclass BottleneckBlock(nn.Module):\r\n \"\"\" ResNet-like Bottleneck Block - 1x1 - kxk - 1x1\r\n \"\"\"\r\n\r\n def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,\r\n downsample='avg', attn_last=False, linear_out=False, extra_conv=False, layers: LayerFn = None,\r\n drop_block=None, drop_path_rate=0.):\r\n super(BottleneckBlock, self).__init__()\r\n layers = layers or LayerFn()\r\n mid_chs = make_divisible(out_chs * bottle_ratio)\r\n groups = num_groups(group_size, mid_chs)\r\n\r\n if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:\r\n self.shortcut = create_downsample(\r\n downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],\r\n apply_act=False, layers=layers)\r\n else:\r\n self.shortcut = nn.Identity()\r\n\r\n self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)\r\n self.conv2_kxk = layers.conv_norm_act(\r\n mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],\r\n groups=groups, drop_block=drop_block)\r\n if extra_conv:\r\n self.conv2b_kxk = layers.conv_norm_act(\r\n mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block)\r\n else:\r\n self.conv2b_kxk = nn.Identity()\r\n self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)\r\n self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)\r\n self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)\r\n self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.
else nn.Identity()\r\n self.act = nn.Identity() if linear_out else layers.act(inplace=True)\r\n\r\n def init_weights(self, zero_init_last: bool = False):\r\n if zero_init_last:\r\n nn.init.zeros_(self.conv3_1x1.bn.weight)\r\n for attn in (self.attn, self.attn_last):\r\n if hasattr(attn, 'reset_parameters'):\r\n attn.reset_parameters()\r\n\r\n def forward(self, x):\r\n shortcut = self.shortcut(x)\r\n\r\n x = self.conv1_1x1(x)\r\n x = self.conv2_kxk(x)\r\n x = self.conv2b_kxk(x)\r\n x = self.attn(x)\r\n x = self.conv3_1x1(x)\r\n x = self.attn_last(x)\r\n x = self.drop_path(x)\r\n\r\n x = self.act(x + shortcut)\r\n return x\r\n\r\n\r\nclass DarkBlock(nn.Module):\r\n \"\"\" DarkNet-like (1x1 + 3x3 w/ stride) block\r\n\r\n The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models.\r\n This block is pretty much a DarkNet block (also DenseNet), hence the name. Neither DarkNet nor DenseNet\r\n uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats).\r\n\r\n If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 w/ stride + 1x1)\r\n for more optimal compute.\r\n \"\"\"\r\n\r\n def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,\r\n downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,\r\n drop_path_rate=0.):\r\n super(DarkBlock, self).__init__()\r\n layers = layers or LayerFn()\r\n mid_chs = make_divisible(out_chs * bottle_ratio)\r\n groups = num_groups(group_size, mid_chs)\r\n\r\n if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:\r\n self.shortcut = create_downsample(\r\n downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],\r\n apply_act=False, layers=layers)\r\n else:\r\n self.shortcut = nn.Identity()\r\n\r\n self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)\r\n self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)\r\n self.conv2_kxk = layers.conv_norm_act(\r\n mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],\r\n groups=groups, drop_block=drop_block, apply_act=False)\r\n self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)\r\n self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.
else nn.Identity()\r\n self.act = nn.Identity() if linear_out else layers.act(inplace=True)\r\n\r\n def init_weights(self, zero_init_last: bool = False):\r\n if zero_init_last:\r\n nn.init.zeros_(self.conv2_kxk.bn.weight)\r\n for attn in (self.attn, self.attn_last):\r\n if hasattr(attn, 'reset_parameters'):\r\n attn.reset_parameters()\r\n\r\n def forward(self, x):\r\n shortcut = self.shortcut(x)\r\n\r\n x = self.conv1_1x1(x)\r\n x = self.attn(x)\r\n x = self.conv2_kxk(x)\r\n x = self.attn_last(x)\r\n x = self.drop_path(x)\r\n x = self.act(x + shortcut)\r\n return x\r\n\r\n\r\nclass EdgeBlock(nn.Module):\r\n \"\"\" EdgeResidual-like (3x3 + 1x1) block\r\n\r\n A two-layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed.\r\n Very similar to the EfficientNet Edge-Residual block, but this block ends with activations, is\r\n intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs.\r\n\r\n FIXME is there a more common 3x3 + 1x1 conv block to name this after?\r\n \"\"\"\r\n\r\n def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,\r\n downsample='avg', attn_last=False, linear_out=False, layers: LayerFn = None,\r\n drop_block=None, drop_path_rate=0.):\r\n super(EdgeBlock, self).__init__()\r\n layers = layers or LayerFn()\r\n mid_chs = make_divisible(out_chs * bottle_ratio)\r\n groups = num_groups(group_size, mid_chs)\r\n\r\n if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:\r\n self.shortcut = create_downsample(\r\n downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],\r\n apply_act=False, layers=layers)\r\n else:\r\n self.shortcut = nn.Identity()\r\n\r\n self.conv1_kxk = layers.conv_norm_act(\r\n in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],\r\n groups=groups, drop_block=drop_block)\r\n self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)\r\n self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)\r\n self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)\r\n self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()\r\n self.act = nn.Identity() if linear_out else layers.act(inplace=True)\r\n\r\n def init_weights(self, zero_init_last: bool = False):\r\n if zero_init_last:\r\n nn.init.zeros_(self.conv2_1x1.bn.weight)\r\n for attn in (self.attn, self.attn_last):\r\n if hasattr(attn, 'reset_parameters'):\r\n attn.reset_parameters()\r\n\r\n def forward(self, x):\r\n shortcut = self.shortcut(x)\r\n\r\n x = self.conv1_kxk(x)\r\n x = self.attn(x)\r\n x = self.conv2_1x1(x)\r\n x = self.attn_last(x)\r\n x = self.drop_path(x)\r\n x = self.act(x + shortcut)\r\n return x\r\n\r\n\r\nclass RepVggBlock(nn.Module):\r\n \"\"\" RepVGG Block.\r\n\r\n Adapted from impl at https://github.com/DingXiaoH/RepVGG\r\n\r\n This version does not currently support the deploy optimization.
It is currently fixed in 'train' mode.\r\n \"\"\"\r\n\r\n def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,\r\n downsample='', layers: LayerFn = None, drop_block=None, drop_path_rate=0.):\r\n super(RepVggBlock, self).__init__()\r\n layers = layers or LayerFn()\r\n groups = num_groups(group_size, in_chs)\r\n\r\n use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1]\r\n self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None\r\n self.conv_kxk = layers.conv_norm_act(\r\n in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],\r\n groups=groups, drop_block=drop_block, apply_act=False)\r\n self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False)\r\n self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs)\r\n self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity()\r\n self.act = layers.act(inplace=True)\r\n\r\n def init_weights(self, zero_init_last: bool = False):\r\n # NOTE this init overrides that base model init with specific changes for the block type\r\n for m in self.modules():\r\n if isinstance(m, nn.BatchNorm2d):\r\n nn.init.normal_(m.weight, .1, .1)\r\n nn.init.normal_(m.bias, 0, .1)\r\n if hasattr(self.attn, 'reset_parameters'):\r\n self.attn.reset_parameters()\r\n\r\n def forward(self, x):\r\n if self.identity is None:\r\n x = self.conv_1x1(x) + self.conv_kxk(x)\r\n else:\r\n identity = self.identity(x)\r\n x = self.conv_1x1(x) + self.conv_kxk(x)\r\n x = self.drop_path(x) # not in the paper / official impl, experimental\r\n x = x + identity\r\n x = self.attn(x) # no attn in the paper / official impl, experimental\r\n x = self.act(x)\r\n return x\r\n\r\n\r\nclass SelfAttnBlock(nn.Module):\r\n \"\"\" ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1\r\n \"\"\"\r\n\r\n def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,\r\n downsample='avg', extra_conv=False, linear_out=False, post_attn_na=True, feat_size=None,\r\n layers: LayerFn = None, drop_block=None, drop_path_rate=0.):\r\n super(SelfAttnBlock, self).__init__()\r\n assert layers is not None\r\n mid_chs = make_divisible(out_chs * bottle_ratio)\r\n groups = num_groups(group_size, mid_chs)\r\n\r\n if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:\r\n self.shortcut = create_downsample(\r\n downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],\r\n apply_act=False, layers=layers)\r\n else:\r\n self.shortcut = nn.Identity()\r\n\r\n self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)\r\n if extra_conv:\r\n self.conv2_kxk = layers.conv_norm_act(\r\n mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],\r\n groups=groups, drop_block=drop_block)\r\n stride = 1 # striding done via conv if enabled\r\n else:\r\n self.conv2_kxk = nn.Identity()\r\n opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size)\r\n # FIXME need to dilate self attn to have dilated network support, moop moop\r\n self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs)\r\n self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity()\r\n self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)\r\n self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity()\r\n self.act = nn.Identity() if linear_out else layers.act(inplace=True)\r\n\r\n def init_weights(self, zero_init_last: bool = False):\r\n if zero_init_last:\r\n nn.init.zeros_(self.conv3_1x1.bn.weight)\r\n if hasattr(self.self_attn, 'reset_parameters'):\r\n self.self_attn.reset_parameters()\r\n\r\n def forward(self, x):\r\n shortcut = self.shortcut(x)\r\n\r\n x = self.conv1_1x1(x)\r\n x = self.conv2_kxk(x)\r\n x = self.self_attn(x)\r\n x = self.post_attn(x)\r\n x = self.conv3_1x1(x)\r\n x = self.drop_path(x)\r\n\r\n x = self.act(x + shortcut)\r\n return x\r\n\r\n\r\n_block_registry = dict(\r\n basic=BasicBlock,\r\n bottle=BottleneckBlock,\r\n dark=DarkBlock,\r\n edge=EdgeBlock,\r\n rep=RepVggBlock,\r\n self_attn=SelfAttnBlock,\r\n)\r\n\r\n\r\ndef register_block(block_type: str, block_fn: nn.Module):\r\n _block_registry[block_type] = block_fn\r\n\r\n\r\ndef create_block(block: Union[str, nn.Module], **kwargs):\r\n if isinstance(block, (nn.Module, partial)):\r\n return block(**kwargs)\r\n assert block in _block_registry, f'Unknown block type ({block})'\r\n return _block_registry[block](**kwargs)\r\n\r\n\r\nclass Stem(nn.Sequential):\r\n\r\n def __init__(self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool',\r\n num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn = None):\r\n super().__init__()\r\n assert stride in (2, 4)\r\n layers = layers or LayerFn()\r\n\r\n if isinstance(out_chs, (list, tuple)):\r\n num_rep = len(out_chs)\r\n stem_chs = out_chs\r\n else:\r\n stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1]\r\n\r\n self.stride = stride\r\n self.feature_info = [] # track intermediate features\r\n prev_feat = ''\r\n stem_strides = [2] + [1] * (num_rep - 1)\r\n if stride == 4 and not pool:\r\n # set last conv in stack to be strided if stride == 4 and no pooling layer\r\n stem_strides[-1] = 2\r\n\r\n num_act = num_rep if num_act is None else num_act\r\n # if num_act < num_rep, first convs in stack won't have bn + act\r\n stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act\r\n prev_chs = in_chs\r\n curr_stride = 1\r\n for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)):\r\n layer_fn = layers.conv_norm_act if na else create_conv2d\r\n conv_name = f'conv{i + 1}'\r\n if i > 0 and s > 1:\r\n self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))\r\n self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s))\r\n prev_chs = ch\r\n curr_stride *= s\r\n prev_feat = conv_name\r\n\r\n if pool and 'max' in pool.lower():\r\n self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))\r\n self.add_module('pool', nn.MaxPool2d(3, 2, 1))\r\n curr_stride *= 2\r\n prev_feat = 'pool'\r\n\r\n self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))\r\n assert curr_stride == stride\r\n\r\n\r\ndef create_byob_stem(in_chs, out_chs, stem_type='', pool_type='', feat_prefix='stem', layers: LayerFn = None):\r\n layers = layers or LayerFn()\r\n assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', '7x7', '3x3')\r\n if 'quad' in stem_type:\r\n # based on NFNet stem, stack of 4 3x3 convs\r\n num_act = 2 if 'quad2' in stem_type else None\r\n stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers)\r\n elif 'tiered' in stem_type:\r\n # 3x3 stack of 3 convs as in my ResNet-T\r\n stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type,
layers=layers)\r\n elif 'deep' in stem_type:\r\n # 3x3 stack of 3 convs as in ResNet-D\r\n stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers)\r\n elif 'rep' in stem_type:\r\n stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers)\r\n elif '7x7' in stem_type:\r\n # 7x7 stem conv as in ResNet\r\n if pool_type:\r\n stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers)\r\n else:\r\n stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2)\r\n else:\r\n # 3x3 stem conv as in RegNet is the default\r\n if pool_type:\r\n stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers)\r\n else:\r\n stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2)\r\n\r\n if isinstance(stem, Stem):\r\n feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info]\r\n else:\r\n feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)]\r\n return stem, feature_info\r\n\r\n\r\ndef reduce_feat_size(feat_size, stride=2):\r\n return None if feat_size is None else tuple([s // stride for s in feat_size])\r\n\r\n\r\ndef override_kwargs(block_kwargs, model_kwargs):\r\n \"\"\" Override model level attn/self-attn/block kwargs w/ block level\r\n\r\n NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs\r\n for the block if set to anything that isn't None.\r\n\r\n i.e. an empty block_kwargs dict will remove kwargs set at model level for that block\r\n \"\"\"\r\n out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs\r\n return out_kwargs or {} # make sure None isn't returned\r\n\r\n\r\ndef update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ):\r\n layer_fns = block_kwargs['layers']\r\n\r\n # override attn layer / args with block local config\r\n attn_set = block_cfg.attn_layer is not None\r\n if attn_set or block_cfg.attn_kwargs is not None:\r\n # override attn layer config\r\n if attn_set and not block_cfg.attn_layer:\r\n # empty string for attn_layer type will disable attn for this block\r\n attn_layer = None\r\n else:\r\n attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs)\r\n attn_layer = block_cfg.attn_layer or model_cfg.attn_layer\r\n attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None\r\n layer_fns = replace(layer_fns, attn=attn_layer)\r\n\r\n # override self-attn layer / args with block local cfg\r\n self_attn_set = block_cfg.self_attn_layer is not None\r\n if self_attn_set or block_cfg.self_attn_kwargs is not None:\r\n # override attn layer config\r\n if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == ''\r\n # empty string for self_attn_layer type will disable attn for this block\r\n self_attn_layer = None\r\n else:\r\n self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs)\r\n self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer\r\n self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \\\r\n if self_attn_layer is not None else None\r\n layer_fns = replace(layer_fns, self_attn=self_attn_layer)\r\n\r\n block_kwargs['layers'] = layer_fns\r\n\r\n # add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set\r\n block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs))\r\n\r\n\r\ndef create_byob_stages(\r\n cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, 
Any],\r\n feat_size: Optional[int] = None,\r\n layers: Optional[LayerFn] = None,\r\n block_kwargs_fn: Optional[Callable] = update_block_kwargs):\r\n\r\n layers = layers or LayerFn()\r\n feature_info = []\r\n block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks]\r\n depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs]\r\n dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]\r\n dilation = 1\r\n net_stride = stem_feat['reduction']\r\n prev_chs = stem_feat['num_chs']\r\n prev_feat = stem_feat\r\n stages = []\r\n for stage_idx, stage_block_cfgs in enumerate(block_cfgs):\r\n stride = stage_block_cfgs[0].s\r\n if stride != 1 and prev_feat:\r\n feature_info.append(prev_feat)\r\n if net_stride >= output_stride and stride > 1:\r\n dilation *= stride\r\n stride = 1\r\n net_stride *= stride\r\n first_dilation = 1 if dilation in (1, 2) else 2\r\n\r\n blocks = []\r\n for block_idx, block_cfg in enumerate(stage_block_cfgs):\r\n out_chs = make_divisible(block_cfg.c * cfg.width_factor)\r\n group_size = block_cfg.gs\r\n if isinstance(group_size, Callable):\r\n group_size = group_size(out_chs, block_idx)\r\n block_kwargs = dict( # Blocks used in this model must accept these arguments\r\n in_chs=prev_chs,\r\n out_chs=out_chs,\r\n stride=stride if block_idx == 0 else 1,\r\n dilation=(first_dilation, dilation),\r\n group_size=group_size,\r\n bottle_ratio=block_cfg.br,\r\n downsample=cfg.downsample,\r\n drop_path_rate=dpr[stage_idx][block_idx],\r\n layers=layers,\r\n )\r\n if block_cfg.type in ('self_attn',):\r\n # add feat_size arg for blocks that support/need it\r\n block_kwargs['feat_size'] = feat_size\r\n block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg)\r\n blocks += [create_block(block_cfg.type, **block_kwargs)]\r\n first_dilation = dilation\r\n prev_chs = out_chs\r\n if stride > 1 and block_idx == 0:\r\n feat_size = reduce_feat_size(feat_size, stride)\r\n\r\n stages += [nn.Sequential(*blocks)]\r\n prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')\r\n\r\n feature_info.append(prev_feat)\r\n return nn.Sequential(*stages), feature_info\r\n\r\n\r\ndef get_layer_fns(cfg: ByoModelCfg):\r\n act = get_act_layer(cfg.act_layer)\r\n norm_act = convert_norm_act(norm_layer=cfg.norm_layer, act_layer=act)\r\n conv_norm_act = partial(ConvBnAct, norm_layer=cfg.norm_layer, act_layer=act)\r\n attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None\r\n self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None\r\n layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn)\r\n return layer_fn\r\n\r\n\r\nclass ByobNet(nn.Module):\r\n \"\"\" 'Bring-your-own-blocks' Net\r\n\r\n A flexible network backbone that allows building model stem + blocks via\r\n dataclass cfg definition w/ factory functions for module instantiation.\r\n\r\n Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act).\r\n \"\"\"\r\n def __init__(self, cfg: ByoModelCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32,\r\n zero_init_last=True, img_size=None, drop_rate=0., drop_path_rate=0.):\r\n super().__init__()\r\n self.num_classes = num_classes\r\n self.drop_rate = drop_rate\r\n layers = get_layer_fns(cfg)\r\n if cfg.fixed_input_size:\r\n assert img_size is not None, 'img_size argument is required for fixed input size model'\r\n feat_size = to_2tuple(img_size) if 
img_size is not None else None\r\n\r\n self.feature_info = []\r\n stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor))\r\n self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers)\r\n self.feature_info.extend(stem_feat[:-1])\r\n feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction'])\r\n\r\n self.stages, stage_feat = create_byob_stages(\r\n cfg, drop_path_rate, output_stride, stem_feat[-1], layers=layers, feat_size=feat_size)\r\n self.feature_info.extend(stage_feat[:-1])\r\n\r\n prev_chs = stage_feat[-1]['num_chs']\r\n if cfg.num_features:\r\n self.num_features = int(round(cfg.width_factor * cfg.num_features))\r\n self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1)\r\n else:\r\n self.num_features = prev_chs\r\n self.final_conv = nn.Identity()\r\n self.feature_info += [\r\n dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')]\r\n\r\n self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)\r\n\r\n # init weights\r\n named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)\r\n\r\n def get_classifier(self):\r\n return self.head.fc\r\n\r\n def reset_classifier(self, num_classes, global_pool='avg'):\r\n self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)\r\n\r\n def forward_features(self, x):\r\n x = self.stem(x)\r\n x = self.stages(x)\r\n x = self.final_conv(x)\r\n return x\r\n\r\n def forward(self, x):\r\n x = self.forward_features(x)\r\n x = self.head(x)\r\n return x\r\n\r\n\r\ndef _init_weights(module, name='', zero_init_last=False):\r\n if isinstance(module, nn.Conv2d):\r\n fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels\r\n fan_out //= module.groups\r\n module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))\r\n if module.bias is not None:\r\n module.bias.data.zero_()\r\n elif isinstance(module, nn.Linear):\r\n nn.init.normal_(module.weight, mean=0.0, std=0.01)\r\n if module.bias is not None:\r\n nn.init.zeros_(module.bias)\r\n elif isinstance(module, nn.BatchNorm2d):\r\n nn.init.ones_(module.weight)\r\n nn.init.zeros_(module.bias)\r\n elif hasattr(module, 'init_weights'):\r\n module.init_weights(zero_init_last=zero_init_last)\r\n\r\n\r\ndef _create_byobnet(variant, pretrained=False, **kwargs):\r\n return build_model_with_cfg(\r\n ByobNet, variant, pretrained,\r\n default_cfg=default_cfgs[variant],\r\n model_cfg=model_cfgs[variant],\r\n feature_cfg=dict(flatten_sequential=True),\r\n **kwargs)\r\n"
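A minimal usage sketch for the byobnet.py source above. It assumes the file ships as timm/models/byobnet.py inside an installed timm package; each entry in model_cfgs is exposed by name through the @register_model functions, so timm.create_model can build it (model name and input size here follow the default_cfgs in the code):

import torch
import timm  # assumes a timm install that includes this byobnet.py

# 'gernet_s' resolves via the @register_model registry to _create_byobnet
model = timm.create_model('gernet_s', pretrained=False, num_classes=10)
model.eval()

x = torch.randn(1, 3, 224, 224)  # matches the _cfg default input_size
with torch.no_grad():
    logits = model(x)  # stem -> stages -> final_conv -> ClassifierHead
print(logits.shape)  # torch.Size([1, 10])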
] | [
[
"torch.nn.Sequential",
"torch.nn.MaxPool2d",
"torch.nn.Identity",
"torch.nn.init.ones_",
"torch.nn.init.normal_",
"torch.nn.init.zeros_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
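For reference on the block-config helpers in the byobnet.py row above: a stage-level ByoBlockCfg of depth d expands into d depth-1 block configs, and interleave_blocks alternates two block types within a stage. A small sketch of that behavior, assuming the file is importable as timm.models.byobnet (the config values are illustrative):

from timm.models.byobnet import ByoBlockCfg, expand_blocks_cfg, interleave_blocks

# A depth-3 stage spec expands to three depth-1 block cfgs (other fields copied)
stage = ByoBlockCfg(type='bottle', d=3, c=256, s=2, gs=32, br=0.25)
print([(c.type, c.d) for c in expand_blocks_cfg(stage)])
# -> [('bottle', 1), ('bottle', 1), ('bottle', 1)]

# With every=1 and first=False, the second type lands on indices 1 and 3
# of a depth-4 stage
mixed = interleave_blocks(('bottle', 'self_attn'), d=4, every=1, c=512)
print([b.type for b in mixed])
# -> ['bottle', 'self_attn', 'bottle', 'self_attn']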
whn09/incubator-tvm | [
"657a6fa6554cc8402eca225f80e1b2cc2803c71a",
"657a6fa6554cc8402eca225f80e1b2cc2803c71a"
] | [
"tests/python/relay/test_op_level3.py",
"tests/python/topi/python/test_topi_conv2d_nchw.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\" Support level3 operator test cases.\n\"\"\"\nimport numpy as np\nimport pytest\nimport tvm\nfrom tvm import te\nfrom tvm import relay\nfrom tvm.error import TVMError\nfrom tvm.relay import create_executor, transform\nfrom tvm.relay.testing import check_grad, run_infer_type\nimport tvm.testing\n\n\ndef test_zeros_ones():\n for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:\n y = op(shape=(124, 50), dtype=\"float64\")\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((124, 50), \"float64\")\n intrp = create_executor()\n intrp_res = intrp.evaluate(y).asnumpy()\n np.testing.assert_allclose(intrp_res, ref((124, 50), 'float64'))\n\ndef test_unary_identity():\n for op, ref in [(relay.zeros_like, np.zeros_like),\n (relay.ones_like, np.ones_like),\n (relay.ceil, np.ceil),\n (relay.floor, np.floor),\n (relay.trunc, np.trunc),\n (relay.round, np.round),\n (relay.abs, np.abs),\n (relay.copy, None), # np.copy\n (relay.negative, np.negative),\n (relay.sign, np.sign)]:\n shape = (8, 9, 4)\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n y = op(x)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(shape, \"float32\")\n\n if ref is not None:\n data = np.random.rand(*shape).astype('float32')\n intrp = create_executor()\n op_res = intrp.evaluate(y, { x: relay.const(data) })\n ref_res = ref(data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)\n\ndef test_cast():\n x = relay.var(\"x\", relay.TensorType((8, 9, 4), \"float32\"))\n y = x.astype(\"int32\")\n yy = run_infer_type(y)\n assert \"dtype=\" in yy.astext()\n assert yy.checked_type == relay.TensorType((8, 9, 4), \"int32\")\n\n x = relay.var(\"x\", relay.TensorType((8, 9, 4), \"float32\"))\n y = relay.cast(x, \"int32\")\n yy = run_infer_type(y)\n assert \"dtype=\" in yy.astext()\n assert yy.checked_type == relay.TensorType((8, 9, 4), \"int32\")\n\n\ndef test_clip():\n a = relay.var(\"a\", relay.TensorType((10, 4), \"float32\"))\n y = relay.clip(a, 1., 4.)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((10, 4), \"float32\")\n\n data = np.random.rand(10, 4).astype('float32')\n intrp = create_executor()\n op_res = intrp.evaluate(y, { a: relay.const(data) })\n ref_res = np.clip(data, 1., 4.)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)\n\ndef test_fixed_point_multiply():\n # Test 23 * 1/16\n # [m,s] = [0.5, -3] = frexp(1/16)\n # M = 0.5*2^31 = 1073741824\n # so M = 1073741824 and s = -3\n\n a = relay.var(\"a\", relay.TensorType((10, 4), \"int32\"))\n y = relay.fixed_point_multiply(a, 1073741824, -3)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((10, 4), \"int32\")\n\n data = 23*np.ones((10, 4)).astype('int32')\n 
intrp = create_executor()\n op_res = intrp.evaluate(y, { a: relay.const(data) })\n ref_res = np.ones((10, 4)).astype('int32')\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, atol=1)\n\ndef test_reinterpret():\n a = relay.var(\"a\", relay.TensorType((1000, 4), \"float32\"))\n y = relay.reinterpret(a, \"int32\")\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((1000, 4), \"int32\")\n\n data = np.random.randn(1000, 4).astype('float32') * 1000\n intrp = create_executor()\n op_res = intrp.evaluate(y, {a: relay.const(data)})\n ref_res = data.view(\"int32\")\n np.testing.assert_equal(op_res.asnumpy(), ref_res)\n\n\ndef test_approximate_transcendental():\n def C(x):\n return relay.expr.const(x, \"float32\")\n\n def approx_exp(x):\n # An approximation derived from Opus,\n # https://github.com/xiph/opus/blob/c1c247/celt/mathops.h#L147-L165\n x = relay.minimum(relay.maximum(x, C(-88.0)), C(88.0))\n x = C(127.0) + x * C(1.44269504)\n xf = relay.floor(x)\n i = relay.cast(xf, \"int32\")\n x = x - xf\n Y = C(0.99992522) + x * (C(0.69583354) + x * (C(0.22606716) + x * C(0.078024523)))\n exponent = relay.left_shift(i, relay.expr.const(23, \"int32\"))\n exponent = relay.reinterpret(exponent, \"float32\")\n return exponent * Y\n\n def approximate_sigmoid(x):\n y = approx_exp(x)\n return y / (y + C(1.0))\n\n def approximate_tanh(x):\n x = x * C(2.0)\n y = approx_exp(x)\n return (y - C(1.0)) / (y + C(1.0))\n\n a = relay.var(\"a\", relay.TensorType((1000,), \"float32\"))\n y = approximate_sigmoid(a)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((1000,), \"float32\")\n data = np.linspace(-5, 5, 1000).astype(\"float32\")\n intrp = create_executor()\n op_res = intrp.evaluate(y, {a: relay.const(data)})\n\n def reference_sigmoid(x):\n return np.exp(-np.logaddexp(0, -x))\n np.testing.assert_allclose(op_res.asnumpy(), reference_sigmoid(data), atol=2e-5, rtol=1e-9)\n\n y = approximate_tanh(a)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((1000,), \"float32\")\n data = np.linspace(-5, 5, 1000).astype(\"float32\")\n intrp = create_executor()\n op_res = intrp.evaluate(y, {a: relay.const(data)})\n\n def reference_tanh(x):\n return np.tanh(x)\n np.testing.assert_allclose(op_res.asnumpy(), reference_tanh(data), atol=4e-5, rtol=1e-9)\n\n\ndef test_squeeze():\n def verify_squeeze(shape, dtype, axis):\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n squeeze = relay.squeeze(x, axis=axis)\n\n np_axis = tuple(axis) if axis is not None else None\n\n data = np.random.random_sample(shape).astype(dtype)\n intrp = create_executor()\n op_res = intrp.evaluate(squeeze, { x : relay.const(data) })\n ref_res = np.squeeze(data, axis=np_axis)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)\n\n verify_squeeze((1, 3, 2, 5), \"float32\", None)\n verify_squeeze((1, 3, 1), \"float32\", [0])\n verify_squeeze((1, 2, 1, 2, 1), \"float32\", [0, 2])\n\n\ndef test_transpose_infer_type():\n n, t, d = te.size_var(\"n\"), te.size_var(\"t\"), 100\n x = relay.var(\"x\", relay.TensorType((n, t, d), \"float32\"))\n y = relay.transpose(x, axes=(1, 0, 2))\n assert \"axes=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (t, n, 100), \"float32\")\n\n y = relay.transpose(x)\n assert \"axes=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (100, t, n), \"float32\")\n\n\[email protected]_gpu\ndef test_transpose():\n def verify_transpose(dshape, axes):\n x = relay.var(\"x\", 
relay.TensorType(dshape, \"float32\"))\n z = relay.transpose(x, axes=axes)\n\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=dshape).astype(\"float32\")\n ref_res = np.transpose(x_data, axes=axes)\n\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n verify_transpose((2, 3, 4), (0, 2, 1))\n\n\ndef test_squeeze_infer_type():\n n, t, d = 1, 4, 1\n x = relay.var(\"x\", relay.TensorType((n, t, d), \"float32\"))\n y = relay.squeeze(x, axis=(2,))\n assert \"axis=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (1, 4), \"float32\")\n\n n, t, d = 1, 4, 1\n x = relay.var(\"x\", relay.TensorType((n, t, d), \"float32\"))\n y = relay.squeeze(x)\n assert \"axis=\" not in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (4,), \"float32\")\n\[email protected](raises=tvm._ffi.base.TVMError)\ndef test_squeeze_bad_axes_infer_type():\n n, t, d = 1, 4, 1\n x = relay.var(\"x\", relay.TensorType((n, t, d), \"float32\"))\n y = relay.squeeze(x, axis=(1,))\n yy = run_infer_type(y)\n\n\ndef test_reshape_infer_type():\n n, t, d1, d2 = 10, 20, 100, 20\n x = relay.var(\"x\", relay.TensorType((n, t, d1, d2), \"float32\"))\n y = relay.reshape(x, newshape=(n, t, 2000))\n assert \"newshape=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (n, t, 2000), \"float32\")\n\[email protected]_gpu\ndef test_reshape():\n def verify_reshape(shape, newshape, oshape):\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n z = relay.reshape(x, newshape=newshape)\n zz = run_infer_type(z)\n assert \"newshape=\" in z.astext()\n assert zz.checked_type == relay.ty.TensorType(oshape, \"float32\")\n\n func = relay.Function([x], z)\n check_grad(func)\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(\"float32\")\n ref_res = np.reshape(x_data, oshape)\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n verify_reshape((2, 3, 4), (8, 3), (8, 3))\n verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))\n verify_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2))\n verify_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4))\n verify_reshape((2, 3, 4), (0, -1), (2, 12))\n verify_reshape((2, 3, 4), (-1, 0), (8, 3))\n verify_reshape((2, 3, 4), (2, -2), (2, 3, 4))\n verify_reshape((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1))\n verify_reshape((2, 3, 4), (-3, 4), (6, 4))\n verify_reshape((2, 3, 4, 5), (-3, -3), (6, 20))\n verify_reshape((2, 3, 4), (0, -3), (2, 12))\n verify_reshape((2, 3, 4), (-3, -2), (6, 4))\n verify_reshape((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4))\n verify_reshape((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4))\n\n\ndef test_reshape_fail():\n with pytest.raises(TVMError) as reshape_err:\n x = relay.var(\"x\", relay.TensorType([2,3], \"float32\"))\n z = relay.reshape(x, [7])\n zz = run_infer_type(z)\n\n\ndef test_reshape_like_infer_type():\n # concrete shape\n x = relay.var(\"x\", relay.TensorType((1, 2, 3), \"float32\"))\n y = relay.var(\"y\", relay.TensorType((1,6), \"float32\"))\n z = relay.reshape_like(x, y)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((1, 6), 
\"float32\")\n\n # symbolic shape\n n, c, h, w = te.size_var(\"n\"), 2, 3, te.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))\n y = relay.var(\"y\", relay.TensorType((1, 8, 8), \"float32\"))\n z = relay.reshape_like(x, y)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((1, 8, 8), \"float32\")\n\n\[email protected]_gpu\ndef test_reshape_like():\n def verify_reshape_like(shape, oshape):\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(\"float32\")\n y_data = np.random.uniform(low=-1, high=1, size=oshape).astype(\"float32\")\n ref_res = np.reshape(x_data, y_data.shape)\n\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n y = relay.var(\"x\", relay.TensorType(oshape, \"float32\"))\n z = relay.reshape_like(x, y)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.ty.TensorType(ref_res.shape, \"float32\")\n\n func = relay.Function([x, y], z)\n\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n verify_reshape_like((2, 3, 4), (1, 8, 3))\n verify_reshape_like((4, 7), (2, 7, 2))\n\ndef test_take_infer_type():\n def verify_take(dshape, indices_shape, oshape, axis=None):\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n indices = relay.var(\"indices\", relay.TensorType(indices_shape, \"int32\"))\n y = relay.take(x, indices, axis=axis)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(oshape, \"float32\")\n\n d1, d2, d3 = te.var(\"d1\"), te.var(\"d2\"), te.var(\"d3\")\n d4, d5, d6 = te.var(\"d4\"), te.var(\"d5\"), te.var(\"d6\")\n verify_take((d1,), (1,), (1,), 0)\n verify_take((4,), (d1, d2), (d1, d2))\n verify_take((3, 3, 3), (1, d2), (1, d2))\n verify_take((d1, d2), (d3, d4, d5), (d3, d4, d5, d2), 0)\n verify_take((d1, d2), (d3, d4, d5), (d1, d3, d4, d5), 1)\n verify_take((d1, d2, d3, d4), (d5, d6), (d1, d2, d5, d6, d4), -2)\n\[email protected]_gpu\ndef test_take():\n def verify_take(src_shape, indices_src, axis=None, mode=\"clip\"):\n src_dtype = \"float32\"\n indices_dtype = \"int32\"\n indices_src = np.array(indices_src, dtype=indices_dtype)\n x = relay.var(\"x\", relay.TensorType(src_shape, src_dtype))\n indices = relay.var(\"indices\", relay.TensorType(indices_src.shape, indices_dtype))\n z = relay.take(x, indices, axis=axis, mode=mode)\n\n func = relay.Function([x, indices], z)\n x_data = np.random.uniform(low=-1, high=1, size=src_shape).astype(src_dtype)\n np_mode = \"raise\" if mode == \"fast\" else mode\n ref_res = np.take(x_data, indices=indices_src, axis=axis, mode=np_mode)\n\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data, indices_src)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n verify_take((4,), [1])\n verify_take((4,), [[0,1,2,3]])\n verify_take((3,3,3), [[11,25]])\n verify_take((4,), [[0,1],[2,3]])\n verify_take((4,), [1], 0)\n verify_take((2,2), [[[1,0],[0,1]]], 0)\n verify_take((2,2), [[[1,0],[0,1]]], 1)\n verify_take((4,3,5,6), [[2,1,0,0]], -2)\n verify_take((3,4), [-5, 20])\n verify_take((3,4), [-5, 20], mode=\"wrap\")\n verify_take((3,4), [-1, 2], axis=0)\n verify_take((3,4), [-1, 2], axis=0, mode=\"wrap\")\n verify_take((3,4), [-1, 2], axis=1)\n verify_take((3,4), [-1, 
2], axis=1, mode=\"wrap\")\n verify_take((3,3,3), [[11,25]], mode=\"fast\")\n verify_take((3,4), [0, 2], axis=0, mode=\"fast\")\n verify_take((3,4), [0, 2], axis=1, mode=\"fast\")\n\n\ndef test_split_infer_type():\n def verify_split(dshape, indices_or_sections, ret_type, axis=None):\n x = relay.var(\"x\", relay.ty.TensorType(dshape, \"float32\"))\n y = relay.split(x, indices_or_sections, axis=axis)\n yy = run_infer_type(y.astuple())\n assert yy.checked_type == ret_type\n\n idxd = tvm.tir.indexdiv\n\n d1, d2, d3, d4 = te.var(\"d1\"), te.var(\"d2\"), te.var(\"d3\"), te.var(\"d4\")\n axis = te.var(\"axis\")\n verify_split((5, 5, 2, 2), 5,\n relay.ty.TupleType(tvm.runtime.convert([\n relay.ty.TensorType((5, 1, 2, 2), \"float32\"),\n relay.ty.TensorType((5, 1, 2, 2), \"float32\"),\n relay.ty.TensorType((5, 1, 2, 2), \"float32\"),\n relay.ty.TensorType((5, 1, 2, 2), \"float32\"),\n relay.ty.TensorType((5, 1, 2, 2), \"float32\")])),\n axis=1)\n verify_split((5, 5, 2, 2), 5,\n relay.ty.TupleType(tvm.runtime.convert([\n relay.ty.TensorType((1, 5, 2, 2), \"float32\"),\n relay.ty.TensorType((1, 5, 2, 2), \"float32\"),\n relay.ty.TensorType((1, 5, 2, 2), \"float32\"),\n relay.ty.TensorType((1, 5, 2, 2), \"float32\"),\n relay.ty.TensorType((1, 5, 2, 2), \"float32\")])),\n axis=0)\n verify_split((d1, d2, d3, d4), 4,\n relay.ty.TupleType(tvm.runtime.convert([\n relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), \"float32\"),\n relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), \"float32\"),\n relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), \"float32\"),\n relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), \"float32\")])),\n axis=2)\n verify_split((d1, d2, d3, d4), 2,\n relay.ty.TupleType(tvm.runtime.convert([\n relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), \"float32\"),\n relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), \"float32\")])),\n axis=0)\n verify_split((d1, d2, d3, d4), (2, 4, 7),\n relay.ty.TupleType(tvm.runtime.convert([\n relay.ty.TensorType((d1, 2, d3, d4), \"float32\"),\n relay.ty.TensorType((d1, 2, d3, d4), \"float32\"),\n relay.ty.TensorType((d1, 3, d3, d4), \"float32\"),\n relay.ty.TensorType((d1, (d2-7), d3, d4), \"float32\")])),\n axis=1)\n\ndef test_full_infer_type():\n # default settings: match input dtype\n x = relay.var(\"x\", relay.TensorType((), \"int8\"))\n y = relay.full(x, ())\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((), \"int8\")\n\n # change the shape and dtype\n x = relay.var(\"x\", relay.TensorType((), \"float32\"))\n y = relay.full(x, (1, 2), \"int8\")\n \"shape=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((1, 2), \"int8\")\n\n\[email protected]_gpu\ndef test_full():\n def verify_full(fill_value, src_shape, dtype):\n x = relay.var(\"x\", relay.scalar_type(dtype))\n z = relay.full(x, src_shape, dtype)\n func = relay.Function([x], z)\n ref_res = np.full(src_shape, fill_value)\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(np.array(fill_value, dtype))\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n verify_full(4, (1, 3, 4, 4), \"int32\")\n #verify_full(4, (1, 3, 4, 4), \"int64\") # This does not pass, python int32 is not upcast to int64, not sure how to fix it.\n verify_full(4.0, (1, 4), \"float32\")\n\n\ndef test_full_like_infer_type():\n # concrete shape\n base = relay.var(\"base\", relay.TensorType((1, 2, 3), \"float32\"))\n fill = relay.var(\"fill\", 
relay.TensorType((), \"float32\"))\n y = relay.full_like(base, fill)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((1, 2, 3), \"float32\")\n\n # symbolic shape\n n, c, h, w = te.size_var(\"n\"), 2, 3, te.size_var(\"w\")\n base = relay.var(\"base\", relay.TensorType((n, c, h, w), \"float32\"))\n fill = relay.var(\"fill\", relay.TensorType((), \"float32\"))\n y = relay.full_like(base, fill)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, w), \"float32\")\n\n\[email protected]_gpu\ndef test_full_like():\n def verify_full_like(base, fill_value, dtype):\n x_data = np.random.uniform(low=-1, high=1, size=base).astype(dtype)\n x = relay.var(\"x\", relay.TensorType(base, dtype))\n y = relay.var(\"y\", relay.scalar_type(dtype))\n z = relay.full_like(x, y)\n\n func = relay.Function([x, y], z)\n ref_res = np.full_like(x_data, fill_value)\n\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data, np.array(fill_value, dtype))\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n verify_full_like((1, 3, 4, 4), 4, \"int32\")\n verify_full_like((1, 1), 44.0, \"float32\")\n\n\[email protected]_gpu\ndef test_infer_type_leaky_relu():\n n, c , h, w = te.size_var(\"n\"), te.size_var(\"c\"), te.size_var(\"h\"), te.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))\n y = relay.nn.leaky_relu(x, alpha=0.1)\n \"alpha=0.1\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, w), \"float32\")\n\n shape = (1, 5, 10, 10)\n dtype = \"float32\"\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n z = relay.nn.leaky_relu(x, alpha=0.1)\n assert \"alpha=0.1\" in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType(shape, dtype)\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)\n ref_res = np.where(x_data > 0, x_data, x_data * 0.1)\n\n for target, ctx in tvm.testing.enabled_targets():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n op_res2 = intrp2.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n\ndef verify_infer_type_prelu(data, alpha, axis, output, dtype=\"float32\"):\n x = relay.var(\"data\", relay.TensorType(data, dtype))\n if alpha:\n y = relay.var(\"alpha\", relay.TensorType(alpha, dtype))\n else:\n y = relay.var(\"alpha\", relay.IncompleteType())\n z = relay.nn.prelu(x, y, axis=axis)\n zz = run_infer_type(z)\n if axis != 1:\n assert \"axis\" in z.astext()\n assert zz.checked_type == relay.ty.TensorType(output, dtype)\n if not alpha:\n axis = axis if axis else 1\n alpha_shape = (data[axis],)\n assert zz.args[1].checked_type == relay.TensorType(alpha_shape, \"float32\")\n\n if all(isinstance(v, tvm.tir.Var) == 1 for v in data) or not alpha:\n return\n\n func = relay.Function([x, y], z)\n x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)\n a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)\n\n if axis == 1:\n ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data>=0) * x_data\n else:\n ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data>=0) * 
x_data\n\n for target, ctx in tvm.testing.enabled_targets():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(x_data, a_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n op_res2 = intrp2.evaluate(func)(x_data, a_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n\n\[email protected]_gpu\ndef test_infer_type_prelu():\n n, c , h, w = te.size_var(\"n\"), te.size_var(\"c\"), te.size_var(\"h\"), te.size_var(\"w\")\n verify_infer_type_prelu((n, c, h, w), (c,), 1, (n, c, h, w))\n verify_infer_type_prelu((n, h, w, c), (c,), 3, (n, h, w, c))\n verify_infer_type_prelu((n, c, h, w), None, 1, (n, c, h, w))\n verify_infer_type_prelu((n, h, w, c), None, 3, (n, h, w, c))\n verify_infer_type_prelu((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2))\n verify_infer_type_prelu((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3))\n verify_infer_type_prelu((1, 3, 2, 2), None, 1, (1, 3, 2, 2))\n verify_infer_type_prelu((1, 2, 2, 3), None, 3, (1, 2, 2, 3))\n\n\[email protected]_gpu\ndef test_arange():\n def verify_arange(start, stop, step):\n dtype = \"float32\"\n if start is None and step is None:\n x = relay.arange(relay.const(stop, dtype=dtype))\n ref_res = np.arange(stop).astype(dtype)\n elif start is None:\n x = relay.arange(relay.const(stop, dtype=dtype), step=relay.const(step, dtype=dtype))\n ref_res = np.arange(stop, step=step).astype(dtype)\n elif step is None:\n x = relay.arange(relay.const(start, dtype=dtype), relay.const(stop, dtype=dtype))\n ref_res = np.arange(start, stop).astype(dtype)\n else:\n x = relay.arange(\n relay.const(start, dtype=dtype),\n relay.const(stop, dtype=dtype),\n relay.const(step, dtype=dtype))\n ref_res = np.arange(start, stop, step).astype(dtype)\n\n func = relay.Function([], x)\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)()\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n verify_arange(None, 20, None)\n verify_arange(None, 20, 2)\n verify_arange(1, 20, None)\n verify_arange(1, 20, 2)\n # arange doesnt' support floating point right now, see type relation\n # verify_arange(1, 20, 1.5)\n verify_arange(1, 20.5, None)\n verify_arange(1, 20, 3)\n verify_arange(20, 1, -1)\n # arange doesnt' support floating point right now, see type relation\n # verify_arange(20, 1, -1.5)\n\[email protected]_gpu\ndef test_meshgrid():\n def verify_meshgrid(lengths, indexing=\"ij\"):\n input_vars = []\n input_data = []\n for i, length in enumerate(lengths):\n input_name = \"x_{}\".format(i)\n if length == 0:\n # Scalar\n input_vars.append(relay.var(input_name, relay.scalar_type(\"float32\")))\n input_data.append(np.array(1, \"float32\"))\n else:\n input_vars.append(relay.var(input_name, relay.TensorType((length,), \"float32\")))\n input_data.append(np.arange(length).astype(\"float32\"))\n\n z = relay.meshgrid(input_vars, indexing=indexing).astuple()\n func = relay.Function(input_vars, z)\n # Get ref\n ref_res = np.meshgrid(*input_data, indexing=indexing)\n\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(*input_data)\n assert len(op_res) == len(ref_res)\n for i in range(len(op_res)):\n tvm.testing.assert_allclose(op_res[i].asnumpy(), ref_res[i], rtol=1e-5)\n 
verify_meshgrid([3, 5])\n verify_meshgrid([4, 2], indexing=\"xy\")\n verify_meshgrid([3, 5, 2])\n verify_meshgrid([3, 1, 5], indexing=\"xy\")\n # Length 0 signifies scalar.\n verify_meshgrid([3, 5, 0])\n\[email protected]_gpu\ndef test_tile():\n def verify_tile(dshape, reps):\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n z = relay.tile(x, reps=reps)\n\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=dshape).astype(\"float32\")\n ref_res = np.tile(x_data, reps=reps)\n\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n verify_tile((2, 3, 4), (3, 2, 1))\n verify_tile((2, 3, 4), (1, 2))\n verify_tile((2, 3), (3, 2, 1))\n\[email protected]_gpu\ndef test_repeat():\n def verify_repeat(dshape, repeats, axis):\n x = relay.Var(\"x\", relay.TensorType(dshape, \"float32\"))\n func = relay.Function([x], relay.repeat(x, repeats, axis))\n data = np.random.uniform(size=dshape).astype(\"float32\")\n ref_res = np.repeat(data, repeats, axis)\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n verify_repeat((3,), 2, 0)\n verify_repeat((3, 10), 2, -1)\n verify_repeat((3, 2, 4), 3, 1)\n\[email protected]_gpu\ndef test_stack():\n def verify_stack(dshapes, axis):\n y = []\n for shape in dshapes:\n y.append(relay.var(\"input\", relay.TensorType(shape, \"float32\")))\n x = relay.Tuple(y)\n z = relay.stack(x, axis=axis)\n\n func = relay.Function(y, z)\n x_data = [np.random.normal(size=shape).astype(\"float32\") for shape in dshapes]\n ref_res = np.stack(x_data, axis=axis)\n\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(*x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n verify_stack([(2,), (2,), (2,)], -1)\n verify_stack([(2,), (2,), (2,)], 0)\n verify_stack([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1)\n verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1)\n verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], 4)\n\n\[email protected]_gpu\ndef test_reverse():\n def verify_reverse(dshape, axis):\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n z = relay.reverse(x, axis=axis)\n zz = run_infer_type(z)\n\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=dshape).astype(\"float32\")\n ref_res = np.flip(x_data, axis)\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n verify_reverse((2, 3, 4), 1)\n verify_reverse((4, 7), 0)\n verify_reverse((2, 3, 4), -1)\n\n\[email protected]_gpu\ndef test_reverse_sequence():\n def verify_reverse_sequence(x_data, seq_lengths, batch_axis, seq_axis, ref_res):\n seq_lengths_data = np.array(seq_lengths).astype(\"int32\")\n x = relay.var(\"x\", relay.TensorType(x_data.shape, str(x_data.dtype)))\n z = relay.reverse_sequence(x, relay.const(seq_lengths_data), seq_axis, batch_axis)\n zz = 
run_infer_type(z)\n assert zz.checked_type == x.type_annotation\n func = relay.Function([x], z)\n\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype(\"int32\")\n result = [[0, 5, 10, 15],\n [4, 1, 6, 11],\n [8, 9, 2, 7],\n [12, 13, 14, 3]]\n verify_reverse_sequence(indata, [1, 2, 3, 4], 1, 0, np.array(result))\n verify_reverse_sequence(indata, [1, 2, 3, 4], -1, 0, np.array(result))\n verify_reverse_sequence(indata.astype(\"float32\"), [1, 2, 3, 4], 1, 0, np.array(result).astype(\"float32\"))\n\n indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype(\"int32\")\n result = [[0, 1, 2, 3],\n [5, 4, 6, 7],\n [10, 9, 8, 11],\n [15, 14, 13, 12]]\n verify_reverse_sequence(indata, [1, 2, 3, 4], 0, 1, np.array(result))\n verify_reverse_sequence(indata, [1, 2, 3, 4], 0, -1, np.array(result))\n verify_reverse_sequence(indata.astype(\"float32\"), [1, 2, 3, 4], 0, 1, np.array(result).astype(\"float32\"))\n\n indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype(\"int32\")\n result = [[0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n [15, 14, 13, 12]]\n verify_reverse_sequence(indata, [-1, 0, 1, 5], 0, 1, np.array(result))\n\n indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype(\"int32\")\n result = [[[[18, 19, 20], [21, 22, 23], [24, 25, 26]],\n [[9, 10, 11], [12, 13, 14], [15, 16, 17]],\n [[0, 1, 2], [3, 4, 5], [6, 7, 8]]],\n [[[45, 46, 47], [48, 49, 50], [51, 52, 53]],\n [[36, 37, 38], [39, 40, 41], [42, 43, 44]],\n [[27, 28, 29], [30, 31, 32], [33, 34, 35]]]]\n verify_reverse_sequence(indata, [3, 3], 0, 1, np.array(result))\n\n indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype(\"int32\")\n result = [[[[9, 10, 11], [21, 22, 23], [15, 16, 17]],\n [[0, 1, 2], [12, 13, 14], [6, 7, 8]],\n [[18, 19, 20], [3, 4, 5], [24, 25, 26]]],\n [[[36, 37, 38], [48, 49, 50], [42, 43, 44]],\n [[27, 28, 29], [39, 40, 41], [33, 34, 35]],\n [[45, 46, 47], [30, 31, 32], [51, 52, 53]]]]\n verify_reverse_sequence(indata, [2, 3, 2], 2, 1, np.array(result))\n\n indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype(\"int32\")\n result = []\n with pytest.raises(Exception) as execinfo:\n verify_reverse_sequence(indata, [2, 3, 2, 4, 5], 1, 0, np.array(result))\n\n assert \"For reverse_sequnece seq_lengths size should match with dimension of batch axis,\" \\\n \" but got dimension of batch_axis = 4, and seq_length size = 5\" in execinfo.value.args[0]\n\n\ndef test_scatter():\n\n def ref_scatter(data, indices, updates, axis=0):\n idx = np.indices(indices.shape).reshape(indices.ndim, -1)\n\n updated_idx = np.copy(idx)\n indices = indices.reshape(-1)\n for i in range(len(indices)):\n updated_idx[axis, i] = indices[i]\n scattered = np.copy(data)\n scattered[tuple(updated_idx)] = updates[tuple(idx)]\n return scattered\n\n def verify_scatter(dshape, ishape, axis=0):\n d = relay.var(\"d\", relay.TensorType(dshape, \"float32\"))\n i = relay.var(\"i\", relay.TensorType(ishape, \"int64\"))\n u = relay.var(\"u\", relay.TensorType(ishape, \"float32\"))\n z = relay.op.scatter(d, i, u, axis)\n\n func = relay.Function([d, i, u], z)\n\n data_np = np.random.uniform(size=dshape).astype(\"float32\")\n updates_np = np.random.uniform(size=ishape).astype(\"float32\")\n indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, 
ishape).astype(\"int64\")\n\n ref_res = ref_scatter(data_np, indices_np, updates_np, axis)\n # TODO(mbrookhart): expand testing when adding more backend schedules\n for target, ctx in [(\"llvm\", tvm.cpu())]:\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(data_np, indices_np, updates_np)\n tvm.testing.assert_allclose(\n op_res.asnumpy(), ref_res, rtol=1e-5)\n\n verify_scatter((10, ), (10, ), 0)\n verify_scatter((10, 5), (10, 5), -2)\n verify_scatter((10, 5), (10, 5), -1)\n verify_scatter((10, 5), (3, 5), 0)\n verify_scatter((12, 4), (7, 2), 1)\n verify_scatter((2, 3, 4), (1, 3, 4), 0)\n verify_scatter((2, 3, 4), (2, 1, 4), 1)\n verify_scatter((2, 3, 4), (2, 3, 1), 2)\n verify_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0)\n verify_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1)\n verify_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2)\n verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3)\n\n\ndef test_scatter_add():\n\n def ref_scatter_add(data, indices, updates, axis=0):\n output = np.copy(data)\n for index in np.ndindex(*indices.shape):\n new_index = list(index)\n new_index[axis] = indices[index]\n output[tuple(new_index)] += updates[index]\n return output\n\n def verify_scatter_add(dshape, ishape, axis=0):\n d = relay.var(\"d\", relay.TensorType(dshape, \"float32\"))\n i = relay.var(\"i\", relay.TensorType(ishape, \"int64\"))\n u = relay.var(\"u\", relay.TensorType(ishape, \"float32\"))\n z = relay.op.scatter_add(d, i, u, axis)\n\n func = relay.Function([d, i, u], z)\n\n data_np = np.random.uniform(size=dshape).astype(\"float32\")\n updates_np = np.random.uniform(size=ishape).astype(\"float32\")\n indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype(\"int64\")\n\n ref_res = ref_scatter_add(data_np, indices_np, updates_np, axis)\n # TODO(mbrookhart): expand testing when adding more backend schedules\n for target, ctx in [(\"llvm\", tvm.cpu())]:\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(data_np, indices_np, updates_np)\n tvm.testing.assert_allclose(\n op_res.asnumpy(), ref_res, rtol=1e-5)\n\n verify_scatter_add((10, ), (10, ), 0)\n verify_scatter_add((10, 5), (10, 5), -2)\n verify_scatter_add((10, 5), (10, 5), -1)\n verify_scatter_add((10, 5), (3, 5), 0)\n verify_scatter_add((12, 4), (7, 2), 1)\n verify_scatter_add((2, 3, 4), (1, 3, 4), 0)\n verify_scatter_add((2, 3, 4), (2, 1, 4), 1)\n verify_scatter_add((2, 3, 4), (2, 3, 1), 2)\n verify_scatter_add((2, 3, 4, 5), (1, 3, 4, 5), 0)\n verify_scatter_add((6, 3, 4, 5), (2, 3, 4, 5), 1)\n verify_scatter_add((2, 3, 8, 5), (2, 3, 1, 1), 2)\n verify_scatter_add((16, 16, 4, 5), (16, 16, 4, 5), 3)\n\n\[email protected]_gpu\ndef test_gather():\n def verify_gather(data, axis, indices, ref_res):\n data = np.asarray(data, dtype='float32')\n indices = np.asarray(indices, dtype='int32')\n ref_res = np.asarray(ref_res)\n\n d = relay.var(\"x\", relay.TensorType(data.shape, \"float32\"))\n i = relay.var(\"y\", relay.TensorType(indices.shape, \"int32\"))\n z = relay.gather(d, axis, i)\n\n func = relay.Function([d, i], z)\n\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(data, indices)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res,\n rtol=1e-5)\n\n verify_gather([[1, 2], [3, 4]],\n 1,\n [[0, 0], [1, 0]],\n [[1, 1], [4, 3]])\n verify_gather([[[0, 
1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],\n 0,\n [[[1, 0, 1], [1, 1, 0]]],\n [[[6, 1, 8], [9, 10, 5]]])\n verify_gather([[[-0.2321, -0.2024, -1.7624], [-0.3829, -0.4246, 0.2448],\n [0.1822, 0.2360, -0.8965], [0.4497, -0.2224, 0.6103]],\n [[0.0408, -0.7667, -0.4303], [-0.3216, 0.7489, -0.1502],\n [0.0144, -0.4699, -0.0064], [-0.0768, -1.6064, 1.3390]]],\n 1,\n [[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],\n [[[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],\n [[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]]])\n verify_gather([[[0.3050, 1.6986, 1.1034], [0.7020, -0.6960, -2.1818],\n [0.3116, -0.5773, -0.9912], [0.0835, -1.3915, -1.0720]],\n [[0.1694, -0.6091, -0.6539], [-0.5234, -0.1218, 0.5084],\n [0.2374, -1.9537, -2.0078], [-0.5700, -1.0302, 0.1558]]],\n 2,\n [[[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],\n [[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]]],\n [[[1.6986, 1.6986, 0.3050, 1.6986],\n [0.7020, 0.7020, -2.1818, -2.1818],\n [-0.5773, -0.9912, -0.5773, -0.9912],\n [-1.0720, -1.0720, -1.3915, 0.0835]],\n [[0.1694, 0.1694, -0.6091, -0.6539],\n [0.5084, 0.5084, -0.1218, -0.5234],\n [-1.9537, -2.0078, 0.2374, 0.2374],\n [-0.5700, 0.1558, -0.5700, 0.1558]]])\n\n\[email protected]_gpu\ndef test_gather_nd():\n def verify_gather_nd(xshape, yshape, y_data):\n x = relay.var(\"x\", relay.TensorType(xshape, \"float32\"))\n y = relay.var(\"y\", relay.TensorType(yshape, \"int32\"))\n z = relay.gather_nd(x, y)\n\n func = relay.Function([x, y], z)\n x_data = np.random.uniform(size=xshape).astype(\"float32\")\n ref_res = x_data[tuple(y_data)]\n\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n verify_gather_nd((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])\n verify_gather_nd((2, 2, 2), (2, 2), [[0, 1], [1, 0]])\n verify_gather_nd((3, 2, 2), (2, 2), [[0, 1], [1, 0]])\n verify_gather_nd((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])\n\n\ndef _verify_infiniteness_ops(relay_op, ref_op):\n for dtype in ['float32', 'float16', 'float16', 'int32', 'int16']:\n shape = (2, 8, 8)\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n y = relay_op(x)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(shape, \"bool\")\n\n data = np.random.uniform(size=shape).astype(dtype)\n if dtype.startswith('float'):\n data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.infty\n data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan\n\n intrp = create_executor()\n op_res = intrp.evaluate(y, {x: data})\n ref_res = ref_op(data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)\n\n\ndef test_isfinite():\n _verify_infiniteness_ops(relay.isfinite, np.isfinite)\n\n\ndef test_isinf():\n _verify_infiniteness_ops(relay.isinf, np.isinf)\n\n\[email protected]_gpu\ndef test_unravel_index():\n def verify_unravel_index(indices, shape, dtype):\n x_data = np.array(indices).astype(dtype)\n y_data = np.array(shape).astype(dtype)\n x = relay.var(\"x\", relay.TensorType(x_data.shape, dtype))\n y = relay.var(\"y\", relay.TensorType(y_data.shape, dtype))\n\n z = relay.unravel_index(x, y)\n zz = run_infer_type(z)\n\n if len(x_data.shape) == 1:\n out_shape = [y_data.shape[0], x_data.shape[0]]\n else:\n out_shape = [y_data.shape[0]]\n assert zz.checked_type == 
relay.ty.TensorType(out_shape, dtype)\n\n func = relay.Function([x, y], z)\n ref_res = np.unravel_index(x_data, y_data)\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n for dtype in [\"int64\", \"int32\"]:\n verify_unravel_index([0, 1, 2, 3], [2, 2], dtype)\n verify_unravel_index([144], [5, 5, 5, 2], dtype)\n verify_unravel_index(144, [5, 5, 5, 2], dtype)\n verify_unravel_index([100, 13, 5], [5, 5, 5, 2], dtype)\n\n # In below example, 5 is out of bound for array of size 4.\n # Numpy implementation throws error for it\n # TVM implementation does not throw error instead it produces\n # output which is inline with Tensorflow\n # verify_unravel_index([0, 1, 2, 5], [2, 2], dtype)\n\[email protected]_gpu\ndef test_sparse_to_dense():\n def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected):\n sparse_indices_data = np.array(sparse_indices)\n sparse_values_data = np.array(sparse_values)\n default_value_data = np.array(default_value)\n\n a = relay.var(\"a\", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype)))\n b = relay.var(\"b\", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype)))\n if default_value is None:\n args = [a, b]\n d = relay.sparse_to_dense(a, output_shape, b)\n else:\n c = relay.var(\"c\", relay.TensorType(default_value_data.shape, str(default_value_data.dtype)))\n args = [a, b, c]\n d = relay.sparse_to_dense(a, output_shape, b, c)\n\n zz = run_infer_type(d)\n assert zz.checked_type == relay.ty.TensorType(output_shape, str(sparse_values_data.dtype))\n\n func = relay.Function(args, d)\n for target, ctx in tvm.testing.enabled_targets():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n if default_value is None:\n op_res = intrp.evaluate(func)(sparse_indices_data, sparse_values_data)\n else:\n op_res = intrp.evaluate(func)(\n sparse_indices_data, sparse_values_data, default_value_data\n )\n tvm.testing.assert_allclose(op_res.asnumpy(), xpected, rtol=1e-5)\n\n\n verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0]) # scalar\n verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3]) # vector\n verify_sparse_to_dense([[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]) # nXd\n verify_sparse_to_dense(\n [[0, 0, 0], [1, 2, 3]],\n [1, 2],\n 4,\n [2, 3, 4],\n [[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]]\n ) # nXd\n verify_sparse_to_dense([0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]) # floats\n verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0]) # default value not specified\n\n #negative test cases\n #sparse indices should be ints\n #verify_sparse_to_dense([[0.1, 1.1, 4.1], [0,2,4]], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])\n #sparse_values should be 0d or 1d only\n #verify_sparse_to_dense([[0, 1, 4], [0, 2, 4]], [[[3.1, 3.1, 3.1]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])\n #sparse_indices should not be > 2d tensor\n #verify_sparse_to_dense([[[[0, 1, 4], [0, 2, 4]]]], [[[[3.1, 3.1, 3.1]]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])\n\nif __name__ == \"__main__\":\n test_cast()\n test_zeros_ones()\n test_unary_identity()\n test_clip()\n test_transpose_infer_type()\n test_transpose()\n test_reshape_infer_type()\n 
test_reshape()\n test_reshape_fail()\n test_reshape_like_infer_type()\n test_reshape_like()\n test_take_infer_type()\n test_take()\n test_full_infer_type()\n test_full()\n test_full_like_infer_type()\n test_full_like()\n test_infer_type_leaky_relu()\n test_infer_type_prelu()\n test_squeeze()\n test_squeeze_infer_type()\n test_squeeze_bad_axes_infer_type()\n test_split_infer_type()\n test_arange()\n test_meshgrid()\n test_reverse()\n test_stack()\n test_tile()\n test_repeat()\n test_gather_nd()\n test_isfinite()\n test_isinf()\n test_unravel_index()\n test_sparse_to_dense()\n test_fixed_point_multiply()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Example code to do convolution.\"\"\"\n\nimport numpy as np\nimport tvm\nfrom tvm import te\nfrom tvm import autotvm\nfrom tvm import topi\nimport tvm.topi.testing\nfrom tvm.contrib.pickle_memoize import memoize\nfrom tvm.topi.nn.util import get_pad_tuple\nfrom tvm.topi.util import get_const_tuple\n\nimport tvm.testing\n\ndef verify_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1, add_bias=False, add_relu=False,\\\n use_cudnn=False):\n\n pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))\n padding_sum = pad_top + pad_left + pad_bottom + pad_right\n print(\"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)\" % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation))\n\n in_height = in_width = in_size\n\n A = te.placeholder((batch, in_channel, in_height, in_width), name='A')\n W = te.placeholder((num_filter, in_channel, kernel, kernel), name='W')\n bias = te.placeholder((num_filter, 1, 1), name='bias')\n\n a_shape = get_const_tuple(A.shape)\n w_shape = get_const_tuple(W.shape)\n bias_shape = get_const_tuple(bias.shape)\n dtype = A.dtype\n\n @memoize(\"topi.tests.test_topi_conv2d_nchw.verify_conv2d_nchw\")\n def get_ref_data():\n a_np = np.random.uniform(size=a_shape).astype(dtype)\n w_np = np.random.uniform(size=w_shape).astype(dtype)\n b_np = np.random.uniform(size=bias_shape).astype(dtype)\n dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))\n c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)\n if add_bias:\n c_np += b_np\n if add_relu:\n c_np = np.maximum(c_np, 0)\n return a_np, w_np, b_np, c_np\n\n a_np, w_np, b_np, c_np = get_ref_data()\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not tvm.testing.device_enabled(device):\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n\n if \"cudnn\" in device:\n fcompute, fschedule = topi.cuda.conv2d_cudnn, topi.cuda.schedule_conv2d_cudnn\n else:\n fcompute, fschedule = tvm.topi.testing.get_conv2d_nchw_implement(device)\n\n with tvm.target.Target(device):\n if \"cudnn\" in device:\n C = fcompute(A, W, (stride, stride), padding, (dilation, dilation), 1, \"NCHW\", dtype)\n else:\n C = fcompute(A, W, (stride, stride), padding, (dilation, dilation), dtype)\n if add_bias:\n C = topi.add(C, bias)\n if add_relu:\n C = topi.nn.relu(C)\n s = fschedule([C])\n\n a = tvm.nd.array(a_np, ctx)\n w = tvm.nd.array(w_np, ctx)\n b = tvm.nd.array(b_np, ctx)\n\n c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)\n if add_bias:\n func = tvm.build(s, [A, W, bias, C], device, name=\"relu_%d_%d_%d_%d_%d_%d_%d_%d\" % (batch, in_channel, in_size, 
num_filter, kernel, stride, padding_sum, dilation))\n func(a, w, b, c)\n else:\n func = tvm.build(s, [A, W, C], device, name=\"relu_%d_%d_%d_%d_%d_%d_%d_%d\" % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation))\n func(a, w, c)\n tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-4)\n\n for device, ctx in tvm.testing.enabled_targets():\n with autotvm.tophub.context(device): # load tophub pre-tuned parameters\n check_device(device)\n\n if use_cudnn:\n check_device(\"cuda -model=unknown -libs=cudnn\")\n\n\[email protected]_gpu\ndef test_conv2d_nchw():\n # ResNet18 workloads\n verify_conv2d_nchw(1, 3, 224, 64, 7, 2, 3)\n verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1)\n verify_conv2d_nchw(1, 64, 56, 64, 1, 1, 0)\n verify_conv2d_nchw(1, 64, 56, 128, 3, 2, 1)\n verify_conv2d_nchw(1, 64, 56, 128, 1, 2, 0)\n verify_conv2d_nchw(1, 128, 28, 128, 3, 1, 1)\n verify_conv2d_nchw(1, 128, 28, 256, 3, 2, 1)\n verify_conv2d_nchw(1, 128, 28, 256, 1, 2, 0)\n verify_conv2d_nchw(1, 256, 14, 256, 3, 1, 1)\n verify_conv2d_nchw(1, 256, 14, 512, 3, 2, 1)\n verify_conv2d_nchw(1, 256, 14, 512, 1, 2, 0)\n verify_conv2d_nchw(1, 512, 7, 512, 3, 1, 1)\n\n # bias, relu\n verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_relu=True)\n verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_bias=True)\n verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_bias=True, add_relu=True)\n\n # dilation = 2\n verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, dilation=2)\n\n # batch size\n verify_conv2d_nchw(4, 64, 56, 64, 3, 1, 1)\n verify_conv2d_nchw(9, 64, 56, 64, 3, 1, 1)\n\n # weird workloads\n verify_conv2d_nchw(2, 2, 2, 2, 2, 2, 2)\n verify_conv2d_nchw(3, 3, 3, 3, 3, 3, 3)\n verify_conv2d_nchw(4, 4, 4, 4, 4, 4, 4)\n verify_conv2d_nchw(5, 5, 5, 5, 5, 5, 5)\n verify_conv2d_nchw(6, 6, 6, 6, 6, 6, 6)\n\n # disable these tests due to some bugs of llvm with nvptx\n # verify_conv2d_nchw(1, 1, 1, 1, 1, 1, 1, dilation=1)\n # verify_conv2d_nchw(1, 1, 1, 1, 1, 1, 1, dilation=2)\n # verify_conv2d_nchw(2, 13, 71, 59, 3, 1, 1)\n\n # inception v3 workloads\n verify_conv2d_nchw(1, 3, 299, 32, 3, 2, 0)\n verify_conv2d_nchw(1, 32, 149, 32, 3, 1, 0)\n verify_conv2d_nchw(1, 32, 147, 64, 3, 1, 1)\n verify_conv2d_nchw(1, 64, 73, 80, 1, 1, 0)\n verify_conv2d_nchw(1, 80, 73, 192, 3, 1, 0)\n verify_conv2d_nchw(1, 192, 35, 64, 1, 1, 0)\n verify_conv2d_nchw(1, 192, 35, 48, 1, 1, 0)\n verify_conv2d_nchw(1, 48, 35, 64, 5, 1, 2)\n verify_conv2d_nchw(1, 64, 35, 96, 3, 1, 1)\n verify_conv2d_nchw(1, 96, 35, 96, 3, 1, 1)\n verify_conv2d_nchw(1, 192, 35, 32, 1, 1, 0)\n verify_conv2d_nchw(1, 256, 35, 64, 1, 1, 0)\n verify_conv2d_nchw(1, 256, 35, 48, 1, 1, 0)\n verify_conv2d_nchw(1, 288, 35, 64, 1, 1, 0)\n verify_conv2d_nchw(1, 288, 35, 48, 1, 1, 0)\n verify_conv2d_nchw(1, 288, 35, 384, 3, 2, 0)\n verify_conv2d_nchw(1, 96, 35, 96, 3, 2, 0)\n verify_conv2d_nchw(1, 768, 17, 192, 1, 1, 0)\n verify_conv2d_nchw(1, 768, 17, 128, 1, 1, 0)\n verify_conv2d_nchw(1, 128, 17, 128, 1, 1, 0)\n verify_conv2d_nchw(1, 128, 17, 192, 7, 1, 3)\n verify_conv2d_nchw(1, 128, 17, 128, 7, 1, 3)\n verify_conv2d_nchw(1, 128, 17, 192, 1, 1, 0)\n verify_conv2d_nchw(1, 768, 17, 160, 1, 1, 0)\n # disable these tests due to some bugs of llvm with nvptx\n # verify_conv2d_nchw(1, 160, 17, 160, 1, 1, 0)\n verify_conv2d_nchw(1, 160, 17, 192, 7, 1, 3)\n verify_conv2d_nchw(1, 160, 17, 160, 7, 1, 3)\n verify_conv2d_nchw(1, 160, 17, 192, 1, 1, 0)\n verify_conv2d_nchw(1, 192, 17, 192, 1, 1, 0)\n verify_conv2d_nchw(1, 192, 17, 192, 7, 1, 3)\n verify_conv2d_nchw(1, 192, 17, 320, 3, 2, 0)\n 
verify_conv2d_nchw(1, 192, 17, 192, 3, 2, 0)\n verify_conv2d_nchw(1, 1280, 8, 320, 1, 1, 0)\n verify_conv2d_nchw(1, 1280, 8, 384, 1, 1, 0)\n verify_conv2d_nchw(1, 384, 8, 384, 1, 1, 0)\n verify_conv2d_nchw(1, 384, 8, 384, 3, 1, 1)\n verify_conv2d_nchw(1, 1280, 8, 448, 1, 1, 0)\n verify_conv2d_nchw(1, 448, 8, 384, 3, 1, 1)\n verify_conv2d_nchw(1, 1280, 8, 192, 1, 1, 0)\n verify_conv2d_nchw(1, 2048, 8, 320, 1, 1, 0)\n verify_conv2d_nchw(1, 2048, 8, 384, 1, 1, 0)\n verify_conv2d_nchw(1, 2048, 8, 448, 1, 1, 0)\n verify_conv2d_nchw(1, 2048, 8, 192, 1, 1, 0)\n verify_conv2d_nchw(1, 1024, 19, 84, 3, 1, 1)\n verify_conv2d_nchw(1, 2048, 10, 126, 3, 1, 1)\n verify_conv2d_nchw(1, 512, 5, 126, 3, 1, 1)\n verify_conv2d_nchw(1, 256, 3, 126, 3, 1, 1)\n\n # Asymmetric padding\n verify_conv2d_nchw(1, 3, 35, 64, 7, 2, (0, 0, 1, 1))\n verify_conv2d_nchw(1, 64, 8, 128, 3, 1, (3, 3, 2, 2))\n verify_conv2d_nchw(1, 64, 8, 64, 1, 1, (1, 2, 2, 1))\n verify_conv2d_nchw(1, 64, 17, 192, 1, 1, (1, 2))\n verify_conv2d_nchw(1, 64, 8, 64, 3, 1, (3, 1))\n verify_conv2d_nchw(1, 128, 8, 384, 3, 1, (0, 2))\n verify_conv2d_nchw(1, 64, 35, 64, 3, 1, (1, 2), use_cudnn=True)\n verify_conv2d_nchw(1, 64, 8, 64, 1, 1, \"VALID\")\n verify_conv2d_nchw(1, 388, 8, 64, 3, 1, \"VALID\")\n verify_conv2d_nchw(1, 64, 10, 48, 3, 1, \"VALID\", use_cudnn=True)\n verify_conv2d_nchw(1, 512, 19, 64, 1, 1, \"SAME\")\n verify_conv2d_nchw(1, 64, 5, 32, 2, 1, \"SAME\")\n verify_conv2d_nchw(1, 64, 8, 64, 3, 1, \"SAME\", use_cudnn=True)\n verify_conv2d_nchw(1, 64, 8, 64, 3, 1, (1, 2, 2, 1), add_relu=True)\n verify_conv2d_nchw(1, 64, 8, 64, 5, 2, (1, 3), add_bias=True)\n verify_conv2d_nchw(1, 64, 8, 64, 3, 1, \"VALID\", add_bias=True, add_relu=True)\n verify_conv2d_nchw(1, 64, 8, 64, 24, 1, \"SAME\", add_bias=True, add_relu=True)\n\n\nif __name__ == \"__main__\":\n test_conv2d_nchw()\n"
] | [
[
"numpy.take",
"numpy.linspace",
"numpy.asarray",
"numpy.squeeze",
"numpy.random.random_sample",
"numpy.random.randn",
"numpy.where",
"numpy.random.randint",
"numpy.clip",
"numpy.reshape",
"numpy.arange",
"numpy.stack",
"numpy.full",
"numpy.copy",
"numpy.repeat",
"numpy.unravel_index",
"numpy.full_like",
"numpy.random.rand",
"numpy.transpose",
"numpy.meshgrid",
"numpy.flip",
"numpy.array",
"numpy.tanh",
"numpy.logaddexp",
"numpy.tile",
"numpy.indices",
"numpy.ones",
"numpy.random.uniform",
"numpy.random.normal",
"numpy.ndindex"
],
[
"numpy.random.uniform",
"numpy.maximum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
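
The `test_approximate_transcendental` case in the row above builds `exp()` out of `relay.reinterpret`: the integer part of `x * log2(e)` is shifted straight into the float32 exponent bits, and a cubic polynomial covers the fractional remainder. Below is a standalone NumPy sketch of the same bit trick, for intuition only; the function name `approx_exp_np` and the `rtol=1e-3` tolerance are illustrative choices, not part of the row.

import numpy as np

def approx_exp_np(x):
    # Mirror of the Relay approx_exp expression: clamp, rescale to base 2,
    # split into an integer exponent i and a fractional part, then rebuild
    # the float by writing i directly into the float32 exponent bits.
    x = np.clip(np.asarray(x, dtype="float32"), -88.0, 88.0)
    x = np.float32(127.0) + x * np.float32(1.44269504)  # log2(e); 127 is the float32 exponent bias
    xf = np.floor(x)
    i = xf.astype("int32")
    frac = (x - xf).astype("float32")
    # Cubic polynomial approximating 2**frac on [0, 1), same coefficients as the test
    y = np.float32(0.99992522) + frac * (
        np.float32(0.69583354)
        + frac * (np.float32(0.22606716) + frac * np.float32(0.078024523))
    )
    exponent = np.left_shift(i, 23).view("float32")  # the relay.reinterpret step
    return exponent * y

z = np.linspace(-5, 5, 1000).astype("float32")
np.testing.assert_allclose(approx_exp_np(z), np.exp(z), rtol=1e-3)  # loose by design

The sigmoid and tanh variants in the test then reuse this exp: sigmoid(x) = y / (y + 1) with y = exp(x), which is why the test only needs the one approximation.
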
Jhsmit/awesome-panel | [
"53f7754f7c505a2666f6724df26c851ae942ec40"
] | [
"application/pages/training_analysis/services/fit_file_services.py"
] | [
"\"\"\"In this module we provide services for working with fit files.\r\n\r\nResources\r\n\r\n- fitparse package: [GitHub](https://github.com/dtcooper/python-fitparse) and \\\r\n [Docs](http://dtcooper.github.io/python-fitparse/)\r\n- fitdecode pacakge: [GitHub](https://github.com/polyvertex/fitdecode) and \\\r\n [Read the Docs](https://fitdecode.readthedocs.io/en/latest/)\r\n- [FIT on Wikipedia](https://wiki.openstreetmap.org/wiki/FIT)\r\n- [Download FIT SDK](https://www.thisisant.com/resources/fit).\r\n\"\"\"\r\n\r\nfrom typing import Union\r\n\r\nimport fitparse\r\nimport pandas as pd\r\n\r\nUNIT_CONVERSION = {\r\n \"speed\": {\"from\": \"10*6m/s\", \"to\": \"km/h\", \"factor\": 0.0036,},\r\n \"enhanced_speed\": {\"from\": \"10*6m/s\", \"to\": \"km/h\", \"factor\": 3.6,},\r\n \"altitude\": {\"from\": \"unknown\", \"to\": \"m\", \"factor\": 0.03855343881175331,},\r\n \"position_long\": {\"from\": \"semicircles\", \"to\": \"degrees\", \"factor\": (180.0 / 2 ** 31),},\r\n \"position_lat\": {\"from\": \"semicircles\", \"to\": \"degrees\", \"factor\": (180.0 / 2 ** 31),},\r\n}\r\n\r\n\r\ndef parse_fit_file(file: Union[fitparse.base.FitFile, bytes, str,]) -> pd.DataFrame:\r\n \"\"\"Converts a fit_file to a dataframe\r\n\r\n Args:\r\n file (Union[fitparse.base.FitFile, bytes, str]): The fit file to parse\r\n\r\n Raises:\r\n ValueError: If the file is not in a supported format\r\n\r\n Returns:\r\n pd.DataFrame: A DataFrame with the data\r\n \"\"\"\r\n if isinstance(file, (bytes, str,),):\r\n fit_file = fitparse.FitFile(file)\r\n elif isinstance(file, fitparse.base.FitFile,):\r\n fit_file = file\r\n else:\r\n raise ValueError(f\"{type(file)} is not supported!\")\r\n\r\n return _parse_records(fit_file.get_messages(\"record\"))\r\n\r\n\r\ndef _parse_records(records,):\r\n data = [record.get_values() for record in records]\r\n training_data = pd.DataFrame(data)\r\n _convert_units(training_data)\r\n return training_data\r\n\r\n\r\ndef _convert_units(training_data_row: pd.DataFrame,):\r\n columns = set(UNIT_CONVERSION.keys()).intersection(set(training_data_row.columns))\r\n for column in columns:\r\n training_data_row[column] *= UNIT_CONVERSION[column][\"factor\"]\r\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
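
The `UNIT_CONVERSION` table in this row converts FIT semicircle coordinates with the factor 180 / 2**31 (FIT stores latitude and longitude as signed 32-bit semicircles). A minimal sketch of just that conversion; the helper name and the sample coordinate values are made up for illustration and are not from the row.

import pandas as pd

SEMICIRCLE_TO_DEGREE = 180.0 / 2 ** 31  # same factor as UNIT_CONVERSION above

def semicircles_to_degrees(frame: pd.DataFrame) -> pd.DataFrame:
    # Same idea as _convert_units, restricted to the position columns.
    for column in ("position_lat", "position_long"):
        if column in frame.columns:
            frame[column] = frame[column] * SEMICIRCLE_TO_DEGREE
    return frame

frame = pd.DataFrame({"position_lat": [450122425], "position_long": [-1460000000]})
print(semicircles_to_degrees(frame))  # roughly 37.73 lat, -122.38 long
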
rainwoodman/tensorflow | [
"9b7ff60faa841f0473facf618cb5b66b9cb99b5e",
"9b7ff60faa841f0473facf618cb5b66b9cb99b5e",
"9b7ff60faa841f0473facf618cb5b66b9cb99b5e",
"9b7ff60faa841f0473facf618cb5b66b9cb99b5e",
"9b7ff60faa841f0473facf618cb5b66b9cb99b5e"
] | [
"tensorflow/python/kernel_tests/sparse_xent_op_test.py",
"tensorflow/lite/python/interpreter.py",
"tensorflow/python/keras/feature_column/sequence_feature_column.py",
"tensorflow/python/keras/engine/base_layer_utils_test.py",
"tensorflow/python/ops/stateful_random_ops_test.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for SparseSoftmaxCrossEntropyWithLogits op.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport time\n\nimport numpy as np\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import backprop as backprop_lib\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops as ops_lib\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import gradient_checker_v2\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import sparse_ops\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import test\n\n\nclass SparseXentTest(test.TestCase):\n\n def _npXent(self, features, labels):\n features = np.reshape(features, [-1, features.shape[-1]])\n labels = np.reshape(labels, [-1])\n batch_dim = 0\n class_dim = 1\n batch_size = features.shape[batch_dim]\n e = np.exp(features - np.reshape(\n np.amax(\n features, axis=class_dim), [batch_size, 1]))\n probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])\n labels_mat = np.zeros_like(probs).astype(probs.dtype)\n labels_mat[np.arange(batch_size), labels] = 1.0\n bp = (probs - labels_mat)\n l = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)\n return l, bp\n\n def _testXent(self, np_features, np_labels):\n np_loss, np_backprop = self._npXent(np_features, np_labels)\n with self.cached_session():\n loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(\n np_features, np_labels)\n tf_loss, tf_backprop = self.evaluate([loss, backprop])\n self.assertAllCloseAccordingToType(np_loss, tf_loss)\n self.assertAllCloseAccordingToType(np_backprop, tf_backprop)\n\n def testSingleClass(self):\n for label_dtype in np.int32, np.int64:\n with self.cached_session():\n loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(\n np.array([[1.], [-1.], [0.]]).astype(np.float32),\n np.array([0, 0, 0]).astype(label_dtype))\n tf_loss, tf_backprop = self.evaluate([loss, backprop])\n self.assertAllClose([0.0, 0.0, 0.0], tf_loss)\n self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)\n\n @test_util.run_gpu_only()\n def testInvalidLabelGPU(self):\n features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],\n [1., 2., 3., 4.]]\n labels = [4, 3, 0, -1]\n loss, backprop = 
self.evaluate(\n gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels))\n self.assertAllClose([[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],\n [-0.968, 0.087, 0.237, 0.6439], [np.nan] * 4],\n backprop,\n rtol=1e-3,\n atol=1e-3)\n self.assertAllClose([np.nan, 1.3862, 3.4420, np.nan],\n loss,\n rtol=1e-3,\n atol=1e-3)\n\n @test_util.run_in_graph_and_eager_modes(use_gpu=False)\n @test_util.disable_xla(\"XLA cannot assert inside of a kernel.\")\n def testInvalidLabelCPU(self):\n features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],\n [1., 2., 3., 4.]]\n labels = [4, 3, 0, -1]\n with self.assertRaisesRegex(\n (errors_impl.InvalidArgumentError, errors_impl.UnknownError),\n \"Received a label value of\"):\n self.evaluate(\n gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels))\n\n def testNpXent(self):\n # We create 2 batches of logits for testing.\n # batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.\n # batch 1 has a bit of difference: 1, 2, 3, 4, with target 0.\n features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]\n labels = [3, 0]\n\n # For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25\n # With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]\n # The loss for this batch is -log(0.25) = 1.386\n #\n # For batch 1, we have:\n # exp(0) = 1\n # exp(1) = 2.718\n # exp(2) = 7.389\n # exp(3) = 20.085\n # SUM = 31.192\n # So we have as probabilities:\n # exp(0) / SUM = 0.032\n # exp(1) / SUM = 0.087\n # exp(2) / SUM = 0.237\n # exp(3) / SUM = 0.644\n # With a hard 1, the backprop is [0.032 - 1.0 = -0.968, 0.087, 0.237, 0.644]\n # The loss for this batch is [1.0 * -log(0.25), 1.0 * -log(0.032)]\n # = [1.3862, 3.4420]\n np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))\n self.assertAllClose(\n np.array([[0.25, 0.25, 0.25, -0.75], [-0.968, 0.087, 0.237, 0.6439]]),\n np_backprop,\n rtol=1.e-3,\n atol=1.e-3)\n self.assertAllClose(\n np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)\n\n def testShapeMismatch(self):\n with self.session():\n with self.assertRaisesRegex(ValueError, \".*Rank mismatch:*\"):\n nn_ops.sparse_softmax_cross_entropy_with_logits(\n labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])\n\n def testScalar(self):\n with self.session():\n with self.assertRaisesRegex(ValueError, \".*Logits cannot be scalars*\"):\n nn_ops.sparse_softmax_cross_entropy_with_logits(\n labels=constant_op.constant(0), logits=constant_op.constant(1.0))\n\n def testLabelsPlaceholderScalar(self):\n with ops_lib.Graph().as_default(), self.session():\n labels = array_ops.placeholder(np.int32)\n y = nn_ops.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=[[7.]])\n with self.assertRaisesOpError(\"labels must be 1-D\"):\n y.eval(feed_dict={labels: 0})\n\n def testVector(self):\n with self.session():\n loss = nn_ops.sparse_softmax_cross_entropy_with_logits(\n labels=constant_op.constant(0), logits=constant_op.constant([1.0]))\n self.assertAllClose(0.0, self.evaluate(loss))\n\n def testFloat(self):\n for label_dtype in np.int32, np.int64:\n self._testXent(\n np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),\n np.array([3, 0]).astype(label_dtype))\n\n def testDouble(self):\n for label_dtype in np.int32, np.int64:\n self._testXent(\n np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),\n np.array([0, 3]).astype(label_dtype))\n\n def testHalf(self):\n for label_dtype in np.int32, np.int64:\n self._testXent(\n np.array([[1., 1., 1., 1.], [1., 2., 3., 
4.]]).astype(np.float16),\n np.array([3, 0]).astype(label_dtype))\n\n def testEmpty(self):\n self._testXent(np.zeros((0, 3)), np.zeros((0,), dtype=np.int32))\n\n @test_util.run_in_graph_and_eager_modes(use_gpu=True)\n def testGradient(self):\n with self.session() as sess:\n l = constant_op.constant([3, 0, 1], name=\"l\")\n f = constant_op.constant(\n [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],\n shape=[3, 4],\n dtype=dtypes.float64,\n name=\"f\")\n\n def xent(f):\n # gradient_checker_v2.computee_gradient doesn't take int32/int64.\n # labels must be of type int32/int64, so passing them separately here.\n return nn_ops.sparse_softmax_cross_entropy_with_logits(\n labels=l, logits=f, name=\"xent\")\n\n theoretical, numerical = gradient_checker_v2.compute_gradient(xent, [f])\n\n if not context.executing_eagerly():\n # Check that no extra computation performed. When only first derivative\n # is requested, second derivative must not be computed. So when there is\n # no second derivative, there is no `BatchMatMul` op in the graph.\n op_names = [\n op.op_def.name for op in sess.graph.get_operations() if op.op_def\n ]\n self.assertNotIn(\"BatchMatMul\", op_names)\n self.assertNotIn(\"BatchMatMulV2\", op_names)\n\n tol = 5e-8\n self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)\n\n def testSecondGradient(self):\n with self.session() as sess:\n l = constant_op.constant([3, 0, 1], name=\"l\")\n f = constant_op.constant(\n [0.3, 0.4, 0.1, 1.2, 0.1, 1.9, 0.1, 0.7, 0.8, 0.2, 1.3, 1.3],\n shape=[3, 4],\n dtype=dtypes.float64,\n name=\"f\")\n\n def xent_grad(f):\n if not context.executing_eagerly():\n return gradients_impl.gradients(\n nn_ops.sparse_softmax_cross_entropy_with_logits(\n labels=l, logits=f, name=\"xent\"), [f])[0]\n with backprop_lib.GradientTape() as tape:\n tape.watch(f)\n return tape.gradient(\n nn_ops.sparse_softmax_cross_entropy_with_logits(\n labels=l, logits=f, name=\"xent\"), [f])[0]\n\n theoretical, numerical = gradient_checker_v2.compute_gradient(\n xent_grad, [f])\n\n if not context.executing_eagerly():\n # Check that second derivative is calculated.\n # (it is equivalent to being `BatchMatMul` op in the graph because of\n # implementation of xentropy grad)\n op_names = [\n op.op_def.name for op in sess.graph.get_operations() if op.op_def\n ]\n self.assertIn(\"BatchMatMulV2\", op_names)\n\n tol = 5e-8\n self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)\n\n @test_util.run_in_graph_and_eager_modes(use_gpu=True)\n def _testHighDim(self, features, labels):\n np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))\n # manually reshape loss\n np_loss = np.reshape(np_loss, np.array(labels).shape)\n tf_loss = nn_ops.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=features)\n if not context.executing_eagerly():\n tf_backprop = tf_loss.op.inputs[0].op.outputs[1]\n else:\n with backprop_lib.GradientTape() as tape:\n features = constant_op.constant(features)\n tape.watch(features)\n tf_backprop = tape.gradient(\n nn_ops.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=features), [features])[0]\n tf_backprop = array_ops.reshape(tf_backprop, np_backprop.shape)\n\n self.assertAllCloseAccordingToType(np_loss, tf_loss)\n self.assertAllCloseAccordingToType(np_backprop, tf_backprop)\n\n def testHighDim(self):\n features = [[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]\n labels = [[3], [0]]\n self._testHighDim(features, labels)\n\n def testHighDim2(self):\n features = [[[1., 1., 1., 1.], [2., 2., 2., 2.]],\n 
[[1., 2., 3., 4.], [5., 6., 7., 8.]]]\n labels = [[3, 2], [0, 3]]\n self._testHighDim(features, labels)\n\n def testScalarHandling(self):\n with ops_lib.Graph().as_default(), self.session(use_gpu=False) as sess:\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \".*labels must be 1-D.*\"):\n labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])\n logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])\n ce = nn_ops.sparse_softmax_cross_entropy_with_logits(\n labels=array_ops.squeeze(labels), logits=logits)\n labels_v2 = np.zeros((1, 1), dtype=np.int32)\n logits_v2 = np.random.randn(1, 3)\n sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})\n\n\ndef _sparse_vs_dense_xent_benchmark_dense(labels, logits):\n labels = array_ops.identity(labels)\n logits = array_ops.identity(logits)\n with ops_lib.device(\"/cpu:0\"): # Sparse-to-dense must be on CPU\n batch_size = array_ops.shape(logits)[0]\n num_entries = array_ops.shape(logits)[1]\n length = batch_size * num_entries\n labels += num_entries * math_ops.range(batch_size)\n target = sparse_ops.sparse_to_dense(labels,\n array_ops.stack([length]), 1.0, 0.0)\n target = array_ops.reshape(target, array_ops.stack([-1, num_entries]))\n crossent = nn_ops.softmax_cross_entropy_with_logits(\n labels=target, logits=logits, name=\"SequenceLoss/CrossEntropy\")\n crossent_sum = math_ops.reduce_sum(crossent)\n grads = gradients_impl.gradients([crossent_sum], [logits])[0]\n\n return (crossent_sum, grads)\n\n\ndef _sparse_vs_dense_xent_benchmark_sparse(labels, logits):\n # Using sparse_softmax_cross_entropy_with_logits\n labels = labels.astype(np.int64)\n labels = array_ops.identity(labels)\n logits = array_ops.identity(logits)\n crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(\n logits, labels, name=\"SequenceLoss/CrossEntropy\")\n crossent_sum = math_ops.reduce_sum(crossent)\n grads = gradients_impl.gradients([crossent_sum], [logits])[0]\n\n return (crossent_sum, grads)\n\n\ndef sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu):\n config = config_pb2.ConfigProto()\n config.allow_soft_placement = True\n config.gpu_options.per_process_gpu_memory_fraction = 0.3\n labels = np.random.randint(num_entries, size=batch_size).astype(np.int32)\n logits = np.random.randn(batch_size, num_entries).astype(np.float32)\n\n def _timer(sess, ops):\n # Warm in\n for _ in range(20):\n sess.run(ops)\n\n # Timing run\n start = time.time()\n for _ in range(20):\n sess.run(ops)\n end = time.time()\n\n return (end - start) / 20.0 # Average runtime per iteration\n\n # Using sparse_to_dense and softmax_cross_entropy_with_logits\n with session.Session(config=config) as sess:\n if not use_gpu:\n with ops_lib.device(\"/cpu:0\"):\n ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)\n else:\n ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)\n delta_dense = _timer(sess, ops)\n\n # Using sparse_softmax_cross_entropy_with_logits\n with session.Session(config=config) as sess:\n if not use_gpu:\n with test_util.device(\"/cpu:0\"):\n ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)\n else:\n ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)\n delta_sparse = _timer(sess, ops)\n\n print(\"%d \\t %d \\t %s \\t %f \\t %f \\t %f\" % (batch_size, num_entries, use_gpu,\n delta_dense, delta_sparse,\n delta_sparse / delta_dense))\n\n\ndef main(_):\n print(\"Sparse Xent vs. 
SparseToDense + Xent\")\n print(\"batch \\t depth \\t gpu \\t dt(dense) \\t dt(sparse) \"\n \"\\t dt(sparse)/dt(dense)\")\n for use_gpu in (False, True):\n for batch_size in (32, 64, 128):\n for num_entries in (100, 1000, 10000):\n sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu)\n sparse_vs_dense_xent_benchmark(32, 100000, use_gpu)\n sparse_vs_dense_xent_benchmark(8, 1000000, use_gpu)\n\n\nif __name__ == \"__main__\":\n if \"--benchmarks\" in sys.argv:\n sys.argv.remove(\"--benchmarks\")\n app.run()\n else:\n test.main()\n",
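The benchmark above times two routes to the same loss. As a quick sanity check, here is a minimal sketch (not part of the file above; it assumes a TF2 eager environment and uses the public tf.nn API rather than the internal nn_ops module) showing that sparse integer labels and an explicit one-hot target produce identical per-example cross-entropy values:

```python
# Hedged sketch: sparse labels vs. an explicit one-hot target give the same
# per-example loss, which is the equivalence the benchmark above relies on.
import numpy as np
import tensorflow as tf

logits = tf.constant(np.random.randn(4, 10).astype(np.float32))
labels = tf.constant([3, 0, 7, 2], dtype=tf.int32)

sparse_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=labels, logits=logits)
dense_loss = tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.one_hot(labels, depth=10), logits=logits)

# Both paths compute the same cross-entropy, up to float rounding.
np.testing.assert_allclose(sparse_loss.numpy(), dense_loss.numpy(), rtol=1e-6)
```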
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python TF-Lite interpreter.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport ctypes\nimport platform\nimport sys\nimport os\n\nimport numpy as np\n\n# pylint: disable=g-import-not-at-top\nif not os.path.splitext(__file__)[0].endswith(\n os.path.join('tflite_runtime', 'interpreter')):\n # This file is part of tensorflow package.\n from tensorflow.lite.python.interpreter_wrapper import _pywrap_tensorflow_interpreter_wrapper as _interpreter_wrapper\n from tensorflow.python.util.tf_export import tf_export as _tf_export\nelse:\n # This file is part of tflite_runtime package.\n from tflite_runtime import _pywrap_tensorflow_interpreter_wrapper as _interpreter_wrapper\n\n def _tf_export(*x, **kwargs):\n del x, kwargs\n return lambda x: x\n\n\nclass Delegate(object):\n \"\"\"Python wrapper class to manage TfLiteDelegate objects.\n\n The shared library is expected to have two functions:\n TfLiteDelegate* tflite_plugin_create_delegate(\n char**, char**, size_t, void (*report_error)(const char *))\n void tflite_plugin_destroy_delegate(TfLiteDelegate*)\n\n The first one creates a delegate object. It may return NULL to indicate an\n error (with a suitable error message reported by calling report_error()).\n The second one destroys delegate object and must be called for every\n created delegate object. Passing NULL as argument value is allowed, i.e.\n\n tflite_plugin_destroy_delegate(tflite_plugin_create_delegate(...))\n\n always works.\n \"\"\"\n\n def __init__(self, library, options=None):\n \"\"\"Loads delegate from the shared library.\n\n Args:\n library: Shared library name.\n options: Dictionary of options that are required to load the delegate. All\n keys and values in the dictionary should be serializable. Consult the\n documentation of the specific delegate for required and legal options.\n (default None)\n\n Raises:\n RuntimeError: This is raised if the Python implementation is not CPython.\n \"\"\"\n\n # TODO(b/136468453): Remove need for __del__ ordering needs of CPython\n # by using explicit closes(). 
See implementation of Interpreter __del__.\n if platform.python_implementation() != 'CPython':\n raise RuntimeError('Delegates are currently only supported on CPython '\n 'due to missing immediate reference counting.')\n\n self._library = ctypes.pydll.LoadLibrary(library)\n self._library.tflite_plugin_create_delegate.argtypes = [\n ctypes.POINTER(ctypes.c_char_p),\n ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,\n ctypes.CFUNCTYPE(None, ctypes.c_char_p)\n ]\n self._library.tflite_plugin_create_delegate.restype = ctypes.c_void_p\n\n # Convert the options from a dictionary to lists of char pointers.\n options = options or {}\n options_keys = (ctypes.c_char_p * len(options))()\n options_values = (ctypes.c_char_p * len(options))()\n for idx, (key, value) in enumerate(options.items()):\n options_keys[idx] = str(key).encode('utf-8')\n options_values[idx] = str(value).encode('utf-8')\n\n class ErrorMessageCapture(object):\n\n def __init__(self):\n self.message = ''\n\n def report(self, x):\n self.message += x if isinstance(x, str) else x.decode('utf-8')\n\n capture = ErrorMessageCapture()\n error_capturer_cb = ctypes.CFUNCTYPE(None, ctypes.c_char_p)(capture.report)\n # Do not make a copy of _delegate_ptr. It is freed by Delegate's finalizer.\n self._delegate_ptr = self._library.tflite_plugin_create_delegate(\n options_keys, options_values, len(options), error_capturer_cb)\n if self._delegate_ptr is None:\n raise ValueError(capture.message)\n\n def __del__(self):\n # __del__ can be called multiple times, so if the delegate has already\n # been destroyed, don't try to destroy it twice.\n if self._library is not None:\n self._library.tflite_plugin_destroy_delegate.argtypes = [ctypes.c_void_p]\n self._library.tflite_plugin_destroy_delegate(self._delegate_ptr)\n self._library = None\n\n def _get_native_delegate_pointer(self):\n \"\"\"Returns the native TfLiteDelegate pointer.\n\n It is not safe to copy this pointer because it needs to be freed.\n\n Returns:\n TfLiteDelegate *\n \"\"\"\n return self._delegate_ptr\n\n\n@_tf_export('lite.experimental.load_delegate')\ndef load_delegate(library, options=None):\n \"\"\"Returns loaded Delegate object.\n\n Args:\n library: Name of shared library containing the\n [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates).\n options: Dictionary of options that are required to load the delegate. All\n keys and values in the dictionary should be convertible to str. 
Consult\n the documentation of the specific delegate for required and legal options.\n (default None)\n\n Returns:\n Delegate object.\n\n Raises:\n ValueError: Delegate failed to load.\n RuntimeError: If delegate loading is used on an unsupported platform.\n \"\"\"\n try:\n delegate = Delegate(library, options)\n except ValueError as e:\n raise ValueError('Failed to load delegate from {}\\n{}'.format(\n library, str(e)))\n return delegate\n\n\nclass SignatureRunner(object):\n \"\"\"SignatureRunner class for running TFLite models using SignatureDef.\n\n This class should only be instantiated through the Interpreter's\n get_signature_runner method.\n Example,\n signature = interpreter.get_signature_runner(\"my_signature\")\n result = signature(input_1=my_input_1, input_2=my_input_2)\n print(result[\"my_output\"])\n print(result[\"my_second_output\"])\n All names used are the names defined in this specific SignatureDef.\n\n Notes:\n No other function on this object or on the provided interpreter should\n be called while a call on this object has not finished.\n \"\"\"\n\n def __init__(self, interpreter=None, signature_def_name=None):\n \"\"\"Constructor.\n\n Args:\n interpreter: Interpreter object that is already initialized with the\n requested model.\n signature_def_name: SignatureDef name to be used.\n \"\"\"\n if not interpreter:\n raise ValueError('None interpreter provided.')\n if not signature_def_name:\n raise ValueError('None signature_def_name provided.')\n self._interpreter = interpreter\n self._signature_def_name = signature_def_name\n signature_defs = interpreter._get_full_signature_list()\n if signature_def_name not in signature_defs:\n raise ValueError('Invalid signature_def_name provided.')\n self._signature_def = signature_defs[signature_def_name]\n self._outputs = self._signature_def['outputs'].items()\n self._inputs = self._signature_def['inputs']\n\n def __call__(self, **kwargs):\n \"\"\"Runs the SignatureDef with the inputs provided as arguments.\n\n Args:\n **kwargs: key,value pairs for inputs to the model. Each key is the\n SignatureDef input name; each value is a numpy array with the value.\n\n Returns:\n A dictionary of the results from the model invocation. Each key in the\n dictionary is a SignatureDef output name; each value is the result\n Tensor.\n \"\"\"\n\n if len(kwargs) != len(self._inputs):\n raise ValueError(\n 'Invalid number of inputs provided for running a SignatureDef, '\n 'expected %s vs provided %s' % (len(self._inputs), len(kwargs)))\n # Resize input tensors\n for input_name, value in kwargs.items():\n if input_name not in self._inputs:\n raise ValueError('Invalid Input name (%s) for SignatureDef' %\n input_name)\n self._interpreter.resize_tensor_input(self._inputs[input_name],\n value.shape)\n # Allocate tensors.\n self._interpreter.allocate_tensors()\n # Set the input values.\n for input_name, value in kwargs.items():\n self._interpreter._set_input_tensor(\n input_name, value=value, method_name=self._signature_def_name)\n self._interpreter.invoke()\n result = {}\n for output_name, output_index in self._outputs:\n result[output_name] = self._interpreter.get_tensor(output_index)\n return result\n\n\n@_tf_export('lite.Interpreter')\nclass Interpreter(object):\n \"\"\"Interpreter interface for TensorFlow Lite Models.\n\n This makes the TensorFlow Lite interpreter accessible in Python.\n It is possible to use this interpreter in a multithreaded Python environment,\n but you must be sure to call functions of a particular instance from only\n one thread at a time. 
So if you want to have 4 threads running different\n inferences simultaneously, create an interpreter for each one as thread-local\n data. Similarly, if you are calling invoke() in one thread on a single\n interpreter but you want to use tensor() on another thread once it is done,\n you must use a synchronization primitive between the threads to ensure invoke\n has returned before calling tensor().\n \"\"\"\n\n def __init__(self,\n model_path=None,\n model_content=None,\n experimental_delegates=None,\n num_threads=None):\n \"\"\"Constructor.\n\n Args:\n model_path: Path to TF-Lite Flatbuffer file.\n model_content: Content of model.\n experimental_delegates: Experimental. Subject to change. List of\n [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates)\n objects returned by lite.load_delegate().\n num_threads: Sets the number of threads used by the interpreter and\n available to CPU kernels. If not set, the interpreter will use an\n implementation-dependent default number of threads. Currently, only a\n subset of kernels, such as conv, support multi-threading.\n\n Raises:\n ValueError: If the interpreter could not be created.\n \"\"\"\n if not hasattr(self, '_custom_op_registerers'):\n self._custom_op_registerers = []\n if model_path and not model_content:\n custom_op_registerers_by_name = [\n x for x in self._custom_op_registerers if isinstance(x, str)\n ]\n custom_op_registerers_by_func = [\n x for x in self._custom_op_registerers if not isinstance(x, str)\n ]\n self._interpreter = (\n _interpreter_wrapper.CreateWrapperFromFile(\n model_path, custom_op_registerers_by_name,\n custom_op_registerers_by_func))\n if not self._interpreter:\n raise ValueError('Failed to open {}'.format(model_path))\n elif model_content and not model_path:\n custom_op_registerers_by_name = [\n x for x in self._custom_op_registerers if isinstance(x, str)\n ]\n custom_op_registerers_by_func = [\n x for x in self._custom_op_registerers if not isinstance(x, str)\n ]\n # Take a reference, so the pointer remains valid.\n # Since Python strings are immutable, PyString_XX functions\n # will always return the same pointer.\n self._model_content = model_content\n self._interpreter = (\n _interpreter_wrapper.CreateWrapperFromBuffer(\n model_content, custom_op_registerers_by_name,\n custom_op_registerers_by_func))\n elif not model_content and not model_path:\n raise ValueError('`model_path` or `model_content` must be specified.')\n else:\n raise ValueError('Can\'t both provide `model_path` and `model_content`')\n\n if num_threads is not None:\n if not isinstance(num_threads, int):\n raise ValueError('type of num_threads should be int')\n if num_threads < 1:\n raise ValueError('num_threads should be >= 1')\n self._interpreter.SetNumThreads(num_threads)\n\n # Each delegate is a wrapper that owns the delegates that have been loaded\n # as plugins. The interpreter wrapper will be using them, but we need to\n # hold them in a list so that the lifetime is preserved at least as long as\n # the interpreter wrapper.\n self._delegates = []\n if experimental_delegates:\n self._delegates = experimental_delegates\n for delegate in self._delegates:\n self._interpreter.ModifyGraphWithDelegate(\n delegate._get_native_delegate_pointer()) # pylint: disable=protected-access\n self._signature_defs = self.get_signature_list()\n\n def __del__(self):\n # Must make sure the interpreter is destroyed before things that\n # are used by it like the delegates. 
NOTE this only works on CPython\n # probably.\n # TODO(b/136468453): Remove need for __del__ ordering needs of CPython\n # by using explicit closes(). See implementation of Interpreter __del__.\n self._interpreter = None\n self._delegates = None\n\n def allocate_tensors(self):\n self._ensure_safe()\n return self._interpreter.AllocateTensors()\n\n def _safe_to_run(self):\n \"\"\"Returns true if there exist no numpy array buffers.\n\n This means it is safe to run tflite calls that may destroy internally\n allocated memory. This works, because in the wrapper.cc we have made\n the numpy base be the self._interpreter.\n \"\"\"\n # NOTE, our tensor() call in cpp will use _interpreter as a base pointer.\n # If this environment is the only _interpreter, then the ref count should be\n # 2 (1 in self and 1 in temporary of sys.getrefcount).\n return sys.getrefcount(self._interpreter) == 2\n\n def _ensure_safe(self):\n \"\"\"Makes sure no numpy arrays pointing to internal buffers are active.\n\n This should be called from any function that will call a function on\n _interpreter that may reallocate memory e.g. invoke(), ...\n\n Raises:\n RuntimeError: If there exist numpy objects pointing to internal memory\n then we throw.\n \"\"\"\n if not self._safe_to_run():\n raise RuntimeError(\"\"\"There is at least 1 reference to internal data\n in the interpreter in the form of a numpy array or slice. Be sure to\n only hold the function returned from tensor() if you are using raw\n data access.\"\"\")\n\n # Experimental and subject to change\n def _get_op_details(self, op_index):\n \"\"\"Gets a dictionary with arrays of ids for tensors involved with an op.\n\n Args:\n op_index: Operation/node index of node to query.\n\n Returns:\n a dictionary containing the index, op name, and arrays with lists of the\n indices for the inputs and outputs of the op/node.\n \"\"\"\n op_index = int(op_index)\n op_name = self._interpreter.NodeName(op_index)\n op_inputs = self._interpreter.NodeInputs(op_index)\n op_outputs = self._interpreter.NodeOutputs(op_index)\n\n details = {\n 'index': op_index,\n 'op_name': op_name,\n 'inputs': op_inputs,\n 'outputs': op_outputs,\n }\n\n return details\n\n def _get_tensor_details(self, tensor_index):\n \"\"\"Gets tensor details.\n\n Args:\n tensor_index: Tensor index of tensor to query.\n\n Returns:\n A dictionary containing the following fields of the tensor:\n 'name': The tensor name.\n 'index': The tensor index in the interpreter.\n 'shape': The shape of the tensor.\n 'quantization': Deprecated, use 'quantization_parameters'. 
This field\n only works for per-tensor quantization, whereas\n 'quantization_parameters' works in all cases.\n 'quantization_parameters': The parameters used to quantize the tensor:\n 'scales': List of scales (one if per-tensor quantization)\n 'zero_points': List of zero_points (one if per-tensor quantization)\n 'quantized_dimension': Specifies the dimension of per-axis\n quantization, in the case of multiple scales/zero_points.\n\n Raises:\n ValueError: If tensor_index is invalid.\n \"\"\"\n tensor_index = int(tensor_index)\n tensor_name = self._interpreter.TensorName(tensor_index)\n tensor_size = self._interpreter.TensorSize(tensor_index)\n tensor_size_signature = self._interpreter.TensorSizeSignature(tensor_index)\n tensor_type = self._interpreter.TensorType(tensor_index)\n tensor_quantization = self._interpreter.TensorQuantization(tensor_index)\n tensor_quantization_params = self._interpreter.TensorQuantizationParameters(\n tensor_index)\n tensor_sparsity_params = self._interpreter.TensorSparsityParameters(\n tensor_index)\n\n if not tensor_type:\n raise ValueError('Could not get tensor details')\n\n details = {\n 'name': tensor_name,\n 'index': tensor_index,\n 'shape': tensor_size,\n 'shape_signature': tensor_size_signature,\n 'dtype': tensor_type,\n 'quantization': tensor_quantization,\n 'quantization_parameters': {\n 'scales': tensor_quantization_params[0],\n 'zero_points': tensor_quantization_params[1],\n 'quantized_dimension': tensor_quantization_params[2],\n },\n 'sparsity_parameters': tensor_sparsity_params\n }\n\n return details\n\n # Experimental and subject to change\n def _get_ops_details(self):\n \"\"\"Gets op details for every node.\n\n Returns:\n A list of dictionaries containing arrays with lists of tensor ids for\n tensors involved in the op.\n \"\"\"\n return [\n self._get_op_details(idx) for idx in range(self._interpreter.NumNodes())\n ]\n\n def get_tensor_details(self):\n \"\"\"Gets tensor details for every tensor with valid tensor details.\n\n Tensors where required information about the tensor is not found are not\n added to the list. This includes temporary tensors without a name.\n\n Returns:\n A list of dictionaries containing tensor information.\n \"\"\"\n tensor_details = []\n for idx in range(self._interpreter.NumTensors()):\n try:\n tensor_details.append(self._get_tensor_details(idx))\n except ValueError:\n pass\n return tensor_details\n\n def get_input_details(self):\n \"\"\"Gets model input details.\n\n Returns:\n A list of input details.\n \"\"\"\n return [\n self._get_tensor_details(i) for i in self._interpreter.InputIndices()\n ]\n\n def set_tensor(self, tensor_index, value):\n \"\"\"Sets the value of the input tensor.\n\n Note this copies data in `value`.\n\n If you want to avoid copying, you can use the `tensor()` function to get a\n numpy buffer pointing to the input buffer in the tflite interpreter.\n\n Args:\n tensor_index: Tensor index of tensor to set. This value can be gotten from\n the 'index' field in get_input_details.\n value: Value of tensor to set.\n\n Raises:\n ValueError: If the interpreter could not set the tensor.\n \"\"\"\n self._interpreter.SetTensor(tensor_index, value)\n\n def resize_tensor_input(self, input_index, tensor_size, strict=False):\n \"\"\"Resizes an input tensor.\n\n Args:\n input_index: Tensor index of input to set. 
This value can be gotten from\n the 'index' field in get_input_details.\n tensor_size: The tensor_shape to resize the input to.\n strict: Only unknown dimensions can be resized when `strict` is True.\n Unknown dimensions are indicated as `-1` in the `shape_signature`\n attribute of a given tensor. (default False)\n\n Raises:\n ValueError: If the interpreter could not resize the input tensor.\n\n Usage:\n ```\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.resize_tensor_input(0, [num_test_images, 224, 224, 3])\n interpreter.allocate_tensors()\n interpreter.set_tensor(0, test_images)\n interpreter.invoke()\n ```\n \"\"\"\n self._ensure_safe()\n # `ResizeInputTensor` now only accepts int32 numpy array as `tensor_size\n # parameter.\n tensor_size = np.array(tensor_size, dtype=np.int32)\n self._interpreter.ResizeInputTensor(input_index, tensor_size, strict)\n\n def get_output_details(self):\n \"\"\"Gets model output details.\n\n Returns:\n A list of output details.\n \"\"\"\n return [\n self._get_tensor_details(i) for i in self._interpreter.OutputIndices()\n ]\n\n def get_signature_list(self):\n \"\"\"Gets list of SignatureDefs in the model.\n\n Example,\n ```\n signatures = interpreter.get_signature_list()\n print(signatures)\n\n # {\n # 'add': {'inputs': ['x', 'y'], 'outputs': ['output_0']}\n # }\n\n Then using the names in the signature list you can get a callable from\n get_signature_runner().\n ```\n\n Returns:\n A list of SignatureDef details in a dictionary structure.\n It is keyed on the SignatureDef method name, and the value holds\n dictionary of inputs and outputs.\n \"\"\"\n full_signature_defs = self._interpreter.GetSignatureDefs()\n for _, signature_def in full_signature_defs.items():\n signature_def['inputs'] = list(signature_def['inputs'].keys())\n signature_def['outputs'] = list(signature_def['outputs'].keys())\n return full_signature_defs\n\n def _get_full_signature_list(self):\n \"\"\"Gets list of SignatureDefs in the model.\n\n Example,\n ```\n signatures = interpreter._get_full_signature_list()\n print(signatures)\n\n # {\n # 'add': {'inputs': {'x': 1, 'y': 0}, 'outputs': {'output_0': 4}}\n # }\n\n Then using the names in the signature list you can get a callable from\n get_signature_runner().\n ```\n\n Returns:\n A list of SignatureDef details in a dictionary structure.\n It is keyed on the SignatureDef method name, and the value holds\n dictionary of inputs and outputs.\n \"\"\"\n return self._interpreter.GetSignatureDefs()\n\n def _set_input_tensor(self, input_name, value, method_name=None):\n \"\"\"Sets the value of the input tensor.\n\n Input tensor is identified by `input_name` in the SignatureDef identified\n by `method_name`.\n If the model has a single SignatureDef then you can pass None as\n `method_name`.\n\n Note this copies data in `value`.\n\n Example,\n ```\n input_data = np.array([1.2, 1.4], np.float32)\n signatures = interpreter.get_signature_list()\n print(signatures)\n # {\n # 'add': {'inputs': {'x': 1, 'y': 0}, 'outputs': {'output_0': 4}}\n # }\n interpreter._set_input_tensor(input_name='x', value=input_data,\n method_name='add_fn')\n ```\n\n Args:\n input_name: Name of the output tensor in the SignatureDef.\n value: Value of tensor to set as a numpy array.\n method_name: The exported method name for the SignatureDef, it can be None\n if and only if the model has a single SignatureDef. Default value is\n None.\n\n Raises:\n ValueError: If the interpreter could not set the tensor. 
Or\n if `method_name` is None and the model doesn't have a single\n SignatureDef.\n \"\"\"\n if method_name is None:\n if len(self._signature_defs) != 1:\n raise ValueError(\n 'SignatureDef method_name is None and model has {0} Signatures. '\n 'None is only allowed when the model has 1 SignatureDef'.format(\n len(self._signature_defs)))\n else:\n method_name = next(iter(self._signature_defs))\n self._interpreter.SetInputTensorFromSignatureDefName(\n input_name, method_name, value)\n\n def get_signature_runner(self, method_name=None):\n \"\"\"Gets a callable for inference on a specific SignatureDef.\n\n Example usage,\n ```\n interpreter = tf.lite.Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n fn = interpreter.get_signature_runner('div_with_remainder')\n output = fn(x=np.array([3]), y=np.array([2]))\n print(output)\n # {\n # 'quotient': array([1.], dtype=float32)\n # 'remainder': array([1.], dtype=float32)\n # }\n ```\n\n None can be passed for method_name if the model has only a single\n SignatureDef.\n\n All names used are the names defined in this specific SignatureDef.\n\n Args:\n method_name: The exported method name for the SignatureDef, it can be None\n if and only if the model has a single SignatureDef. Default value is\n None.\n\n Returns:\n A callable that can run inference for the SignatureDef defined by\n argument 'method_name'.\n The callable takes keyword arguments corresponding to the arguments of\n the SignatureDef, with numpy values.\n The callable returns a dictionary that maps from output names to numpy\n values of the computed results.\n\n Raises:\n ValueError: If passed method_name is invalid.\n \"\"\"\n if method_name is None:\n if len(self._signature_defs) != 1:\n raise ValueError(\n 'SignatureDef method_name is None and model has {0} Signatures. '\n 'None is only allowed when the model has 1 SignatureDef'.format(\n len(self._signature_defs)))\n else:\n method_name = next(iter(self._signature_defs))\n return SignatureRunner(interpreter=self, signature_def_name=method_name)\n\n def get_tensor(self, tensor_index):\n \"\"\"Gets the value of the output tensor (get a copy).\n\n If you wish to avoid the copy, use `tensor()`. This function cannot be used\n to read intermediate results.\n\n Args:\n tensor_index: Tensor index of tensor to get. This value can be gotten from\n the 'index' field in get_output_details.\n\n Returns:\n a numpy array.\n \"\"\"\n return self._interpreter.GetTensor(tensor_index)\n\n def tensor(self, tensor_index):\n \"\"\"Returns function that gives a numpy view of the current tensor buffer.\n\n This allows reading and writing to this tensor w/o copies. This more\n closely mirrors the C++ Interpreter class interface's tensor() member, hence\n the name. Be careful to not hold these output references through calls\n to `allocate_tensors()` and `invoke()`. This function cannot be used to read\n intermediate results.\n\n Usage:\n\n ```\n interpreter.allocate_tensors()\n input = interpreter.tensor(interpreter.get_input_details()[0][\"index\"])\n output = interpreter.tensor(interpreter.get_output_details()[0][\"index\"])\n for i in range(10):\n input().fill(3.)\n interpreter.invoke()\n print(\"inference %s\" % output())\n ```\n\n Notice how this function avoids making a numpy array directly. This is\n because it is important to not hold actual numpy views to the data longer\n than necessary. 
If you do, then the interpreter can no longer be invoked,\n because it is possible the interpreter would resize and invalidate the\n referenced tensors. The NumPy API doesn't allow any mutability of\n the underlying buffers.\n\n WRONG:\n\n ```\n input = interpreter.tensor(interpreter.get_input_details()[0][\"index\"])()\n output = interpreter.tensor(interpreter.get_output_details()[0][\"index\"])()\n interpreter.allocate_tensors() # This will throw RuntimeError\n for i in range(10):\n input.fill(3.)\n interpreter.invoke() # this will throw RuntimeError since input and output\n # still hold references to internal buffers\n ```\n\n Args:\n tensor_index: Tensor index of tensor to get. This value can be gotten from\n the 'index' field in get_output_details.\n\n Returns:\n A function that can return a new numpy array pointing to the internal\n TFLite tensor state at any point. It is safe to hold the function forever,\n but it is not safe to hold the numpy array forever.\n \"\"\"\n return lambda: self._interpreter.tensor(self._interpreter, tensor_index)\n\n def invoke(self):\n \"\"\"Invoke the interpreter.\n\n Be sure to set the input sizes, allocate tensors and fill values before\n calling this. Also, note that this function releases the GIL so heavy\n computation can be done in the background while the Python interpreter\n continues. No other function on this object should be called while the\n invoke() call has not finished.\n\n Raises:\n ValueError: When the underlying interpreter fails.\n \"\"\"\n self._ensure_safe()\n self._interpreter.Invoke()\n\n def reset_all_variables(self):\n return self._interpreter.ResetVariableTensors()\n\n # Experimental and subject to change.\n def _native_handle(self):\n \"\"\"Returns a pointer to the underlying tflite::Interpreter instance.\n\n This allows extending tflite.Interpreter's functionality in a custom C++\n function. Consider how that may work in a custom pybind wrapper:\n\n m.def(\"SomeNewFeature\", ([](py::object handle) {\n auto* interpreter =\n reinterpret_cast<tflite::Interpreter*>(handle.cast<intptr_t>());\n ...\n }))\n\n and corresponding Python call:\n\n SomeNewFeature(interpreter.native_handle())\n\n Note: This approach is fragile. Users must guarantee the C++ extension build\n is consistent with the tflite.Interpreter's underlying C++ build.\n \"\"\"\n return self._interpreter.interpreter()\n\n\nclass InterpreterWithCustomOps(Interpreter):\n \"\"\"Interpreter interface for TensorFlow Lite Models that accepts custom ops.\n\n The interface provided by this class is experimental and therefore not exposed\n as part of the public API.\n\n Wraps the tf.lite.Interpreter class and adds the ability to load custom ops\n by providing the names of functions that take a pointer to a BuiltinOpResolver\n and add a custom op.\n \"\"\"\n\n def __init__(self,\n model_path=None,\n model_content=None,\n experimental_delegates=None,\n custom_op_registerers=None):\n \"\"\"Constructor.\n\n Args:\n model_path: Path to TF-Lite Flatbuffer file.\n model_content: Content of model.\n experimental_delegates: Experimental. Subject to change. List of\n [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates)\n objects returned by lite.load_delegate().\n custom_op_registerers: List of str (symbol names) or functions that take a\n pointer to a MutableOpResolver and register a custom op. 
When passing\n functions, use a pybind function that takes a uintptr_t that can be\n recast as a pointer to a MutableOpResolver.\n\n Raises:\n ValueError: If the interpreter could not be created.\n \"\"\"\n self._custom_op_registerers = custom_op_registerers or []\n super(InterpreterWithCustomOps, self).__init__(\n model_path=model_path,\n model_content=model_content,\n experimental_delegates=experimental_delegates)\n",
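A condensed usage sketch of the Interpreter workflow documented above. This is illustrative only: 'model.tflite' is a placeholder path, and the input shape and dtype are read from the model itself.

```python
# Hedged end-to-end sketch of the Interpreter API defined above.
# 'model.tflite' is a hypothetical path; shapes/dtypes depend on the model.
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='model.tflite')
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Feed random data matching the first input's declared shape and dtype.
data = np.random.random_sample(
    input_details[0]['shape']).astype(input_details[0]['dtype'])
interpreter.set_tensor(input_details[0]['index'], data)

interpreter.invoke()
result = interpreter.get_tensor(output_details[0]['index'])
```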
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"This API defines FeatureColumn for sequential input.\n\nNOTE: This API is a work in progress and will likely be changing frequently.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.feature_column import feature_column_v2 as fc\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras.feature_column import base_feature_layer as kfc\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.util.tf_export import keras_export\n\n# pylint: disable=protected-access\n\n\n@keras_export('keras.experimental.SequenceFeatures')\nclass SequenceFeatures(kfc._BaseFeaturesLayer):\n \"\"\"A layer for sequence input.\n\n All `feature_columns` must be sequence dense columns with the same\n `sequence_length`. The output of this method can be fed into sequence\n networks, such as RNN.\n\n The output of this method is a 3D `Tensor` of shape `[batch_size, T, D]`.\n `T` is the maximum sequence length for this batch, which could differ from\n batch to batch.\n\n If multiple `feature_columns` are given with `Di` `num_elements` each, their\n outputs are concatenated. So, the final `Tensor` has shape\n `[batch_size, T, D0 + D1 + ... + Dn]`.\n\n Example:\n\n ```python\n\n import tensorflow as tf\n\n # Behavior of some cells or feature columns may depend on whether we are in\n # training or inference mode, e.g. applying dropout.\n training = True\n rating = tf.feature_column.sequence_numeric_column('rating')\n watches = tf.feature_column.sequence_categorical_column_with_identity(\n 'watches', num_buckets=1000)\n watches_embedding = tf.feature_column.embedding_column(watches,\n dimension=10)\n columns = [rating, watches_embedding]\n\n features = {\n 'rating': tf.sparse.from_dense([[1.0,1.1, 0, 0, 0],\n [2.0,2.1,2.2, 2.3, 2.5]]),\n 'watches': tf.sparse.from_dense([[2, 85, 0, 0, 0],[33,78, 2, 73, 1]])\n }\n\n sequence_input_layer = tf.keras.experimental.SequenceFeatures(columns)\n sequence_input, sequence_length = sequence_input_layer(\n features, training=training)\n sequence_length_mask = tf.sequence_mask(sequence_length)\n hidden_size = 32\n rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)\n rnn_layer = tf.keras.layers.RNN(rnn_cell)\n outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)\n ```\n \"\"\"\n\n def __init__(\n self,\n feature_columns,\n trainable=True,\n name=None,\n **kwargs):\n \"\"\"\"Constructs a SequenceFeatures layer.\n\n Args:\n feature_columns: An iterable of dense sequence columns. 
Valid columns are\n - `embedding_column` that wraps a `sequence_categorical_column_with_*`\n - `sequence_numeric_column`.\n trainable: Boolean, whether the layer's variables will be updated via\n gradient descent during training.\n name: Name to give to the SequenceFeatures.\n **kwargs: Keyword arguments to construct a layer.\n\n Raises:\n ValueError: If any of the `feature_columns` is not a\n `SequenceDenseColumn`.\n \"\"\"\n super(SequenceFeatures, self).__init__(\n feature_columns=feature_columns,\n trainable=trainable,\n name=name,\n expected_column_type=fc.SequenceDenseColumn,\n **kwargs)\n\n @property\n def _is_feature_layer(self):\n return True\n\n def _target_shape(self, input_shape, total_elements):\n return (input_shape[0], input_shape[1], total_elements)\n\n def call(self, features, training=None):\n \"\"\"Returns sequence input corresponding to the `feature_columns`.\n\n Args:\n features: A dict mapping keys to tensors.\n training: Python boolean or None, indicating whether the layer is being\n run in training mode. This argument is passed to the call method of any\n `FeatureColumn` that takes a `training` argument. For example, if a\n `FeatureColumn` performed dropout, the column could expose a `training`\n argument to control whether the dropout should be applied. If `None`,\n defaults to `tf.keras.backend.learning_phase()`.\n\n Returns:\n An `(input_layer, sequence_length)` tuple where:\n - input_layer: A float `Tensor` of shape `[batch_size, T, D]`.\n `T` is the maximum sequence length for this batch, which could differ\n from batch to batch. `D` is the sum of `num_elements` for all\n `feature_columns`.\n - sequence_length: An int `Tensor` of shape `[batch_size]`. The sequence\n length for each example.\n\n Raises:\n ValueError: If `features` is not a dictionary.\n \"\"\"\n if not isinstance(features, dict):\n raise ValueError('We expected a dictionary here. Instead we got: ',\n features)\n if training is None:\n training = backend.learning_phase()\n transformation_cache = fc.FeatureTransformationCache(features)\n output_tensors = []\n sequence_lengths = []\n\n for column in self._feature_columns:\n with backend.name_scope(column.name):\n try:\n dense_tensor, sequence_length = column.get_sequence_dense_tensor(\n transformation_cache, self._state_manager, training=training)\n except TypeError:\n dense_tensor, sequence_length = column.get_sequence_dense_tensor(\n transformation_cache, self._state_manager)\n # Flattens the final dimension to produce a 3D Tensor.\n output_tensors.append(self._process_dense_tensor(column, dense_tensor))\n sequence_lengths.append(sequence_length)\n\n # Check and process sequence lengths.\n fc._verify_static_batch_size_equality(sequence_lengths,\n self._feature_columns)\n sequence_length = _assert_all_equal_and_return(sequence_lengths)\n\n return self._verify_and_concat_tensors(output_tensors), sequence_length\n\n\ndef _assert_all_equal_and_return(tensors, name=None):\n \"\"\"Asserts that all tensors are equal and returns the first one.\"\"\"\n with backend.name_scope(name or 'assert_all_equal'):\n if len(tensors) == 1:\n return tensors[0]\n assert_equal_ops = []\n for t in tensors[1:]:\n assert_equal_ops.append(check_ops.assert_equal(tensors[0], t))\n with ops.control_dependencies(assert_equal_ops):\n return array_ops.identity(tensors[0])\n",
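A minimal sketch of the SequenceFeatures layer defined above, condensed from its class docstring (values are illustrative only; the zero entries in the dense input become missing values in the sparse tensor, which is how the sequence lengths are inferred):

```python
# Hedged sketch condensed from the SequenceFeatures docstring above.
import tensorflow as tf

rating = tf.feature_column.sequence_numeric_column('rating')
layer = tf.keras.experimental.SequenceFeatures([rating])

# Zeros in the dense input become missing entries in the sparse tensor,
# so the two examples get sequence lengths 2 and 3 respectively.
features = {'rating': tf.sparse.from_dense(
    [[1.0, 1.1, 0.0], [2.0, 2.1, 2.2]])}
sequence_input, sequence_length = layer(features)
# sequence_input has shape [2, T, 1]; sequence_length has shape [2].
```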
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import combinations\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import test\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass TrackableWeightHandlerTest(keras_parameterized.TestCase):\n\n def get_table_handler(self):\n # Note: There is some repetition in these tests' setup. However, Tensorflow\n # does not play nicely with a separate setUp() call (causing errors related\n # to graph building), so we have to use a called setup instead of a setUp()\n # call.\n table = lookup_ops.MutableHashTable(\n key_dtype=dtypes.string, value_dtype=dtypes.int32, default_value=0)\n return base_layer_utils.TrackableWeightHandler(table)\n\n def test_get_num_tensors(self):\n table_handler = self.get_table_handler()\n self.assertEqual(2, table_handler.num_tensors)\n\n def test_get_and_set_weights(self):\n table_handler = self.get_table_handler()\n\n table_data = {b'a': 1, b'b': 2, b'c': 3}\n table_handler.set_weights(\n [list(table_data.keys()),\n list(table_data.values())])\n weights = backend.batch_get_value(table_handler.get_tensors())\n weight_data = {key: value for key, value in zip(weights[0], weights[1])}\n self.assertDictEqual(table_data, weight_data)\n\n def test_get_and_set_weights_does_not_add_ops(self):\n table_handler = self.get_table_handler()\n table_data = {b'a': 1, b'b': 2, b'c': 3}\n table_handler.set_weights(\n [list(table_data.keys()),\n list(table_data.values())])\n _ = backend.batch_get_value(table_handler.get_tensors())\n backend.get_session().graph.finalize()\n table_handler.set_weights(\n [list(table_data.keys()),\n list(table_data.values())])\n _ = backend.batch_get_value(table_handler.get_tensors())\n\n\[email protected](combinations.combine(mode=['eager']))\nclass OpLayerTest(keras_parameterized.TestCase):\n\n def test_tensor_op_layer(self):\n int_values = keras.Input(shape=(2,), dtype=dtypes.int32)\n float_values = math_ops.cast(int_values, dtypes.float32)\n model = keras.Model(int_values, float_values)\n model.compile(loss='mse')\n\n input_data = np.array([[1, 2], [3, 4]], dtype=np.int32)\n expected = [[1.0, 2.0], [3.0, 
4.0]]\n output = model.predict(input_data)\n self.assertAllClose(expected, output)\n\n def test_ragged_op_layer(self):\n with testing_utils.use_keras_tensors_scope(False):\n with self.assertRaisesRegex(\n ValueError, '(?ms)Keras automatic op wrapping'\n '.*Ragged tensors encountered: '\n r'\\[tf.RaggedTensor\\(values=Tensor\\(\"Cast:0\", shape=\\((\\?|None),\\), '\n r'dtype=float32\\), row_splits=Tensor\\(\"Placeholder_1:0\", '\n r'shape=\\((\\?|None),\\), dtype=int64\\)\\)\\]'):\n int_values = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)\n float_values = math_ops.cast(int_values, dtypes.float32)\n _ = keras.Model(int_values, float_values)\n\n def test_sparse_op_layer(self):\n with testing_utils.use_keras_tensors_scope(False):\n with self.assertRaisesRegex(\n ValueError, \"(?ms)Keras automatic op wrapping\"\n r\".*Sparse ops encountered: \\[\\<tf\\.Operation 'Cast' type=Cast\\>\\]\"):\n int_values = keras.Input(shape=(None,), dtype=dtypes.int32, sparse=True)\n float_values = math_ops.cast(int_values, dtypes.float32)\n _ = keras.Model(int_values, float_values)\n\n def test_ragged_op_layer_keras_tensors(self):\n int_values = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)\n float_values = math_ops.cast(int_values, dtypes.float32)\n model = keras.Model(int_values, float_values)\n model.compile(loss='mse')\n\n input_data = ragged_factory_ops.constant(\n [[1, 2], [3, 4]], dtype=np.int32)\n expected = [[1.0, 2.0], [3.0, 4.0]]\n output = model.predict(input_data)\n self.assertIsInstance(output, ragged_tensor.RaggedTensor)\n self.assertAllClose(expected, output)\n\n def test_sparse_op_layer_keras_tensors(self):\n int_values = keras.Input(shape=(None,), dtype=dtypes.int32, sparse=True)\n float_values = math_ops.cast(int_values, dtypes.float32)\n model = keras.Model(int_values, float_values)\n model.compile(loss='mse')\n\n input_data = sparse_ops.from_dense(\n np.array([[1, 2], [3, 4]], dtype=np.int32))\n expected = [[1.0, 2.0], [3.0, 4.0]]\n output = model.predict(input_data)\n self.assertIsInstance(output, sparse_tensor.SparseTensor)\n self.assertAllClose(expected, sparse_ops.sparse_tensor_to_dense(output))\n\n\nif __name__ == '__main__':\n test.main()\n",
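A hedged sketch of the pattern exercised by OpLayerTest above: applying a raw TF op to a Keras symbolic input, which Keras automatically wraps as a layer (assuming TF 2.4+ where KerasTensors are the default):

```python
# Sketch of automatic op wrapping: tf.cast on a Keras Input becomes a layer.
import numpy as np
import tensorflow as tf

int_values = tf.keras.Input(shape=(2,), dtype=tf.int32)
float_values = tf.cast(int_values, tf.float32)  # wrapped automatically
model = tf.keras.Model(int_values, float_values)

print(model.predict(np.array([[1, 2], [3, 4]], dtype=np.int32)))
# [[1. 2.]
#  [3. 4.]]
```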
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for stateful_random_ops.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.distribute import values as dist_values\nfrom tensorflow.python.distribute.mirrored_strategy import MirroredStrategy\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.kernel_tests.random import util as \\\nrandom_test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_random_ops\nfrom tensorflow.python.ops import gen_stateful_random_ops\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.ops import stateful_random_ops as \\\nrandom\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training.tracking import util as tracking_util\n\n\ng_seeded = None\ng_unseeded = None\n\n\nGPU_FLOATS = [dtypes.float16, dtypes.float32, dtypes.float64]\nCPU_FLOATS = GPU_FLOATS + [dtypes.bfloat16]\nFLOATS = GPU_FLOATS\nINTS = [dtypes.int32, dtypes.int64]\n\n\nclass StatefulRandomOpsTest(test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(StatefulRandomOpsTest, self).setUp()\n physical_devices = config.list_physical_devices(\"CPU\")\n config.set_logical_device_configuration(\n physical_devices[0], [\n context.LogicalDeviceConfiguration(),\n context.LogicalDeviceConfiguration()\n ])\n\n def testCreateRNGStateIntSeed(self):\n \"\"\"Tests `create_rng_state` when `seed` is int.\"\"\"\n # using leading 'F' to test overflow tolerance\n state = random.create_rng_state(0xFFFF222233334444FFAA666677778888,\n random.RNG_ALG_PHILOX)\n self.assertAllEqual(\n list(map(random._uint_to_int,\n [0xFFAA666677778888, 0xFFFF222233334444] +\n [0] * (random.PHILOX_STATE_SIZE - 2))),\n state)\n\n def assertAllDifferent(self, tensors):\n \"\"\"Checks that there are no duplicate elements anywhere among the tensors.\n\n Args:\n tensors: a list of tensors. 
They can have different shapes.\n \"\"\"\n tensors = [array_ops.reshape(t, shape=[-1]) for t in tensors]\n ls = array_ops.concat(tensors, axis=0).numpy().tolist()\n self.assertAllEqual(len(ls), len(set(ls)))\n\n @test_util.run_v2_only\n def testNonDeterministicInts(self):\n \"\"\"Tests that non_deterministic_ints returns different results every time.\n\n This test is flaky, but with very low probability of failing.\n \"\"\"\n shape = [2, 3]\n dtype = dtypes.int64\n a = random.non_deterministic_ints(shape=shape, dtype=dtype)\n self.assertAllEqual(shape, a.shape)\n self.assertEqual(dtype, a.dtype)\n b = random.non_deterministic_ints(shape, dtype=dtype)\n self.assertAllDifferent([a, b])\n\n @test_util.run_v2_only\n def testBatchSeeds(self):\n \"\"\"Test for batch seeds.\n \"\"\"\n shape = [2, 3]\n count = 6\n gen = random.Generator.from_seed(1234)\n keys1 = gen._make_int64_keys(shape=shape)\n keys2 = gen._make_int64_keys(shape=shape)\n self.assertAllDifferent([keys1, keys2])\n seeds1 = gen.make_seeds(count=count)\n seeds2 = gen.make_seeds(count=count)\n self.assertAllDifferent([seeds1[0, :], seeds2[0, :]])\n gens = gen.split(count=count)\n self.assertAllEqual(count, len(gens))\n randoms = [g.uniform_full_int(shape=shape, dtype=dtypes.int32)\n for g in gens]\n self.assertAllDifferent(randoms)\n # Tests graph mode.\n @def_function.function\n def f():\n return gen.make_seeds(count=count)\n for _ in range(3):\n f()\n\n def assertRegex(self, pattern, text):\n self.assertTrue(\n re.search(pattern, text),\n \"Can't find pattern '%s' in text '%s'\" % (pattern, text))\n\n @test_util.run_v2_only\n @test_util.run_cuda_only\n def testCrossDeviceSplit(self):\n \"\"\"Tests that a CPU RNG can split into RNGs on GPU.\n \"\"\"\n with ops.device(\"/device:CPU:0\"):\n gen = random.Generator.from_seed(1234) # gen is on CPU\n self.assertRegex(\"CPU\", gen.state.device)\n with ops.device(test_util.gpu_device_name()):\n gens = gen.split(count=10) # gens are on GPU\n self.assertRegex(\"GPU\", gens[0].state.device)\n\n @test_util.run_v2_only\n def testReset(self):\n shape = [2, 3]\n gen = random.Generator.from_seed(0)\n for resetter in [\n lambda g: g.reset(state=[1, 2, 3]),\n lambda g: g.reset_from_seed(1234),\n lambda g: g.reset_from_key_counter(key=1, counter=[2, 3]),\n ]:\n resetter(gen)\n expected_normal = gen.normal(shape)\n @def_function.function\n def f(resetter):\n resetter(gen)\n return gen.normal(shape)\n def check_results(expected_normal, v):\n self.assertAllEqual(expected_normal, v)\n check_results(expected_normal, f(resetter))\n check_results(expected_normal, f(resetter))\n\n @test_util.run_v2_only\n def testGeneratorCreation(self):\n \"\"\"Tests generator creation, in both eager and tf.function.\n\n The interaction between Generator creation and defun should be the same as\n tf.Variable.\n \"\"\"\n shape = [2, 3]\n alg = random.RNG_ALG_PHILOX\n for constructor in [\n lambda: random.Generator(state=[1, 2, 3], alg=alg),\n lambda: random.Generator.from_seed(1234),\n lambda: random.Generator.from_key_counter( # pylint: disable=g-long-lambda\n key=1, counter=[2, 3], alg=alg),\n ]:\n gen = constructor()\n # Tests tf.function\n expected_normal1 = gen.normal(shape)\n expected_normal2 = gen.normal(shape)\n global g_seeded\n g_seeded = None\n @def_function.function\n def f(constructor):\n global g_seeded\n # defun'ed function should only create variables once\n if g_seeded is None:\n g_seeded = constructor()\n return g_seeded.normal(shape)\n def check_results(expected_normal, v):\n 
self.assertAllEqual(expected_normal, v)\n check_results(expected_normal1, f(constructor))\n check_results(expected_normal2, f(constructor))\n\n @parameterized.parameters([\n (\"philox\", random.RNG_ALG_PHILOX, random.Algorithm.PHILOX),\n (\"threefry\", random.RNG_ALG_THREEFRY, random.Algorithm.THREEFRY)])\n @test_util.run_v2_only\n def testAlg(self, name, int_id, enum_id):\n g_by_name = random.Generator.from_seed(1234, name)\n g_by_int = random.Generator.from_seed(1234, int_id)\n g_by_enum = random.Generator.from_seed(1234, enum_id)\n self.assertEqual(g_by_name.algorithm, g_by_int.algorithm)\n self.assertEqual(g_by_name.algorithm, g_by_enum.algorithm)\n\n @test_util.run_v2_only\n def testGeneratorCreationWithVar(self):\n \"\"\"Tests creating generator with a variable.\n \"\"\"\n alg = random.RNG_ALG_PHILOX\n state = [1, 2, 3]\n var = variables.Variable(state, dtype=random.STATE_TYPE)\n g = random.Generator(state=state, alg=alg)\n g_var = random.Generator(state=var, alg=alg)\n shape = [2, 3]\n g.normal(shape)\n g_var.normal(shape)\n self.assertAllEqual(g.state.read_value(), var.read_value())\n\n @test_util.run_v2_only\n def testGeneratorCreationUnseeded(self):\n \"\"\"Tests generator creation, the unseeded case.\"\"\"\n shape = [2, 3]\n global g_unseeded\n g_unseeded = None\n @def_function.function\n def f():\n global g_unseeded\n # defun'ed function should only create variables once\n if g_unseeded is None:\n g_unseeded = random.Generator.from_non_deterministic_state()\n return g_unseeded.normal(shape)\n self.assertAllEqual(shape, f().shape)\n\n @test_util.run_v2_only\n def testGeneratorCopy(self):\n \"\"\"Tests copying a generator.\"\"\"\n g = random.Generator.from_seed(0)\n g_copy = random.Generator(g)\n self.assertAllEqual(g.algorithm, g_copy.algorithm)\n self.assertAllEqual(g.state.read_value(), g_copy.state.read_value())\n # Tests tf.function\n global g_seeded\n g_seeded = None\n # Do the same in tf.function\n @def_function.function\n def f():\n global g_seeded\n # defun'ed function should only create variables once\n if g_seeded is None:\n g_seeded = random.Generator(g)\n self.assertAllEqual(g.algorithm, g_seeded.algorithm)\n self.assertAllEqual(g.state.read_value(), g_seeded.state.read_value())\n f()\n\n @test_util.run_v1_only(\n (\"This test is specifically for checking TF1 compatibility. 
\"\n \"It cannot run under TF2.\"))\n def testTF1(self):\n seed = 1234\n shape = [2, 3]\n expected_normal1 = constant_op.constant(\n [[0.9356609, 1.0854305, -0.93788373],\n [-0.50615472, 1.31697023, 0.71375787]], dtype=dtypes.float32)\n expected_normal2 = constant_op.constant(\n [[-0.3964749, 0.8369565, -0.30946946],\n [1.1206646, 1.00852597, -0.10185789]], dtype=dtypes.float32)\n with self.cached_session() as sess:\n gen1 = random.Generator.from_seed(seed)\n gen2 = random.Generator.from_non_deterministic_state()\n sess.run((gen1.state.initializer, gen2.state.initializer))\n r1 = gen1.normal(shape, dtype=dtypes.float32)\n r2 = gen2.normal(shape, dtype=dtypes.float32)\n def f():\n return sess.run((r1, r2))\n def check_results(expected_normal, v1, v2):\n self.assertAllClose(expected_normal, v1, rtol=1e-5, atol=1e-5)\n self.assertAllEqual(shape, v2.shape)\n check_results(expected_normal1, *f())\n check_results(expected_normal2, *f())\n\n @test_util.run_v2_only\n @test_util.also_run_as_tf_function\n def testEagerAndDefun(self):\n \"\"\"A simple test to make sure the op works in eager and defunned mode.\"\"\"\n random.get_global_generator().normal((3,))\n\n @test_util.run_v2_only\n def testOpSeedSelectionAfterSetSeed(self):\n \"\"\"Tests that op-seed selection is reset after reseting global generator.\n\n Fixing GitHub issue 9171:\n https://github.com/tensorflow/tensorflow/issues/9171\n \"\"\"\n shape = (3,)\n random.get_global_generator().reset_from_seed(1)\n a = random.get_global_generator().normal(shape)\n random.get_global_generator().reset_from_seed(1)\n b = random.get_global_generator().normal(shape)\n self.assertAllEqual(a, b)\n\n # Now do the above again using accelerated ('defun'ed) computation\n @def_function.function\n def f():\n return random.get_global_generator().normal(shape)\n\n random.get_global_generator().reset_from_seed(1)\n c = f()\n random.get_global_generator().reset_from_seed(1)\n d = f()\n self.assertAllEqual(c, d)\n self.assertAllEqual(a, c)\n\n @test_util.run_v2_only\n def testOpSeedSelectionNotSensitive(self):\n \"\"\"Test that op-seed selection is not sensitive to trivial changes.\n\n Test that op-seed selection is not sensitive to trivial computation\n (i.e. 
graph) changes.\n\n Fixing b/32087099\n \"\"\"\n def f(include_print):\n shape = constant_op.constant([5])\n if include_print:\n shape = logging_ops.Print(shape, [shape])\n return random.get_global_generator().normal(shape)\n\n def compare(fst_includes_print, snd_includes_print):\n random.get_global_generator().reset_from_seed(50)\n fst = f(fst_includes_print)\n random.get_global_generator().reset_from_seed(50)\n snd = f(snd_includes_print)\n self.assertAllEqual(fst, snd)\n # Now do the above again using accelerated (defunned) 'f'.\n # Running 'f' with two different Boolean arguments should cause\n # two different graphs to be generated, hence demonstrating the\n # insensitivity to graph changes.\n f_acc = def_function.function(f)\n random.get_global_generator().reset_from_seed(50)\n fst = f_acc(fst_includes_print)\n random.get_global_generator().reset_from_seed(50)\n snd = f_acc(snd_includes_print)\n self.assertAllEqual(fst, snd)\n\n compare(False, False)\n compare(True, True)\n compare(True, False)\n\n @test_util.run_v2_only\n def testKey(self):\n key = 1234\n gen = random.Generator(state=[0, 0, key], alg=random.RNG_ALG_PHILOX)\n got = gen.key\n self.assertAllEqual(key, got)\n @def_function.function\n def f():\n return gen.key\n got = f()\n self.assertAllEqual(key, got)\n\n @test_util.run_v2_only\n def testSkip(self):\n key = 1234\n counter = 5678\n gen = random.Generator(state=[counter, 0, key], alg=random.RNG_ALG_PHILOX)\n delta = 432\n gen.skip(delta)\n new_counter = gen.state[0]\n self.assertAllEqual(counter + delta * 256, new_counter)\n\n def _sameAsOldRandomOps(self, device, floats):\n def compare(dtype, old, new):\n seed1, seed2 = 79, 25\n # note how the two seeds for the old op correspond to the seed for the new\n # op\n with ops.device(device):\n gen = random.Generator(state=[0, seed2, seed1],\n alg=random.RNG_ALG_PHILOX)\n\n # create a graph for the old op in order to call it many times\n @def_function.function\n def run_old():\n with ops.device(device):\n return old(dtype, seed1, seed2)\n\n def run_new():\n with ops.device(device):\n return new(dtype, gen)\n\n for _ in range(5):\n self.assertAllEqual(run_old(), run_new())\n\n shape = constant_op.constant([4, 7])\n minval = 128\n maxval = 256\n\n # passing `dtype` around to compress go/gpylint-faq#cell-var-from-loop and\n # go/gpylint-faq#undefined-loop-variable\n def old_normal(dtype, seed1, seed2):\n return gen_random_ops.random_standard_normal(\n shape, dtype=dtype, seed=seed1, seed2=seed2)\n def new_normal(dtype, gen):\n return gen._standard_normal(shape, dtype=dtype)\n def old_truncated_normal(dtype, seed1, seed2):\n return gen_random_ops.truncated_normal(\n shape, dtype=dtype, seed=seed1, seed2=seed2)\n def new_truncated_normal(dtype, gen):\n return gen._truncated_normal(shape, dtype=dtype)\n def old_uniform_int(dtype, seed1, seed2):\n minval2 = constant_op.constant(minval, dtype=dtype)\n maxval2 = constant_op.constant(maxval, dtype=dtype)\n return gen_random_ops.random_uniform_int(\n shape, minval=minval2, maxval=maxval2, seed=seed1, seed2=seed2)\n def new_uniform_int(dtype, gen):\n return gen.uniform(shape, minval=minval, maxval=maxval, dtype=dtype)\n def old_uniform(dtype, seed1, seed2):\n return gen_random_ops.random_uniform(\n shape, dtype=dtype, seed=seed1, seed2=seed2)\n def new_uniform(dtype, gen):\n return gen._uniform(shape, dtype=dtype)\n\n for dtype in floats:\n compare(dtype, old_normal, new_normal)\n compare(dtype, old_truncated_normal, new_truncated_normal)\n compare(dtype, old_uniform, new_uniform)\n for 
dtype in INTS:\n compare(dtype, old_uniform_int, new_uniform_int)\n\n @test_util.run_v2_only\n def testSameAsOldRandomOpsCPU(self):\n \"\"\"Tests that the generated numbers are the same as the old random_ops.py.\n\n The CPU version.\n \"\"\"\n self._sameAsOldRandomOps(\"/device:CPU:0\", CPU_FLOATS)\n\n @test_util.run_v2_only\n @test_util.run_cuda_only\n def testSameAsOldRandomOpsGPU(self):\n \"\"\"Tests that the generated numbers are the same as the old random_ops.py.\n\n The GPU version.\n \"\"\"\n self._sameAsOldRandomOps(test_util.gpu_device_name(), GPU_FLOATS)\n\n @parameterized.parameters(INTS + [dtypes.uint32, dtypes.uint64])\n @test_util.run_v2_only\n @test_util.run_cuda_only\n def testGPUEqualsCPU(self, dtype):\n \"\"\"Tests that GPU and CPU generate the same integer outputs.\"\"\"\n seed = 1234\n shape = [315, 49]\n with ops.device(\"/device:CPU:0\"):\n cpu = random.Generator.from_seed(seed).uniform_full_int(\n shape=shape, dtype=dtype)\n with ops.device(test_util.gpu_device_name()):\n gpu = random.Generator.from_seed(seed).uniform_full_int(\n shape=shape, dtype=dtype)\n self.assertAllEqual(cpu, gpu)\n\n @parameterized.parameters(FLOATS + INTS)\n @test_util.run_v2_only\n def testUniformIsInRange(self, dtype):\n minval = 2\n maxval = 33\n size = 1000\n gen = random.Generator.from_seed(1234)\n x = gen.uniform(\n shape=[size], dtype=dtype, minval=minval, maxval=maxval).numpy()\n self.assertTrue(np.all(x >= minval))\n self.assertTrue(np.all(x < maxval))\n\n @parameterized.parameters(FLOATS)\n @test_util.run_v2_only\n def testNormalIsFinite(self, dtype):\n gen = random.Generator.from_seed(1234)\n x = gen.normal(shape=[10000], dtype=dtype).numpy()\n self.assertTrue(np.all(np.isfinite(x)))\n\n @parameterized.parameters(FLOATS + INTS)\n @test_util.run_v2_only\n def testDistributionOfUniform(self, dtype):\n \"\"\"Use Pearson's Chi-squared test to test for uniformity.\"\"\"\n n = 1000\n seed = 12\n gen = random.Generator.from_seed(seed)\n maxval = 1\n if dtype.is_integer:\n maxval = 100\n x = gen.uniform(shape=[n], maxval=maxval, dtype=dtype).numpy()\n if maxval > 1:\n # Normalize y to range [0, 1).\n x = x.astype(float) / maxval\n # Tests that the values are distributed amongst 10 bins with equal\n # probability. 16.92 is the Chi^2 value for 9 degrees of freedom with\n # p=0.05. This test is probabilistic and would be flaky if the random\n # seed were not fixed.\n val = random_test_util.chi_squared(x, 10)\n self.assertLess(val, 16.92)\n\n @parameterized.parameters(FLOATS)\n @test_util.run_v2_only\n def testDistributionOfNormal(self, dtype):\n \"\"\"Use Anderson-Darling test to test distribution appears normal.\"\"\"\n n = 1000\n gen = random.Generator.from_seed(1234)\n x = gen.normal(shape=[n], dtype=dtype).numpy()\n # The constant 2.492 is the 5% critical value for the Anderson-Darling\n # test where the mean and variance are known. 
This test is probabilistic\n # so to avoid flakiness the seed is fixed.\n self.assertLess(\n random_test_util.anderson_darling(x.astype(float)), 2.492)\n\n @test_util.run_v2_only\n def testErrors(self):\n \"\"\"Tests that proper errors are raised.\n \"\"\"\n shape = [2, 3]\n gen = random.Generator.from_seed(1234)\n with self.assertRaisesWithPredicateMatch(\n errors.InvalidArgumentError,\n r\"must have shape \\[\\], not\"):\n gen_stateful_random_ops.stateful_standard_normal_v2(\n gen.state.handle, [0, 0], shape)\n with self.assertRaisesWithPredicateMatch(\n errors.InvalidArgumentError,\n r\"must have shape \\[\\], not\"):\n gen_stateful_random_ops.rng_skip(\n gen.state.handle, gen.algorithm, [0, 0])\n with self.assertRaisesWithPredicateMatch(\n TypeError, \"EagerTensor of dtype int64\"):\n gen_stateful_random_ops.stateful_standard_normal_v2(\n gen.state.handle, 1.1, shape)\n with self.assertRaisesWithPredicateMatch(\n errors.InvalidArgumentError,\n \"Unsupported algorithm id\"):\n gen_stateful_random_ops.stateful_standard_normal_v2(\n gen.state.handle, 123, shape)\n var = variables.Variable([0, 0], dtype=dtypes.int32)\n with self.assertRaisesWithPredicateMatch(\n errors.InvalidArgumentError,\n \"dtype of RNG state variable must be int64, not\"):\n gen_stateful_random_ops.stateful_standard_normal_v2(\n var.handle, random.RNG_ALG_PHILOX, shape)\n var = variables.Variable([[0]], dtype=dtypes.int64)\n with self.assertRaisesWithPredicateMatch(\n errors.InvalidArgumentError,\n \"RNG state must have one and only one dimension, not\"):\n gen_stateful_random_ops.stateful_standard_normal_v2(\n var.handle, random.RNG_ALG_PHILOX, shape)\n var = variables.Variable([0], dtype=dtypes.int64)\n with self.assertRaisesWithPredicateMatch(\n errors.InvalidArgumentError,\n \"For the Philox algorithm, the size of state must be at least\"):\n gen_stateful_random_ops.stateful_standard_normal_v2(\n var.handle, random.RNG_ALG_PHILOX, shape)\n with self.assertRaisesWithPredicateMatch(\n ValueError,\n \"minval must be a scalar; got a tensor of shape \"):\n @def_function.function\n def f():\n gen.uniform(shape=shape, minval=array_ops.zeros(shape, \"int32\"),\n maxval=100, dtype=\"int32\")\n f()\n with self.assertRaisesWithPredicateMatch(\n ValueError,\n \"maxval must be a scalar; got a tensor of shape \"):\n @def_function.function\n def f2():\n gen.uniform(\n shape=shape, minval=0, maxval=array_ops.ones(shape, \"int32\") * 100,\n dtype=\"int32\")\n f2()\n\n @test_util.run_v2_only\n def testGetGlobalGeneratorWithXla(self):\n \"\"\"Demonstrates using the global generator with XLA.\"\"\"\n # This test was passing before because soft placement silently picked the\n # CPU kernel.\n # TODO(wangpeng): Remove this skip\n self.skipTest(\"NonDeterministicInts lacks XLA kernel.\")\n\n if not config.list_physical_devices(\"XLA_CPU\"):\n self.skipTest(\"No XLA_CPU device available.\")\n\n random.set_global_generator(None)\n\n @def_function.function(jit_compile=True)\n def make_seed():\n generator = random.get_global_generator()\n state = array_ops.identity(generator.state, name=\"state\")\n return generator.uniform_full_int((2,), dtypes.int32, name=\"seed\"), state\n\n with ops.device(\"/device:XLA_CPU:0\"):\n seed, state = make_seed()\n self.assertTrue(np.all(np.isfinite(seed.numpy())))\n random.get_global_generator().reset(state)\n self.assertAllEqual(make_seed()[0], seed)\n\n @test_util.run_v2_only\n def testSetGlobalGeneratorBadWithDefun(self):\n \"\"\"Demonstrates that set_global_generator don't work properly with defun.\n 
\"\"\"\n shape = (3,)\n\n @def_function.function\n def f():\n return random.get_global_generator().normal(shape)\n\n random.set_global_generator(random.Generator.from_seed(50))\n with self.assertRaisesWithPredicateMatch(\n errors.NotFoundError, \"Resource .+ does not exist\"):\n _ = f()\n random.set_global_generator(random.Generator.from_seed(50))\n _ = f()\n\n @test_util.run_v2_only\n def testFunctionArg(self):\n \"\"\"Tests that RNG can be used as tf.function's argument.\n \"\"\"\n shape = [2, 3]\n @def_function.function\n def f(gen):\n return gen.normal(shape)\n g1 = random.Generator.from_seed(1)\n g2 = random.Generator.from_seed(1)\n res1 = f(g1)\n res2 = g2.normal(shape)\n self.assertAllEqual(res1, res2)\n self.assertAllEqual(g1.state.read_value(), g2.state.read_value())\n\n @test_util.run_v2_only\n def testCreateOutsideMirroredStrat(self):\n \"\"\"Tests RNG/MirrorStrategy interaction #1.\n\n If an RNG is created outside a DS scope, all replicas will access the\n same RNG object, and accesses are serialized.\n \"\"\"\n shape = [3, 4]\n dtype = dtypes.int32\n gen = random.Generator.from_seed(1234)\n strat = MirroredStrategy(devices=[\"cpu:0\", \"cpu:1\"])\n with strat.scope():\n def f():\n t1 = gen.uniform_full_int(shape=shape, dtype=dtype)\n t2 = gen.uniform_full_int(shape=shape, dtype=dtype)\n t = array_ops.stack([t1, t2])\n return t\n results = strat.extended.call_for_each_replica(\n fn=f)\n values = results.values\n self.assertAllEqual(2, len(values))\n self.assertAllDifferent(values)\n\n @test_util.run_v2_only\n def testMirroredStratParaAsync(self):\n \"\"\"Tests RNG/MirrorStrategy interaction #2.\n\n The user can create n independent RNGs outside strategy.scope(), where n\n is the number of replicas, and give one to each replica. The replicas can\n thus get different random-number streams.\n \"\"\"\n shape = [3, 4]\n dtype = dtypes.int32\n gens = random.get_global_generator().split(count=2)\n devices = [\"cpu:0\", \"cpu:1\"]\n strat = MirroredStrategy(devices=devices)\n # Use `PerReplica` to specify which `gen` is sent to which replica\n gens = dist_values.PerReplica([[g] for g in gens])\n with strat.scope():\n def f(gen):\n t1 = gen.uniform_full_int(shape=shape, dtype=dtype)\n t2 = gen.uniform_full_int(shape=shape, dtype=dtype)\n t = array_ops.stack([t1, t2])\n return t\n results = strat.extended.call_for_each_replica(\n fn=f, args=gens)\n local_results = strat.experimental_local_results(results)\n self.assertAllEqual(2, len(local_results))\n self.assertAllDifferent(local_results)\n\n @test_util.run_v2_only\n def testUniformFullInt(self):\n \"\"\"Tests full-range int uniform.\n \"\"\"\n shape = [3, 4]\n dtype = dtypes.int32\n g = random.Generator.from_seed(1)\n r1 = g.uniform(shape=shape, dtype=dtype, minval=None)\n g = random.Generator.from_seed(1)\n r2 = g.uniform_full_int(shape=shape, dtype=dtype)\n self.assertAllEqual(r1, r2)\n\n @test_util.run_v2_only\n def testRestore(self):\n \"\"\"Tests save and restore.\n \"\"\"\n fname = os.path.join(self.get_temp_dir(), \"checkpoint\")\n g = random.Generator.from_seed(1)\n cp = tracking_util.Checkpoint(g=g)\n def write_restore_compare():\n cp.write(fname)\n r1 = g.uniform([], dtype=dtypes.uint32, minval=None)\n cp.restore(fname)\n r2 = g.uniform([], dtype=dtypes.uint32, minval=None)\n self.assertAllEqual(r1, r2)\n # Run multiple times so that cp.write is called in various RNG states\n for _ in range(2):\n write_restore_compare()\n\n\nif __name__ == \"__main__\":\n config.set_soft_device_placement(False)\n test.main()\n"
] | [
[
"tensorflow.python.platform.app.run",
"numpy.amax",
"tensorflow.python.framework.test_util.run_in_graph_and_eager_modes",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.array_ops.squeeze",
"numpy.random.randn",
"numpy.zeros_like",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.executing_eagerly",
"numpy.random.randint",
"tensorflow.python.framework.test_util.disable_xla",
"numpy.reshape",
"numpy.arange",
"tensorflow.python.ops.nn_ops.softmax_cross_entropy_with_logits",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.test_util.device",
"numpy.zeros",
"tensorflow.python.ops.gradient_checker_v2.compute_gradient",
"tensorflow.python.framework.test_util.run_gpu_only",
"tensorflow.python.ops.math_ops.reduce_sum",
"numpy.log",
"tensorflow.python.client.session.Session",
"numpy.array",
"numpy.sum",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.gen_nn_ops.sparse_softmax_cross_entropy_with_logits",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.constant_op.constant"
],
[
"numpy.array",
"tensorflow.python.util.tf_export.tf_export"
],
[
"tensorflow.python.feature_column.feature_column_v2._verify_static_batch_size_equality",
"tensorflow.python.keras.backend.name_scope",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.keras.backend.learning_phase",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.feature_column.feature_column_v2.FeatureTransformationCache"
],
[
"tensorflow.python.keras.Input",
"tensorflow.python.keras.Model",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.ops.lookup_ops.MutableHashTable",
"tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.testing_utils.use_keras_tensors_scope",
"tensorflow.python.keras.backend.get_session",
"numpy.array",
"tensorflow.python.keras.combinations.combine",
"tensorflow.python.keras.engine.base_layer_utils.TrackableWeightHandler",
"tensorflow.python.ops.math_ops.cast"
],
[
"tensorflow.python.framework.config.list_physical_devices",
"tensorflow.python.ops.stateful_random_ops.get_global_generator",
"tensorflow.python.ops.gen_random_ops.random_uniform",
"tensorflow.python.framework.config.set_soft_device_placement",
"tensorflow.python.ops.gen_random_ops.random_uniform_int",
"tensorflow.python.ops.logging_ops.Print",
"tensorflow.python.ops.stateful_random_ops.Generator.from_key_counter",
"numpy.all",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.stateful_random_ops.Generator.from_non_deterministic_state",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.stateful_random_ops.non_deterministic_ints",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.ops.gen_random_ops.truncated_normal",
"tensorflow.python.ops.stateful_random_ops.Generator",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.gen_stateful_random_ops.rng_skip",
"tensorflow.python.ops.gen_stateful_random_ops.stateful_standard_normal_v2",
"tensorflow.python.ops.stateful_random_ops.Generator.from_seed",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.distribute.values.PerReplica",
"tensorflow.python.distribute.mirrored_strategy.MirroredStrategy",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.eager.context.LogicalDeviceConfiguration",
"tensorflow.python.ops.array_ops.concat",
"numpy.isfinite",
"tensorflow.python.ops.gen_random_ops.random_standard_normal",
"tensorflow.python.ops.stateful_random_ops.create_rng_state",
"tensorflow.python.kernel_tests.random.util.chi_squared",
"tensorflow.python.ops.stateful_random_ops.set_global_generator",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.test_util.gpu_device_name",
"tensorflow.python.training.tracking.util.Checkpoint",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.12",
"2.6",
"2.7",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8"
]
}
] |
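The record above ends with the tail of TensorFlow's stateful-RNG test suite, which verifies that seeded generators replay identical streams, that reset_from_seed rewinds a generator to a known state, and that split() yields independent per-replica generators. As a minimal sketch of that reproducibility contract, the snippet below uses only the public tf.random.Generator API rather than the internal modules (stateful_random_ops, def_function, test_util) the test imports; it assumes TensorFlow 2.x with eager execution, and every call in it is standard public API.

import tensorflow as tf

# Seeding fixes the generator state, so two generators built from the
# same seed replay bitwise-identical streams.
g1 = tf.random.Generator.from_seed(1234)
g2 = tf.random.Generator.from_seed(1234)
a = g1.normal(shape=[2, 3])
assert bool(tf.reduce_all(a == g2.normal(shape=[2, 3])))

# reset_from_seed rewinds an existing generator, mirroring the
# reset_from_seed(50) calls in the graph-insensitivity test above.
g1.reset_from_seed(1234)
assert bool(tf.reduce_all(g1.normal(shape=[2, 3]) == a))

# split() derives independent child generators: the pattern the
# MirroredStrategy test uses to hand each replica its own stream.
child_a, child_b = g1.split(count=2)
print(child_a.uniform(shape=[4], minval=0, maxval=10, dtype=tf.int32))
print(child_b.uniform(shape=[4], minval=0, maxval=10, dtype=tf.int32))

On a fixed device with the default (typically Philox) algorithm these streams are deterministic, which is why the equality assertions hold exactly rather than approximately.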
stephanefschwarz/EMET | [
"92ab8b0a53bbdfe5618353f0055eba98ae93f53f"
] | [
"emet/hypothesis/generate_dictionary.py"
] | [
"import sys\nimport pandas as pd\nimport requests\nimport nltk\nnltk.download('stopwords')\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\n\nfrom bs4 import BeautifulSoup\n\n# --- open dataset --- #\ndata = pd.read_csv('./dataset/translated_twitter_posts.csv')\n\ndocuments = data['translated_posts']\n\n# --- create an instance of tokenizer --- #\n\npremises = []\n\ntokenizer = RegexpTokenizer(r'\\w+')\n\nprogress = 0\ntotal_posts = documents.shape[0]\n\nfor document in documents:\n sentence = ''\n tokens = tokenizer.tokenize(document)\n for token in tokens:\n\n if not token in stopwords.words('english'):\n try:\n request = requests.get(\"http://www.urbandictionary.com/define.php?term={}\".format(token))\n extract_mening = BeautifulSoup(request.content, 'html.parser')\n meaning = extract_mening.find(\"div\",attrs={\"class\":\"meaning\"})\n if meaning != None:\n meaning = meaning.text\n sentence = sentence + meaning + ' '\n else:\n sentence = sentence + token + ' '\n except Exception as e:\n print('Exception at token ', token, '\\n', e)\n else:\n sentence = sentence + token + ' '\n\n premises.append(sentence)\n\n progress = progress + 1\n percentage = round((progress / total_posts) * 100, 2)\n\n output_print = \"{}% | {}/{}\".format(percentage, progress, total_posts)\n\n # Poor way to show a progress bar :|\n sys.stdout.write(\"\\r {:<70}\".format(output_print))\n sys.stdout.flush()\n\ndata['premises'] = premises\ndata.to_csv('./dataset/premises_twitter_posts.csv')\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
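The EMET record above stores a scraping script that replaces each non-stopword token of a translated Twitter post with its Urban Dictionary gloss. The sketch below restates that per-token loop in a self-contained form: the define.php URL and the div with class "meaning" come from the original script (whether that markup still matches the live site is an assumption), the original's extract_mening name is corrected to ordinary spelling, and the timeout, the requests.RequestException fallback, and the expand_post name are illustrative additions rather than part of the source.

import requests
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer

tokenizer = RegexpTokenizer(r"\w+")
english_stopwords = set(stopwords.words("english"))  # requires nltk.download("stopwords")

def expand_post(post):
    """Replace each non-stopword token with its scraped gloss, if any."""
    pieces = []
    for token in tokenizer.tokenize(post):
        gloss = None
        if token not in english_stopwords:
            try:
                page = requests.get(
                    "http://www.urbandictionary.com/define.php?term={}".format(token),
                    timeout=10)
                soup = BeautifulSoup(page.content, "html.parser")
                node = soup.find("div", attrs={"class": "meaning"})
                if node is not None:
                    gloss = node.text
            except requests.RequestException as exc:
                print("Exception at token", token, exc)
        pieces.append(gloss if gloss is not None else token)
    return " ".join(pieces)

Because the same token recurs across thousands of posts, memoizing lookups (for example, functools.lru_cache around a small fetch helper) would avoid re-fetching a definition on every occurrence, which the original loop does.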
gujralsanyam22/models | [
"d96f8f043dbe2b5ca8ea1785f57df8faf68d8875",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"d96f8f043dbe2b5ca8ea1785f57df8faf68d8875",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"d96f8f043dbe2b5ca8ea1785f57df8faf68d8875",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"d96f8f043dbe2b5ca8ea1785f57df8faf68d8875",
"d96f8f043dbe2b5ca8ea1785f57df8faf68d8875",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"d96f8f043dbe2b5ca8ea1785f57df8faf68d8875",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"d96f8f043dbe2b5ca8ea1785f57df8faf68d8875",
"d96f8f043dbe2b5ca8ea1785f57df8faf68d8875",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"11ea5237818e791a5717716d5413977f4c4db1e3",
"11ea5237818e791a5717716d5413977f4c4db1e3"
] | [
"official/nlp/bert/run_squad.py",
"official/vision/detection/dataloader/tf_example_decoder.py",
"research/object_detection/export_tflite_ssd_graph_lib.py",
"research/slim/nets/nasnet/pnasnet.py",
"official/modeling/hyperparams/base_config_test.py",
"research/object_detection/models/center_net_resnet_feature_extractor.py",
"official/vision/detection/utils/object_detection/target_assigner.py",
"research/object_detection/predictors/heads/keypoint_head.py",
"research/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor_tf1_test.py",
"research/lstm_object_detection/meta_architectures/lstm_ssd_meta_arch_test.py",
"official/vision/beta/modeling/layers/box_matcher.py",
"official/vision/beta/dataloaders/classification_input.py",
"research/object_detection/utils/object_detection_evaluation_test.py",
"official/vision/detection/main.py",
"research/object_detection/models/ssd_mobilenet_v2_fpn_keras_feature_extractor.py",
"official/nlp/modeling/models/xlnet_test.py",
"research/sequence_projection/prado/common_layer.py",
"research/slim/nets/resnet_v1.py",
"research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_testbase.py",
"official/vision/detection/utils/object_detection/box_coder.py",
"research/object_detection/box_coders/faster_rcnn_box_coder_test.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Run BERT on SQuAD 1.1 and SQuAD 2.0 in TF 2.x.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nimport time\n\n# Import libraries\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport gin\nimport tensorflow as tf\nfrom official.common import distribute_utils\nfrom official.nlp.bert import configs as bert_configs\nfrom official.nlp.bert import run_squad_helper\nfrom official.nlp.bert import tokenization\nfrom official.nlp.data import squad_lib as squad_lib_wp\nfrom official.utils.misc import keras_utils\n\n\nflags.DEFINE_string('vocab_file', None,\n 'The vocabulary file that the BERT model was trained on.')\n\n# More flags can be found in run_squad_helper.\nrun_squad_helper.define_common_squad_flags()\n\nFLAGS = flags.FLAGS\n\n\ndef train_squad(strategy,\n input_meta_data,\n custom_callbacks=None,\n run_eagerly=False,\n init_checkpoint=None,\n sub_model_export_name=None):\n \"\"\"Run bert squad training.\"\"\"\n bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)\n init_checkpoint = init_checkpoint or FLAGS.init_checkpoint\n run_squad_helper.train_squad(strategy, input_meta_data, bert_config,\n custom_callbacks, run_eagerly, init_checkpoint,\n sub_model_export_name=sub_model_export_name)\n\n\ndef predict_squad(strategy, input_meta_data):\n \"\"\"Makes predictions for the squad dataset.\"\"\"\n bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n run_squad_helper.predict_squad(\n strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp)\n\n\ndef eval_squad(strategy, input_meta_data):\n \"\"\"Evaluate on the squad dataset.\"\"\"\n bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n eval_metrics = run_squad_helper.eval_squad(\n strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp)\n return eval_metrics\n\n\ndef export_squad(model_export_path, input_meta_data):\n \"\"\"Exports a trained model as a `SavedModel` for inference.\n\n Args:\n model_export_path: a string specifying the path to the SavedModel directory.\n input_meta_data: dictionary containing meta data about input and model.\n\n Raises:\n Export path is not specified, got an empty string or None.\n \"\"\"\n bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)\n run_squad_helper.export_squad(model_export_path, input_meta_data, bert_config)\n\n\ndef main(_):\n gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)\n\n with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') 
as reader:\n input_meta_data = json.loads(reader.read().decode('utf-8'))\n\n if FLAGS.mode == 'export_only':\n export_squad(FLAGS.model_export_path, input_meta_data)\n return\n\n # Configures cluster spec for multi-worker distribution strategy.\n if FLAGS.num_gpus > 0:\n _ = distribute_utils.configure_cluster(FLAGS.worker_hosts, FLAGS.task_index)\n strategy = distribute_utils.get_distribution_strategy(\n distribution_strategy=FLAGS.distribution_strategy,\n num_gpus=FLAGS.num_gpus,\n all_reduce_alg=FLAGS.all_reduce_alg,\n tpu_address=FLAGS.tpu)\n\n if 'train' in FLAGS.mode:\n if FLAGS.log_steps:\n custom_callbacks = [keras_utils.TimeHistory(\n batch_size=FLAGS.train_batch_size,\n log_steps=FLAGS.log_steps,\n logdir=FLAGS.model_dir,\n )]\n else:\n custom_callbacks = None\n\n train_squad(\n strategy,\n input_meta_data,\n custom_callbacks=custom_callbacks,\n run_eagerly=FLAGS.run_eagerly,\n sub_model_export_name=FLAGS.sub_model_export_name,\n )\n if 'predict' in FLAGS.mode:\n predict_squad(strategy, input_meta_data)\n if 'eval' in FLAGS.mode:\n eval_metrics = eval_squad(strategy, input_meta_data)\n f1_score = eval_metrics['final_f1']\n logging.info('SQuAD eval F1-score: %f', f1_score)\n summary_dir = os.path.join(FLAGS.model_dir, 'summaries', 'eval')\n summary_writer = tf.summary.create_file_writer(summary_dir)\n with summary_writer.as_default():\n # TODO(lehou): write to the correct step number.\n tf.summary.scalar('F1-score', f1_score, step=0)\n summary_writer.flush()\n # Also write eval_metrics to json file.\n squad_lib_wp.write_to_json_files(\n eval_metrics, os.path.join(summary_dir, 'eval_metrics.json'))\n time.sleep(60)\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('bert_config_file')\n flags.mark_flag_as_required('model_dir')\n app.run(main)\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tensorflow Example proto decoder for object detection.\n\nA decoder to decode string tensors containing serialized tensorflow.Example\nprotos for object detection.\n\"\"\"\nimport tensorflow as tf\n\n\nclass TfExampleDecoder(object):\n \"\"\"Tensorflow Example proto decoder.\"\"\"\n\n def __init__(self, include_mask=False):\n self._include_mask = include_mask\n self._keys_to_features = {\n 'image/encoded':\n tf.io.FixedLenFeature((), tf.string),\n 'image/source_id':\n tf.io.FixedLenFeature((), tf.string),\n 'image/height':\n tf.io.FixedLenFeature((), tf.int64),\n 'image/width':\n tf.io.FixedLenFeature((), tf.int64),\n 'image/object/bbox/xmin':\n tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax':\n tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin':\n tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax':\n tf.io.VarLenFeature(tf.float32),\n 'image/object/class/label':\n tf.io.VarLenFeature(tf.int64),\n 'image/object/area':\n tf.io.VarLenFeature(tf.float32),\n 'image/object/is_crowd':\n tf.io.VarLenFeature(tf.int64),\n }\n if include_mask:\n self._keys_to_features.update({\n 'image/object/mask':\n tf.io.VarLenFeature(tf.string),\n })\n\n def _decode_image(self, parsed_tensors):\n \"\"\"Decodes the image and set its static shape.\"\"\"\n image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)\n image.set_shape([None, None, 3])\n return image\n\n def _decode_boxes(self, parsed_tensors):\n \"\"\"Concat box coordinates in the format of [ymin, xmin, ymax, xmax].\"\"\"\n xmin = parsed_tensors['image/object/bbox/xmin']\n xmax = parsed_tensors['image/object/bbox/xmax']\n ymin = parsed_tensors['image/object/bbox/ymin']\n ymax = parsed_tensors['image/object/bbox/ymax']\n return tf.stack([ymin, xmin, ymax, xmax], axis=-1)\n\n def _decode_masks(self, parsed_tensors):\n \"\"\"Decode a set of PNG masks to the tf.float32 tensors.\"\"\"\n def _decode_png_mask(png_bytes):\n mask = tf.squeeze(\n tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)\n mask = tf.cast(mask, dtype=tf.float32)\n mask.set_shape([None, None])\n return mask\n\n height = parsed_tensors['image/height']\n width = parsed_tensors['image/width']\n masks = parsed_tensors['image/object/mask']\n return tf.cond(\n pred=tf.greater(tf.size(input=masks), 0),\n true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),\n false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32))\n\n def _decode_areas(self, parsed_tensors):\n xmin = parsed_tensors['image/object/bbox/xmin']\n xmax = parsed_tensors['image/object/bbox/xmax']\n ymin = parsed_tensors['image/object/bbox/ymin']\n ymax = parsed_tensors['image/object/bbox/ymax']\n return tf.cond(\n tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),\n lambda: parsed_tensors['image/object/area'],\n lambda: (xmax - xmin) * (ymax 
- ymin))\n\n def decode(self, serialized_example):\n \"\"\"Decode the serialized example.\n\n Args:\n serialized_example: a single serialized tf.Example string.\n\n Returns:\n decoded_tensors: a dictionary of tensors with the following fields:\n - image: a uint8 tensor of shape [None, None, 3].\n - source_id: a string scalar tensor.\n - height: an integer scalar tensor.\n - width: an integer scalar tensor.\n - groundtruth_classes: a int64 tensor of shape [None].\n - groundtruth_is_crowd: a bool tensor of shape [None].\n - groundtruth_area: a float32 tensor of shape [None].\n - groundtruth_boxes: a float32 tensor of shape [None, 4].\n - groundtruth_instance_masks: a float32 tensor of shape\n [None, None, None].\n - groundtruth_instance_masks_png: a string tensor of shape [None].\n \"\"\"\n parsed_tensors = tf.io.parse_single_example(\n serialized=serialized_example, features=self._keys_to_features)\n for k in parsed_tensors:\n if isinstance(parsed_tensors[k], tf.SparseTensor):\n if parsed_tensors[k].dtype == tf.string:\n parsed_tensors[k] = tf.sparse.to_dense(\n parsed_tensors[k], default_value='')\n else:\n parsed_tensors[k] = tf.sparse.to_dense(\n parsed_tensors[k], default_value=0)\n\n image = self._decode_image(parsed_tensors)\n boxes = self._decode_boxes(parsed_tensors)\n areas = self._decode_areas(parsed_tensors)\n is_crowds = tf.cond(\n tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),\n lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),\n lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool)) # pylint: disable=line-too-long\n if self._include_mask:\n masks = self._decode_masks(parsed_tensors)\n\n decoded_tensors = {\n 'image': image,\n 'source_id': parsed_tensors['image/source_id'],\n 'height': parsed_tensors['image/height'],\n 'width': parsed_tensors['image/width'],\n 'groundtruth_classes': parsed_tensors['image/object/class/label'],\n 'groundtruth_is_crowd': is_crowds,\n 'groundtruth_area': areas,\n 'groundtruth_boxes': boxes,\n }\n if self._include_mask:\n decoded_tensors.update({\n 'groundtruth_instance_masks': masks,\n 'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'],\n })\n return decoded_tensors\n",
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Exports an SSD detection model to use with tf-lite.\n\nSee export_tflite_ssd_graph.py for usage.\n\"\"\"\nimport os\nimport tempfile\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import types_pb2\nfrom tensorflow.core.protobuf import saver_pb2\nfrom object_detection import exporter\nfrom object_detection.builders import graph_rewriter_builder\nfrom object_detection.builders import model_builder\nfrom object_detection.builders import post_processing_builder\nfrom object_detection.core import box_list\nfrom object_detection.utils import tf_version\n\n_DEFAULT_NUM_CHANNELS = 3\n_DEFAULT_NUM_COORD_BOX = 4\n\nif tf_version.is_tf1():\n from tensorflow.tools.graph_transforms import TransformGraph # pylint: disable=g-import-not-at-top\n\n\ndef get_const_center_size_encoded_anchors(anchors):\n \"\"\"Exports center-size encoded anchors as a constant tensor.\n\n Args:\n anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor\n boxes\n\n Returns:\n encoded_anchors: a float32 constant tensor of shape [num_anchors, 4]\n containing the anchor boxes.\n \"\"\"\n anchor_boxlist = box_list.BoxList(anchors)\n y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes()\n num_anchors = y.get_shape().as_list()\n\n with tf.Session() as sess:\n y_out, x_out, h_out, w_out = sess.run([y, x, h, w])\n encoded_anchors = tf.constant(\n np.transpose(np.stack((y_out, x_out, h_out, w_out))),\n dtype=tf.float32,\n shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX],\n name='anchors')\n return encoded_anchors\n\n\ndef append_postprocessing_op(frozen_graph_def,\n max_detections,\n max_classes_per_detection,\n nms_score_threshold,\n nms_iou_threshold,\n num_classes,\n scale_values,\n detections_per_class=100,\n use_regular_nms=False,\n additional_output_tensors=()):\n \"\"\"Appends postprocessing custom op.\n\n Args:\n frozen_graph_def: Frozen GraphDef for SSD model after freezing the\n checkpoint\n max_detections: Maximum number of detections (boxes) to show\n max_classes_per_detection: Number of classes to display per detection\n nms_score_threshold: Score threshold used in Non-maximal suppression in\n post-processing\n nms_iou_threshold: Intersection-over-union threshold used in Non-maximal\n suppression in post-processing\n num_classes: number of classes in SSD detector\n scale_values: scale values is a dict with following key-value pairs\n {y_scale: 10, x_scale: 10, h_scale: 5, w_scale: 5} that are used in decode\n centersize boxes\n detections_per_class: In regular NonMaxSuppression, number of anchors used\n for NonMaxSuppression per class\n use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of\n Fast NMS.\n additional_output_tensors: Array of additional tensor 
names to output.\n Tensors are appended after postprocessing output.\n\n Returns:\n transformed_graph_def: Frozen GraphDef with postprocessing custom op\n appended\n TFLite_Detection_PostProcess custom op node has four outputs:\n detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box\n locations\n detection_classes: a float32 tensor of shape [1, num_boxes]\n with class indices\n detection_scores: a float32 tensor of shape [1, num_boxes]\n with class scores\n num_boxes: a float32 tensor of size 1 containing the number of detected\n boxes\n \"\"\"\n new_output = frozen_graph_def.node.add()\n new_output.op = 'TFLite_Detection_PostProcess'\n new_output.name = 'TFLite_Detection_PostProcess'\n new_output.attr['_output_quantized'].CopyFrom(\n attr_value_pb2.AttrValue(b=True))\n new_output.attr['_output_types'].list.type.extend([\n types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT,\n types_pb2.DT_FLOAT\n ])\n new_output.attr['_support_output_type_float_in_quantized_op'].CopyFrom(\n attr_value_pb2.AttrValue(b=True))\n new_output.attr['max_detections'].CopyFrom(\n attr_value_pb2.AttrValue(i=max_detections))\n new_output.attr['max_classes_per_detection'].CopyFrom(\n attr_value_pb2.AttrValue(i=max_classes_per_detection))\n new_output.attr['nms_score_threshold'].CopyFrom(\n attr_value_pb2.AttrValue(f=nms_score_threshold.pop()))\n new_output.attr['nms_iou_threshold'].CopyFrom(\n attr_value_pb2.AttrValue(f=nms_iou_threshold.pop()))\n new_output.attr['num_classes'].CopyFrom(\n attr_value_pb2.AttrValue(i=num_classes))\n\n new_output.attr['y_scale'].CopyFrom(\n attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop()))\n new_output.attr['x_scale'].CopyFrom(\n attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop()))\n new_output.attr['h_scale'].CopyFrom(\n attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop()))\n new_output.attr['w_scale'].CopyFrom(\n attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop()))\n new_output.attr['detections_per_class'].CopyFrom(\n attr_value_pb2.AttrValue(i=detections_per_class))\n new_output.attr['use_regular_nms'].CopyFrom(\n attr_value_pb2.AttrValue(b=use_regular_nms))\n\n new_output.input.extend(\n ['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors'])\n # Transform the graph to append new postprocessing op\n input_names = []\n output_names = ['TFLite_Detection_PostProcess'\n ] + list(additional_output_tensors)\n transforms = ['strip_unused_nodes']\n transformed_graph_def = TransformGraph(frozen_graph_def, input_names,\n output_names, transforms)\n return transformed_graph_def\n\n\ndef export_tflite_graph(pipeline_config,\n trained_checkpoint_prefix,\n output_dir,\n add_postprocessing_op,\n max_detections,\n max_classes_per_detection,\n detections_per_class=100,\n use_regular_nms=False,\n binary_graph_name='tflite_graph.pb',\n txt_graph_name='tflite_graph.pbtxt',\n additional_output_tensors=()):\n \"\"\"Exports a tflite compatible graph and anchors for ssd detection model.\n\n Anchors are written to a tensor and tflite compatible graph\n is written to output_dir/tflite_graph.pb.\n\n Args:\n pipeline_config: a pipeline.proto object containing the configuration for\n SSD model to export.\n trained_checkpoint_prefix: a file prefix for the checkpoint containing the\n trained parameters of the SSD model.\n output_dir: A directory to write the tflite graph and anchor file to.\n add_postprocessing_op: If add_postprocessing_op is true: frozen graph adds a\n TFLite_Detection_PostProcess custom op\n max_detections: Maximum number of 
detections (boxes) to show\n max_classes_per_detection: Number of classes to display per detection\n detections_per_class: In regular NonMaxSuppression, number of anchors used\n for NonMaxSuppression per class\n use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of\n Fast NMS.\n binary_graph_name: Name of the exported graph file in binary format.\n txt_graph_name: Name of the exported graph file in text format.\n additional_output_tensors: Array of additional tensor names to output.\n Additional tensors are appended to the end of output tensor list.\n\n Raises:\n ValueError: if the pipeline config contains models other than ssd or uses an\n fixed_shape_resizer and provides a shape as well.\n \"\"\"\n tf.gfile.MakeDirs(output_dir)\n if pipeline_config.model.WhichOneof('model') != 'ssd':\n raise ValueError('Only ssd models are supported in tflite. '\n 'Found {} in config'.format(\n pipeline_config.model.WhichOneof('model')))\n\n num_classes = pipeline_config.model.ssd.num_classes\n nms_score_threshold = {\n pipeline_config.model.ssd.post_processing.batch_non_max_suppression\n .score_threshold\n }\n nms_iou_threshold = {\n pipeline_config.model.ssd.post_processing.batch_non_max_suppression\n .iou_threshold\n }\n scale_values = {}\n scale_values['y_scale'] = {\n pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale\n }\n scale_values['x_scale'] = {\n pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale\n }\n scale_values['h_scale'] = {\n pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale\n }\n scale_values['w_scale'] = {\n pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale\n }\n\n image_resizer_config = pipeline_config.model.ssd.image_resizer\n image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof')\n num_channels = _DEFAULT_NUM_CHANNELS\n if image_resizer == 'fixed_shape_resizer':\n height = image_resizer_config.fixed_shape_resizer.height\n width = image_resizer_config.fixed_shape_resizer.width\n if image_resizer_config.fixed_shape_resizer.convert_to_grayscale:\n num_channels = 1\n shape = [1, height, width, num_channels]\n else:\n raise ValueError(\n 'Only fixed_shape_resizer'\n 'is supported with tflite. Found {}'.format(\n image_resizer_config.WhichOneof('image_resizer_oneof')))\n\n image = tf.placeholder(\n tf.float32, shape=shape, name='normalized_input_image_tensor')\n\n detection_model = model_builder.build(\n pipeline_config.model, is_training=False)\n predicted_tensors = detection_model.predict(image, true_image_shapes=None)\n # The score conversion occurs before the post-processing custom op\n _, score_conversion_fn = post_processing_builder.build(\n pipeline_config.model.ssd.post_processing)\n class_predictions = score_conversion_fn(\n predicted_tensors['class_predictions_with_background'])\n\n with tf.name_scope('raw_outputs'):\n # 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4]\n # containing the encoded box predictions. 
Note that these are raw\n # predictions and no Non-Max suppression is applied on them and\n # no decode center size boxes is applied to them.\n tf.identity(predicted_tensors['box_encodings'], name='box_encodings')\n # 'raw_outputs/class_predictions': a float32 tensor of shape\n # [1, num_anchors, num_classes] containing the class scores for each anchor\n # after applying score conversion.\n tf.identity(class_predictions, name='class_predictions')\n # 'anchors': a float32 tensor of shape\n # [4, num_anchors] containing the anchors as a constant node.\n tf.identity(\n get_const_center_size_encoded_anchors(predicted_tensors['anchors']),\n name='anchors')\n\n # Add global step to the graph, so we know the training step number when we\n # evaluate the model.\n tf.train.get_or_create_global_step()\n\n # graph rewriter\n is_quantized = pipeline_config.HasField('graph_rewriter')\n if is_quantized:\n graph_rewriter_config = pipeline_config.graph_rewriter\n graph_rewriter_fn = graph_rewriter_builder.build(\n graph_rewriter_config, is_training=False)\n graph_rewriter_fn()\n\n if pipeline_config.model.ssd.feature_extractor.HasField('fpn'):\n exporter.rewrite_nn_resize_op(is_quantized)\n\n # freeze the graph\n saver_kwargs = {}\n if pipeline_config.eval_config.use_moving_averages:\n saver_kwargs['write_version'] = saver_pb2.SaverDef.V1\n moving_average_checkpoint = tempfile.NamedTemporaryFile()\n exporter.replace_variable_values_with_moving_averages(\n tf.get_default_graph(), trained_checkpoint_prefix,\n moving_average_checkpoint.name)\n checkpoint_to_use = moving_average_checkpoint.name\n else:\n checkpoint_to_use = trained_checkpoint_prefix\n\n saver = tf.train.Saver(**saver_kwargs)\n input_saver_def = saver.as_saver_def()\n frozen_graph_def = exporter.freeze_graph_with_def_protos(\n input_graph_def=tf.get_default_graph().as_graph_def(),\n input_saver_def=input_saver_def,\n input_checkpoint=checkpoint_to_use,\n output_node_names=','.join([\n 'raw_outputs/box_encodings', 'raw_outputs/class_predictions',\n 'anchors'\n ] + list(additional_output_tensors)),\n restore_op_name='save/restore_all',\n filename_tensor_name='save/Const:0',\n clear_devices=True,\n output_graph='',\n initializer_nodes='')\n\n # Add new operation to do post processing in a custom op (TF Lite only)\n if add_postprocessing_op:\n transformed_graph_def = append_postprocessing_op(\n frozen_graph_def,\n max_detections,\n max_classes_per_detection,\n nms_score_threshold,\n nms_iou_threshold,\n num_classes,\n scale_values,\n detections_per_class,\n use_regular_nms,\n additional_output_tensors=additional_output_tensors)\n else:\n # Return frozen without adding post-processing custom op\n transformed_graph_def = frozen_graph_def\n\n binary_graph = os.path.join(output_dir, binary_graph_name)\n with tf.gfile.GFile(binary_graph, 'wb') as f:\n f.write(transformed_graph_def.SerializeToString())\n txt_graph = os.path.join(output_dir, txt_graph_name)\n with tf.gfile.GFile(txt_graph, 'w') as f:\n f.write(str(transformed_graph_def))\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the definition for the PNASNet classification networks.\n\nPaper: https://arxiv.org/abs/1712.00559\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport tensorflow.compat.v1 as tf\nimport tf_slim as slim\nfrom tensorflow.contrib import training as contrib_training\n\nfrom nets.nasnet import nasnet\nfrom nets.nasnet import nasnet_utils\n\narg_scope = slim.arg_scope\n\n\ndef large_imagenet_config():\n \"\"\"Large ImageNet configuration based on PNASNet-5.\"\"\"\n return contrib_training.HParams(\n stem_multiplier=3.0,\n dense_dropout_keep_prob=0.5,\n num_cells=12,\n filter_scaling_rate=2.0,\n num_conv_filters=216,\n drop_path_keep_prob=0.6,\n use_aux_head=1,\n num_reduction_layers=2,\n data_format='NHWC',\n skip_reduction_layer_input=1,\n total_training_steps=250000,\n use_bounded_activation=False,\n )\n\n\ndef mobile_imagenet_config():\n \"\"\"Mobile ImageNet configuration based on PNASNet-5.\"\"\"\n return contrib_training.HParams(\n stem_multiplier=1.0,\n dense_dropout_keep_prob=0.5,\n num_cells=9,\n filter_scaling_rate=2.0,\n num_conv_filters=54,\n drop_path_keep_prob=1.0,\n use_aux_head=1,\n num_reduction_layers=2,\n data_format='NHWC',\n skip_reduction_layer_input=1,\n total_training_steps=250000,\n use_bounded_activation=False,\n )\n\n\ndef pnasnet_large_arg_scope(weight_decay=4e-5, batch_norm_decay=0.9997,\n batch_norm_epsilon=0.001):\n \"\"\"Default arg scope for the PNASNet Large ImageNet model.\"\"\"\n return nasnet.nasnet_large_arg_scope(\n weight_decay, batch_norm_decay, batch_norm_epsilon)\n\n\ndef pnasnet_mobile_arg_scope(weight_decay=4e-5,\n batch_norm_decay=0.9997,\n batch_norm_epsilon=0.001):\n \"\"\"Default arg scope for the PNASNet Mobile ImageNet model.\"\"\"\n return nasnet.nasnet_mobile_arg_scope(weight_decay, batch_norm_decay,\n batch_norm_epsilon)\n\n\ndef _build_pnasnet_base(images,\n normal_cell,\n num_classes,\n hparams,\n is_training,\n final_endpoint=None):\n \"\"\"Constructs a PNASNet image model.\"\"\"\n\n end_points = {}\n\n def add_and_check_endpoint(endpoint_name, net):\n end_points[endpoint_name] = net\n return final_endpoint and (endpoint_name == final_endpoint)\n\n # Find where to place the reduction cells or stride normal cells\n reduction_indices = nasnet_utils.calc_reduction_layers(\n hparams.num_cells, hparams.num_reduction_layers)\n\n # pylint: disable=protected-access\n stem = lambda: nasnet._imagenet_stem(images, hparams, normal_cell)\n # pylint: enable=protected-access\n net, cell_outputs = stem()\n if add_and_check_endpoint('Stem', net):\n return net, end_points\n\n # Setup for building in the auxiliary head.\n aux_head_cell_idxes = []\n if len(reduction_indices) >= 2:\n aux_head_cell_idxes.append(reduction_indices[1] - 1)\n\n # Run the cells\n 
filter_scaling = 1.0\n # true_cell_num accounts for the stem cells\n true_cell_num = 2\n activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu\n for cell_num in range(hparams.num_cells):\n is_reduction = cell_num in reduction_indices\n stride = 2 if is_reduction else 1\n if is_reduction: filter_scaling *= hparams.filter_scaling_rate\n if hparams.skip_reduction_layer_input or not is_reduction:\n prev_layer = cell_outputs[-2]\n net = normal_cell(\n net,\n scope='cell_{}'.format(cell_num),\n filter_scaling=filter_scaling,\n stride=stride,\n prev_layer=prev_layer,\n cell_num=true_cell_num)\n if add_and_check_endpoint('Cell_{}'.format(cell_num), net):\n return net, end_points\n true_cell_num += 1\n cell_outputs.append(net)\n\n if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and\n num_classes and is_training):\n aux_net = activation_fn(net)\n # pylint: disable=protected-access\n nasnet._build_aux_head(aux_net, end_points, num_classes, hparams,\n scope='aux_{}'.format(cell_num))\n # pylint: enable=protected-access\n\n # Final softmax layer\n with tf.variable_scope('final_layer'):\n net = activation_fn(net)\n net = nasnet_utils.global_avg_pool(net)\n if add_and_check_endpoint('global_pool', net) or not num_classes:\n return net, end_points\n net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')\n logits = slim.fully_connected(net, num_classes)\n\n if add_and_check_endpoint('Logits', logits):\n return net, end_points\n\n predictions = tf.nn.softmax(logits, name='predictions')\n if add_and_check_endpoint('Predictions', predictions):\n return net, end_points\n return logits, end_points\n\n\ndef build_pnasnet_large(images,\n num_classes,\n is_training=True,\n final_endpoint=None,\n config=None):\n \"\"\"Build PNASNet Large model for the ImageNet Dataset.\"\"\"\n hparams = copy.deepcopy(config) if config else large_imagenet_config()\n # pylint: disable=protected-access\n nasnet._update_hparams(hparams, is_training)\n # pylint: enable=protected-access\n\n if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':\n tf.logging.info(\n 'A GPU is available on the machine, consider using NCHW '\n 'data format for increased speed on GPU.')\n\n if hparams.data_format == 'NCHW':\n images = tf.transpose(a=images, perm=[0, 3, 1, 2])\n\n # Calculate the total number of cells in the network.\n # There is no distinction between reduction and normal cells in PNAS so the\n # total number of cells is equal to the number normal cells plus the number\n # of stem cells (two by default).\n total_num_cells = hparams.num_cells + 2\n\n normal_cell = PNasNetNormalCell(hparams.num_conv_filters,\n hparams.drop_path_keep_prob, total_num_cells,\n hparams.total_training_steps,\n hparams.use_bounded_activation)\n with arg_scope(\n [slim.dropout, nasnet_utils.drop_path, slim.batch_norm],\n is_training=is_training):\n with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d,\n slim.batch_norm, slim.separable_conv2d,\n nasnet_utils.factorized_reduction,\n nasnet_utils.global_avg_pool,\n nasnet_utils.get_channel_index,\n nasnet_utils.get_channel_dim],\n data_format=hparams.data_format):\n return _build_pnasnet_base(\n images,\n normal_cell=normal_cell,\n num_classes=num_classes,\n hparams=hparams,\n is_training=is_training,\n final_endpoint=final_endpoint)\nbuild_pnasnet_large.default_image_size = 331\n\n\ndef build_pnasnet_mobile(images,\n num_classes,\n is_training=True,\n final_endpoint=None,\n config=None):\n \"\"\"Build PNASNet Mobile model for the ImageNet 
Dataset.\"\"\"\n hparams = copy.deepcopy(config) if config else mobile_imagenet_config()\n # pylint: disable=protected-access\n nasnet._update_hparams(hparams, is_training)\n # pylint: enable=protected-access\n\n if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':\n tf.logging.info(\n 'A GPU is available on the machine, consider using NCHW '\n 'data format for increased speed on GPU.')\n\n if hparams.data_format == 'NCHW':\n images = tf.transpose(a=images, perm=[0, 3, 1, 2])\n\n # Calculate the total number of cells in the network.\n # There is no distinction between reduction and normal cells in PNAS so the\n # total number of cells is equal to the number normal cells plus the number\n # of stem cells (two by default).\n total_num_cells = hparams.num_cells + 2\n\n normal_cell = PNasNetNormalCell(hparams.num_conv_filters,\n hparams.drop_path_keep_prob, total_num_cells,\n hparams.total_training_steps,\n hparams.use_bounded_activation)\n with arg_scope(\n [slim.dropout, nasnet_utils.drop_path, slim.batch_norm],\n is_training=is_training):\n with arg_scope(\n [\n slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm,\n slim.separable_conv2d, nasnet_utils.factorized_reduction,\n nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,\n nasnet_utils.get_channel_dim\n ],\n data_format=hparams.data_format):\n return _build_pnasnet_base(\n images,\n normal_cell=normal_cell,\n num_classes=num_classes,\n hparams=hparams,\n is_training=is_training,\n final_endpoint=final_endpoint)\n\n\nbuild_pnasnet_mobile.default_image_size = 224\n\n\nclass PNasNetNormalCell(nasnet_utils.NasNetABaseCell):\n \"\"\"PNASNet Normal Cell.\"\"\"\n\n def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,\n total_training_steps, use_bounded_activation=False):\n # Configuration for the PNASNet-5 model.\n operations = [\n 'separable_5x5_2', 'max_pool_3x3', 'separable_7x7_2', 'max_pool_3x3',\n 'separable_5x5_2', 'separable_3x3_2', 'separable_3x3_2', 'max_pool_3x3',\n 'separable_3x3_2', 'none'\n ]\n used_hiddenstates = [1, 1, 0, 0, 0, 0, 0]\n hiddenstate_indices = [1, 1, 0, 0, 0, 0, 4, 0, 1, 0]\n\n super(PNasNetNormalCell, self).__init__(\n num_conv_filters, operations, used_hiddenstates, hiddenstate_indices,\n drop_path_keep_prob, total_num_cells, total_training_steps,\n use_bounded_activation)\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport pprint\nfrom typing import List, Tuple\n\nfrom absl.testing import parameterized\nimport dataclasses\nimport tensorflow as tf\nfrom official.modeling.hyperparams import base_config\n\n\[email protected]\nclass DumpConfig1(base_config.Config):\n a: int = 1\n b: str = 'text'\n\n\[email protected]\nclass DumpConfig2(base_config.Config):\n c: int = 2\n d: str = 'text'\n e: DumpConfig1 = DumpConfig1()\n\n\[email protected]\nclass DumpConfig3(DumpConfig2):\n f: int = 2\n g: str = 'text'\n h: List[DumpConfig1] = dataclasses.field(\n default_factory=lambda: [DumpConfig1(), DumpConfig1()])\n g: Tuple[DumpConfig1, ...] = (DumpConfig1(),)\n\n\nclass BaseConfigTest(parameterized.TestCase, tf.test.TestCase):\n\n def assertHasSameTypes(self, c, d, msg=''):\n \"\"\"Checks if a Config has the same structure as a given dict.\n\n Args:\n c: the Config object to be check.\n d: the reference dict object.\n msg: The error message to show when type mismatched.\n \"\"\"\n # Make sure d is not a Config. Assume d is either\n # dictionary or primitive type and c is the Config or primitive types.\n self.assertNotIsInstance(d, base_config.Config)\n if isinstance(d, base_config.Config.IMMUTABLE_TYPES):\n self.assertEqual(pprint.pformat(c), pprint.pformat(d), msg=msg)\n elif isinstance(d, base_config.Config.SEQUENCE_TYPES):\n self.assertEqual(type(c), type(d), msg=msg)\n for i, v in enumerate(d):\n self.assertHasSameTypes(c[i], v, msg='{}[{!r}]'.format(msg, i))\n elif isinstance(d, dict):\n self.assertIsInstance(c, base_config.Config, msg=msg)\n for k, v in sorted(d.items()):\n self.assertHasSameTypes(getattr(c, k), v, msg='{}[{!r}]'.format(msg, k))\n else:\n raise TypeError('Unknown type: %r' % type(d))\n\n def assertImportExport(self, v):\n config = base_config.Config({'key': v})\n back = config.as_dict()['key']\n self.assertEqual(pprint.pformat(back), pprint.pformat(v))\n self.assertHasSameTypes(config.key, v, msg='=%s v' % pprint.pformat(v))\n\n def test_invalid_keys(self):\n params = base_config.Config()\n with self.assertRaises(AttributeError):\n _ = params.a\n\n def test_nested_config_types(self):\n config = DumpConfig3()\n self.assertIsInstance(config.e, DumpConfig1)\n self.assertIsInstance(config.h[0], DumpConfig1)\n self.assertIsInstance(config.h[1], DumpConfig1)\n self.assertIsInstance(config.g[0], DumpConfig1)\n\n config.override({'e': {'a': 2, 'b': 'new text'}})\n self.assertIsInstance(config.e, DumpConfig1)\n self.assertEqual(config.e.a, 2)\n self.assertEqual(config.e.b, 'new text')\n\n config.override({'h': [{'a': 3, 'b': 'new text 2'}]})\n self.assertIsInstance(config.h[0], DumpConfig1)\n self.assertLen(config.h, 1)\n self.assertEqual(config.h[0].a, 3)\n self.assertEqual(config.h[0].b, 'new text 2')\n\n config.override({'g': [{'a': 4, 'b': 'new text 3'}]})\n self.assertIsInstance(config.g[0], 
DumpConfig1)\n self.assertLen(config.g, 1)\n self.assertEqual(config.g[0].a, 4)\n self.assertEqual(config.g[0].b, 'new text 3')\n\n def test_replace(self):\n config = DumpConfig2()\n new_config = config.replace(e={'a': 2})\n self.assertEqual(new_config.e.a, 2)\n self.assertIsInstance(new_config.e, DumpConfig1)\n\n config = DumpConfig2(e=DumpConfig2())\n new_config = config.replace(e={'c': 4})\n self.assertEqual(new_config.e.c, 4)\n self.assertIsInstance(new_config.e, DumpConfig2)\n\n config = DumpConfig3()\n new_config = config.replace(g=[{'a': 4, 'b': 'new text 3'}])\n self.assertIsInstance(new_config.g[0], DumpConfig1)\n self.assertEqual(new_config.g[0].a, 4)\n\n @parameterized.parameters(\n ('_locked', \"The key '_locked' is internally reserved.\"),\n ('_restrictions', \"The key '_restrictions' is internally reserved.\"),\n ('aa', \"The key 'aa' does not exist.\"),\n )\n def test_key_error(self, key, msg):\n params = base_config.Config()\n with self.assertRaisesRegex(KeyError, msg):\n params.override({key: True})\n\n @parameterized.parameters(\n ('str data',),\n (123,),\n (1.23,),\n (None,),\n (['str', 1, 2.3, None],),\n (('str', 1, 2.3, None),),\n )\n def test_import_export_immutable_types(self, v):\n self.assertImportExport(v)\n out = base_config.Config({'key': v})\n self.assertEqual(pprint.pformat(v), pprint.pformat(out.key))\n\n def test_override_is_strict_true(self):\n params = base_config.Config({\n 'a': 'aa',\n 'b': 2,\n 'c': {\n 'c1': 'cc',\n 'c2': 20\n }\n })\n params.override({'a': 2, 'c': {'c1': 'ccc'}}, is_strict=True)\n self.assertEqual(params.a, 2)\n self.assertEqual(params.c.c1, 'ccc')\n with self.assertRaises(KeyError):\n params.override({'d': 'ddd'}, is_strict=True)\n with self.assertRaises(KeyError):\n params.override({'c': {'c3': 30}}, is_strict=True)\n\n config = base_config.Config({'key': [{'a': 42}]})\n config.override({'key': [{'b': 43}]})\n self.assertEqual(config.key[0].b, 43)\n with self.assertRaisesRegex(AttributeError, 'The key `a` does not exist'):\n _ = config.key[0].a\n\n @parameterized.parameters(\n (lambda x: x, 'Unknown type'),\n (object(), 'Unknown type'),\n (set(), 'Unknown type'),\n (frozenset(), 'Unknown type'),\n )\n def test_import_unsupport_types(self, v, msg):\n with self.assertRaisesRegex(TypeError, msg):\n _ = base_config.Config({'key': v})\n\n @parameterized.parameters(\n ({\n 'a': [{\n 'b': 2,\n }, {\n 'c': 3,\n }]\n },),\n ({\n 'c': [{\n 'f': 1.1,\n }, {\n 'h': [1, 2],\n }]\n },),\n (({\n 'a': 'aa',\n 'b': 2,\n 'c': {\n 'c1': 10,\n 'c2': 20,\n }\n },),),\n )\n def test_import_export_nested_structure(self, d):\n self.assertImportExport(d)\n\n @parameterized.parameters(\n ([{\n 'a': 42,\n 'b': 'hello',\n 'c': 1.2\n }],),\n (({\n 'a': 42,\n 'b': 'hello',\n 'c': 1.2\n },),),\n )\n def test_import_export_nested_sequences(self, v):\n self.assertImportExport(v)\n\n @parameterized.parameters(\n ([([{}],)],),\n ([['str', 1, 2.3, None]],),\n ((('str', 1, 2.3, None),),),\n ([\n ('str', 1, 2.3, None),\n ],),\n ([\n ('str', 1, 2.3, None),\n ],),\n ([[{\n 'a': 42,\n 'b': 'hello',\n 'c': 1.2\n }]],),\n ([[[{\n 'a': 42,\n 'b': 'hello',\n 'c': 1.2\n }]]],),\n ((({\n 'a': 42,\n 'b': 'hello',\n 'c': 1.2\n },),),),\n (((({\n 'a': 42,\n 'b': 'hello',\n 'c': 1.2\n },),),),),\n ([({\n 'a': 42,\n 'b': 'hello',\n 'c': 1.2\n },)],),\n (([{\n 'a': 42,\n 'b': 'hello',\n 'c': 1.2\n }],),),\n )\n def test_import_export_unsupport_sequence(self, v):\n with self.assertRaisesRegex(TypeError,\n 'Invalid sequence: only supports single level'):\n _ = base_config.Config({'key': 
v})\n\n def test_construct_subtype(self):\n pass\n\n def test_import_config(self):\n params = base_config.Config({'a': [{'b': 2}, {'c': {'d': 3}}]})\n self.assertLen(params.a, 2)\n self.assertEqual(params.a[0].b, 2)\n self.assertEqual(type(params.a[0]), base_config.Config)\n self.assertEqual(pprint.pformat(params.a[0].b), '2')\n self.assertEqual(type(params.a[1]), base_config.Config)\n self.assertEqual(type(params.a[1].c), base_config.Config)\n self.assertEqual(pprint.pformat(params.a[1].c.d), '3')\n\n def test_override(self):\n params = base_config.Config({'a': [{'b': 2}, {'c': {'d': 3}}]})\n params.override({'a': [{'b': 4}, {'c': {'d': 5}}]}, is_strict=False)\n self.assertEqual(type(params.a), list)\n self.assertEqual(type(params.a[0]), base_config.Config)\n self.assertEqual(pprint.pformat(params.a[0].b), '4')\n self.assertEqual(type(params.a[1]), base_config.Config)\n self.assertEqual(type(params.a[1].c), base_config.Config)\n self.assertEqual(pprint.pformat(params.a[1].c.d), '5')\n\n @parameterized.parameters(\n ([{}],),\n (({},),),\n )\n def test_config_vs_params_dict(self, v):\n d = {'key': v}\n self.assertEqual(type(base_config.Config(d).key[0]), base_config.Config)\n self.assertEqual(type(base_config.params_dict.ParamsDict(d).key[0]), dict)\n\n def test_ppformat(self):\n self.assertEqual(\n pprint.pformat([\n 's', 1, 1.0, True, None, {}, [], (), {\n (2,): (3, [4], {\n 6: 7,\n }),\n 8: 9,\n }\n ]),\n \"['s', 1, 1.0, True, None, {}, [], (), {8: 9, (2,): (3, [4], {6: 7})}]\")\n\n def test_with_restrictions(self):\n restrictions = ['e.a<c']\n config = DumpConfig2(restrictions=restrictions)\n config.validate()\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
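The tests above pin down the Config semantics: nested dicts become Config objects on construction, `override` mutates in place with optional strict key checking, and `replace` returns a modified copy. A minimal usage sketch, assuming `official.modeling.hyperparams.base_config` is importable exactly as in the tests:

from official.modeling.hyperparams import base_config

# Nested dicts are converted to Config objects on construction.
params = base_config.Config({'a': 'aa', 'c': {'c1': 'cc', 'c2': 20}})

# In-place override; is_strict=True rejects keys that do not already exist.
params.override({'c': {'c1': 'ccc'}}, is_strict=True)
assert params.c.c1 == 'ccc'

# replace() returns a new Config and leaves the original untouched.
new_params = params.replace(c={'c2': 30})
assert params.c.c2 == 20 and new_params.c.c2 == 30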
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Resnetv2 based feature extractors for CenterNet[1] meta architecture.\n\n[1]: https://arxiv.org/abs/1904.07850\n\"\"\"\n\n\nimport tensorflow.compat.v1 as tf\n\nfrom object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor\n\n\nclass CenterNetResnetFeatureExtractor(CenterNetFeatureExtractor):\n \"\"\"Resnet v2 base feature extractor for the CenterNet model.\"\"\"\n\n def __init__(self, resnet_type, channel_means=(0., 0., 0.),\n channel_stds=(1., 1., 1.), bgr_ordering=False):\n \"\"\"Initializes the feature extractor with a specific ResNet architecture.\n\n Args:\n resnet_type: A string specifying which kind of ResNet to use. Currently\n only `resnet_v2_50` and `resnet_v2_101` are supported.\n channel_means: A tuple of floats, denoting the mean of each channel\n which will be subtracted from it.\n channel_stds: A tuple of floats, denoting the standard deviation of each\n channel. Each channel will be divided by its standard deviation value.\n bgr_ordering: bool, if set will change the channel ordering to be in the\n [blue, red, green] order.\n\n \"\"\"\n\n super(CenterNetResnetFeatureExtractor, self).__init__(\n channel_means=channel_means, channel_stds=channel_stds,\n bgr_ordering=bgr_ordering)\n if resnet_type == 'resnet_v2_101':\n self._base_model = tf.keras.applications.ResNet101V2(weights=None,\n include_top=False)\n output_layer = 'conv5_block3_out'\n elif resnet_type == 'resnet_v2_50':\n self._base_model = tf.keras.applications.ResNet50V2(weights=None,\n include_top=False)\n output_layer = 'conv5_block3_out'\n else:\n raise ValueError('Unknown Resnet Model {}'.format(resnet_type))\n output_layer = self._base_model.get_layer(output_layer)\n\n self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input,\n outputs=output_layer.output)\n resnet_output = self._resnet_model(self._base_model.input)\n\n for num_filters in [256, 128, 64]:\n # TODO(vighneshb) This section has a few differences from the paper\n # Figure out how much of a performance impact they have.\n\n # 1. We use a simple convolution instead of a deformable convolution\n conv = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=3,\n strides=1, padding='same')\n resnet_output = conv(resnet_output)\n resnet_output = tf.keras.layers.BatchNormalization()(resnet_output)\n resnet_output = tf.keras.layers.ReLU()(resnet_output)\n\n # 2. 
We use the default initialization for the convolution layers\n      # instead of initializing it to do bilinear upsampling.\n      conv_transpose = tf.keras.layers.Conv2DTranspose(filters=num_filters,\n                                                       kernel_size=3, strides=2,\n                                                       padding='same')\n      resnet_output = conv_transpose(resnet_output)\n      resnet_output = tf.keras.layers.BatchNormalization()(resnet_output)\n      resnet_output = tf.keras.layers.ReLU()(resnet_output)\n\n    self._feature_extractor_model = tf.keras.models.Model(\n        inputs=self._base_model.input, outputs=resnet_output)\n\n  def preprocess(self, resized_inputs):\n    \"\"\"Preprocess input images for the ResNet model.\n\n    This scales images in the range [0, 255] to the range [-1, 1]\n\n    Args:\n      resized_inputs: a [batch, height, width, channels] float32 tensor.\n\n    Returns:\n      outputs: a [batch, height, width, channels] float32 tensor.\n\n    \"\"\"\n    resized_inputs = super(CenterNetResnetFeatureExtractor, self).preprocess(\n        resized_inputs)\n    return tf.keras.applications.resnet_v2.preprocess_input(resized_inputs)\n\n  def load_feature_extractor_weights(self, path):\n    self._base_model.load_weights(path)\n\n  def call(self, inputs):\n    \"\"\"Returns image features extracted by the backbone.\n\n    Args:\n      inputs: An image tensor of shape [batch_size, input_height,\n        input_width, 3]\n\n    Returns:\n      features_list: A list of length 1 containing a tensor of shape\n        [batch_size, input_height // 4, input_width // 4, 64] containing\n        the features extracted by the ResNet.\n    \"\"\"\n    return [self._feature_extractor_model(inputs)]\n\n  @property\n  def num_feature_outputs(self):\n    return 1\n\n  @property\n  def out_stride(self):\n    return 4\n\n  @property\n  def supported_sub_model_types(self):\n    return ['classification']\n\n  def get_sub_model(self, sub_model_type):\n    if sub_model_type == 'classification':\n      return self._base_model\n    else:\n      raise ValueError(\n          'Sub model type \"{}\" not supported.'.format(sub_model_type))\n\n\ndef resnet_v2_101(channel_means, channel_stds, bgr_ordering):\n  \"\"\"The ResNet v2 101 feature extractor.\"\"\"\n\n  return CenterNetResnetFeatureExtractor(\n      resnet_type='resnet_v2_101',\n      channel_means=channel_means,\n      channel_stds=channel_stds,\n      bgr_ordering=bgr_ordering\n  )\n\n\ndef resnet_v2_50(channel_means, channel_stds, bgr_ordering):\n  \"\"\"The ResNet v2 50 feature extractor.\"\"\"\n\n  return CenterNetResnetFeatureExtractor(\n      resnet_type='resnet_v2_50',\n      channel_means=channel_means,\n      channel_stds=channel_stds,\n      bgr_ordering=bgr_ordering)\n",
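As a quick sanity sketch for the extractor above (illustrative; assumes TF2 eager execution and reuses this module's `tf` import): the backbone downsamples by 32 and the three stride-2 transpose convolutions upsample back, giving the documented output stride of 4 with 64 channels from the last block:

import numpy as np

extractor = resnet_v2_50(channel_means=(0., 0., 0.),
                         channel_stds=(1., 1., 1.),
                         bgr_ordering=False)
images = tf.constant(np.random.uniform(0., 255., (1, 256, 256, 3)), tf.float32)
features = extractor.call(extractor.preprocess(images))
# 256 / out_stride == 64 in each spatial dimension; last block has 64 filters.
assert features[0].shape.as_list() == [1, 64, 64, 64]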
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base target assigner module.\n\nThe job of a TargetAssigner is, for a given set of anchors (bounding boxes) and\ngroundtruth detections (bounding boxes), to assign classification and regression\ntargets to each anchor as well as weights to each anchor (specifying, e.g.,\nwhich anchors should not contribute to training loss).\n\nIt assigns classification/regression targets by performing the following steps:\n1) Computing pairwise similarity between anchors and groundtruth boxes using a\n provided RegionSimilarity Calculator\n2) Computing a matching based on the similarity matrix using a provided Matcher\n3) Assigning regression targets based on the matching and a provided BoxCoder\n4) Assigning classification targets based on the matching and groundtruth labels\n\nNote that TargetAssigners only operate on detections from a single\nimage at a time, so any logic for applying a TargetAssigner to multiple\nimages must be handled externally.\n\"\"\"\n\nimport tensorflow as tf\n\nfrom official.vision.detection.utils.object_detection import box_list\nfrom official.vision.detection.utils.object_detection import shape_utils\n\nKEYPOINTS_FIELD_NAME = 'keypoints'\n\n\nclass TargetAssigner(object):\n \"\"\"Target assigner to compute classification and regression targets.\"\"\"\n\n def __init__(self,\n similarity_calc,\n matcher,\n box_coder,\n negative_class_weight=1.0,\n unmatched_cls_target=None):\n \"\"\"Construct Object Detection Target Assigner.\n\n Args:\n similarity_calc: a RegionSimilarityCalculator\n matcher: Matcher used to match groundtruth to anchors.\n box_coder: BoxCoder used to encode matching groundtruth boxes with respect\n to anchors.\n negative_class_weight: classification weight to be associated to negative\n anchors (default: 1.0). The weight must be in [0., 1.].\n unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]\n which is consistent with the classification target for each anchor (and\n can be empty for scalar targets). This shape must thus be compatible\n with the groundtruth labels that are passed to the \"assign\" function\n (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). 
If set to None,\n unmatched_cls_target is set to be [0] for each anchor.\n\n Raises:\n ValueError: if similarity_calc is not a RegionSimilarityCalculator or\n if matcher is not a Matcher or if box_coder is not a BoxCoder\n \"\"\"\n self._similarity_calc = similarity_calc\n self._matcher = matcher\n self._box_coder = box_coder\n self._negative_class_weight = negative_class_weight\n if unmatched_cls_target is None:\n self._unmatched_cls_target = tf.constant([0], tf.float32)\n else:\n self._unmatched_cls_target = unmatched_cls_target\n\n @property\n def box_coder(self):\n return self._box_coder\n\n def assign(self,\n anchors,\n groundtruth_boxes,\n groundtruth_labels=None,\n groundtruth_weights=None,\n **params):\n \"\"\"Assign classification and regression targets to each anchor.\n\n For a given set of anchors and groundtruth detections, match anchors\n to groundtruth_boxes and assign classification and regression targets to\n each anchor as well as weights based on the resulting match (specifying,\n e.g., which anchors should not contribute to training loss).\n\n Anchors that are not matched to anything are given a classification target\n of self._unmatched_cls_target which can be specified via the constructor.\n\n Args:\n anchors: a BoxList representing N anchors\n groundtruth_boxes: a BoxList representing M groundtruth boxes\n groundtruth_labels: a tensor of shape [M, d_1, ... d_k] with labels for\n each of the ground_truth boxes. The subshape [d_1, ... d_k] can be empty\n (corresponding to scalar inputs). When set to None, groundtruth_labels\n assumes a binary problem where all ground_truth boxes get a positive\n label (of 1).\n groundtruth_weights: a float tensor of shape [M] indicating the weight to\n assign to all anchors match to a particular groundtruth box. The weights\n must be in [0., 1.]. If None, all weights are set to 1.\n **params: Additional keyword arguments for specific implementations of the\n Matcher.\n\n Returns:\n cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],\n where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels\n which has shape [num_gt_boxes, d_1, d_2, ... 
d_k].\n cls_weights: a float32 tensor with shape [num_anchors]\n reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]\n reg_weights: a float32 tensor with shape [num_anchors]\n match: a matcher.Match object encoding the match between anchors and\n groundtruth boxes, with rows corresponding to groundtruth boxes\n and columns corresponding to anchors.\n\n Raises:\n ValueError: if anchors or groundtruth_boxes are not of type\n box_list.BoxList\n \"\"\"\n if not isinstance(anchors, box_list.BoxList):\n raise ValueError('anchors must be an BoxList')\n if not isinstance(groundtruth_boxes, box_list.BoxList):\n raise ValueError('groundtruth_boxes must be an BoxList')\n\n if groundtruth_labels is None:\n groundtruth_labels = tf.ones(\n tf.expand_dims(groundtruth_boxes.num_boxes(), 0))\n groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)\n unmatched_shape_assert = shape_utils.assert_shape_equal(\n shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],\n shape_utils.combined_static_and_dynamic_shape(\n self._unmatched_cls_target))\n labels_and_box_shapes_assert = shape_utils.assert_shape_equal(\n shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[:1],\n shape_utils.combined_static_and_dynamic_shape(\n groundtruth_boxes.get())[:1])\n\n if groundtruth_weights is None:\n num_gt_boxes = groundtruth_boxes.num_boxes_static()\n if not num_gt_boxes:\n num_gt_boxes = groundtruth_boxes.num_boxes()\n groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)\n with tf.control_dependencies(\n [unmatched_shape_assert, labels_and_box_shapes_assert]):\n match_quality_matrix = self._similarity_calc(\n groundtruth_boxes.get(), anchors.get())\n match = self._matcher.match(match_quality_matrix, **params)\n reg_targets = self._create_regression_targets(anchors, groundtruth_boxes,\n match)\n cls_targets = self._create_classification_targets(groundtruth_labels,\n match)\n reg_weights = self._create_regression_weights(match, groundtruth_weights)\n cls_weights = self._create_classification_weights(match,\n groundtruth_weights)\n\n num_anchors = anchors.num_boxes_static()\n if num_anchors is not None:\n reg_targets = self._reset_target_shape(reg_targets, num_anchors)\n cls_targets = self._reset_target_shape(cls_targets, num_anchors)\n reg_weights = self._reset_target_shape(reg_weights, num_anchors)\n cls_weights = self._reset_target_shape(cls_weights, num_anchors)\n\n return cls_targets, cls_weights, reg_targets, reg_weights, match\n\n def _reset_target_shape(self, target, num_anchors):\n \"\"\"Sets the static shape of the target.\n\n Args:\n target: the target tensor. 
Its first dimension will be overwritten.\n num_anchors: the number of anchors, which is used to override the target's\n first dimension.\n\n Returns:\n A tensor with the shape info filled in.\n \"\"\"\n target_shape = target.get_shape().as_list()\n target_shape[0] = num_anchors\n target.set_shape(target_shape)\n return target\n\n def _create_regression_targets(self, anchors, groundtruth_boxes, match):\n \"\"\"Returns a regression target for each anchor.\n\n Args:\n anchors: a BoxList representing N anchors\n groundtruth_boxes: a BoxList representing M groundtruth_boxes\n match: a matcher.Match object\n\n Returns:\n reg_targets: a float32 tensor with shape [N, box_code_dimension]\n \"\"\"\n matched_gt_boxes = match.gather_based_on_match(\n groundtruth_boxes.get(),\n unmatched_value=tf.zeros(4),\n ignored_value=tf.zeros(4))\n matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)\n if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):\n groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)\n matched_keypoints = match.gather_based_on_match(\n groundtruth_keypoints,\n unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),\n ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))\n matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)\n matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)\n match_results_shape = shape_utils.combined_static_and_dynamic_shape(\n match.match_results)\n\n # Zero out the unmatched and ignored regression targets.\n unmatched_ignored_reg_targets = tf.tile(self._default_regression_target(),\n [match_results_shape[0], 1])\n matched_anchors_mask = match.matched_column_indicator()\n # To broadcast matched_anchors_mask to the same shape as\n # matched_reg_targets.\n matched_anchors_mask = tf.tile(\n tf.expand_dims(matched_anchors_mask, 1),\n [1, tf.shape(matched_reg_targets)[1]])\n reg_targets = tf.where(matched_anchors_mask, matched_reg_targets,\n unmatched_ignored_reg_targets)\n return reg_targets\n\n def _default_regression_target(self):\n \"\"\"Returns the default target for anchors to regress to.\n\n Default regression targets are set to zero (though in\n this implementation what these targets are set to should\n not matter as the regression weight of any box set to\n regress to the default target is zero).\n\n Returns:\n default_target: a float32 tensor with shape [1, box_code_dimension]\n \"\"\"\n return tf.constant([self._box_coder.code_size * [0]], tf.float32)\n\n def _create_classification_targets(self, groundtruth_labels, match):\n \"\"\"Create classification targets for each anchor.\n\n Assign a classification target of for each anchor to the matching\n groundtruth label that is provided by match. Anchors that are not matched\n to anything are given the target self._unmatched_cls_target\n\n Args:\n groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k] with\n labels for each of the ground_truth boxes. The subshape [d_1, ... d_k]\n can be empty (corresponding to scalar labels).\n match: a matcher.Match object that provides a matching between anchors and\n groundtruth boxes.\n\n Returns:\n a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the\n subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has\n shape [num_gt_boxes, d_1, d_2, ... 
d_k].\n \"\"\"\n return match.gather_based_on_match(\n groundtruth_labels,\n unmatched_value=self._unmatched_cls_target,\n ignored_value=self._unmatched_cls_target)\n\n def _create_regression_weights(self, match, groundtruth_weights):\n \"\"\"Set regression weight for each anchor.\n\n Only positive anchors are set to contribute to the regression loss, so this\n method returns a weight of 1 for every positive anchor and 0 for every\n negative anchor.\n\n Args:\n match: a matcher.Match object that provides a matching between anchors and\n groundtruth boxes.\n groundtruth_weights: a float tensor of shape [M] indicating the weight to\n assign to all anchors match to a particular groundtruth box.\n\n Returns:\n a float32 tensor with shape [num_anchors] representing regression weights.\n \"\"\"\n return match.gather_based_on_match(\n groundtruth_weights, ignored_value=0., unmatched_value=0.)\n\n def _create_classification_weights(self, match, groundtruth_weights):\n \"\"\"Create classification weights for each anchor.\n\n Positive (matched) anchors are associated with a weight of\n positive_class_weight and negative (unmatched) anchors are associated with\n a weight of negative_class_weight. When anchors are ignored, weights are set\n to zero. By default, both positive/negative weights are set to 1.0,\n but they can be adjusted to handle class imbalance (which is almost always\n the case in object detection).\n\n Args:\n match: a matcher.Match object that provides a matching between anchors and\n groundtruth boxes.\n groundtruth_weights: a float tensor of shape [M] indicating the weight to\n assign to all anchors match to a particular groundtruth box.\n\n Returns:\n a float32 tensor with shape [num_anchors] representing classification\n weights.\n \"\"\"\n return match.gather_based_on_match(\n groundtruth_weights,\n ignored_value=0.,\n unmatched_value=self._negative_class_weight)\n\n def get_box_coder(self):\n \"\"\"Get BoxCoder of this TargetAssigner.\n\n Returns:\n BoxCoder object.\n \"\"\"\n return self._box_coder\n",
"# Lint as: python2, python3\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Keypoint Head.\n\nContains Keypoint prediction head classes for different meta architectures.\nAll the keypoint prediction heads have a predict function that receives the\n`features` as the first argument and returns `keypoint_predictions`.\nKeypoints could be used to represent the human body joint locations as in\nMask RCNN paper. Or they could be used to represent different part locations of\nobjects.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import range\nimport tensorflow.compat.v1 as tf\nimport tf_slim as slim\n\nfrom object_detection.predictors.heads import head\n\n\nclass MaskRCNNKeypointHead(head.Head):\n \"\"\"Mask RCNN keypoint prediction head.\n\n Please refer to Mask RCNN paper:\n https://arxiv.org/abs/1703.06870\n \"\"\"\n\n def __init__(self,\n num_keypoints=17,\n conv_hyperparams_fn=None,\n keypoint_heatmap_height=56,\n keypoint_heatmap_width=56,\n keypoint_prediction_num_conv_layers=8,\n keypoint_prediction_conv_depth=512):\n \"\"\"Constructor.\n\n Args:\n num_keypoints: (int scalar) number of keypoints.\n conv_hyperparams_fn: A function to generate tf-slim arg_scope with\n hyperparameters for convolution ops.\n keypoint_heatmap_height: Desired output mask height. The default value\n is 14.\n keypoint_heatmap_width: Desired output mask width. The default value\n is 14.\n keypoint_prediction_num_conv_layers: Number of convolution layers applied\n to the image_features in mask prediction branch.\n keypoint_prediction_conv_depth: The depth for the first conv2d_transpose\n op applied to the image_features in the mask prediction branch. 
If set\n to 0, the depth of the convolution layers will be automatically chosen\n based on the number of object classes and the number of channels in the\n image features.\n \"\"\"\n super(MaskRCNNKeypointHead, self).__init__()\n self._num_keypoints = num_keypoints\n self._conv_hyperparams_fn = conv_hyperparams_fn\n self._keypoint_heatmap_height = keypoint_heatmap_height\n self._keypoint_heatmap_width = keypoint_heatmap_width\n self._keypoint_prediction_num_conv_layers = (\n keypoint_prediction_num_conv_layers)\n self._keypoint_prediction_conv_depth = keypoint_prediction_conv_depth\n\n def predict(self, features, num_predictions_per_location=1):\n \"\"\"Performs keypoint prediction.\n\n Args:\n features: A float tensor of shape [batch_size, height, width,\n channels] containing features for a batch of images.\n num_predictions_per_location: Int containing number of predictions per\n location.\n\n Returns:\n instance_masks: A float tensor of shape\n [batch_size, 1, num_keypoints, heatmap_height, heatmap_width].\n\n Raises:\n ValueError: If num_predictions_per_location is not 1.\n \"\"\"\n if num_predictions_per_location != 1:\n raise ValueError('Only num_predictions_per_location=1 is supported')\n with slim.arg_scope(self._conv_hyperparams_fn()):\n net = slim.conv2d(\n features,\n self._keypoint_prediction_conv_depth, [3, 3],\n scope='conv_1')\n for i in range(1, self._keypoint_prediction_num_conv_layers):\n net = slim.conv2d(\n net,\n self._keypoint_prediction_conv_depth, [3, 3],\n scope='conv_%d' % (i + 1))\n net = slim.conv2d_transpose(\n net, self._num_keypoints, [2, 2], scope='deconv1')\n heatmaps_mask = tf.image.resize_bilinear(\n net, [self._keypoint_heatmap_height, self._keypoint_heatmap_width],\n align_corners=True,\n name='upsample')\n return tf.expand_dims(\n tf.transpose(heatmaps_mask, perm=[0, 3, 1, 2]),\n axis=1,\n name='KeypointPredictor')\n",
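A pure-Keras re-creation of the prediction flow above (a sketch only; the real head uses slim ops under an arg_scope) makes the output shape concrete: the 3x3 convolutions preserve the ROI resolution, the [2, 2] transpose convolution at its default stride of 1 does too, and the bilinear resize produces the 56x56 heatmaps before the final transpose/expand to [batch, 1, num_keypoints, 56, 56]:

import tensorflow.compat.v1 as tf

features = tf.zeros([2, 14, 14, 1024])  # ROI-aligned features; shapes illustrative
net = features
for _ in range(8):
  net = tf.keras.layers.Conv2D(512, 3, padding='same', activation='relu')(net)
net = tf.keras.layers.Conv2DTranspose(17, 2, padding='same')(net)
net = tf.image.resize_bilinear(net, [56, 56], align_corners=True)
heatmaps = tf.expand_dims(tf.transpose(net, perm=[0, 3, 1, 2]), axis=1)
assert heatmaps.shape.as_list() == [2, 1, 17, 56, 56]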
"# Lint as: python2, python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for ssd_mobilenet_v2_nas_fpn_feature_extractor.\"\"\"\nimport unittest\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom object_detection.models import ssd_feature_extractor_test\nfrom object_detection.models import ssd_mobilenet_v2_mnasfpn_feature_extractor as mnasfpn_feature_extractor\nfrom object_detection.utils import tf_version\n\n\[email protected](tf_version.is_tf2(), 'Skipping TF1.X only test.')\nclass SsdMobilenetV2MnasFPNFeatureExtractorTest(\n ssd_feature_extractor_test.SsdFeatureExtractorTestBase):\n\n def _create_feature_extractor(self,\n depth_multiplier,\n pad_to_multiple,\n use_explicit_padding=False):\n min_depth = 16\n is_training = True\n fpn_num_filters = 48\n return mnasfpn_feature_extractor.SSDMobileNetV2MnasFPNFeatureExtractor(\n is_training,\n depth_multiplier,\n min_depth,\n pad_to_multiple,\n self.conv_hyperparams_fn,\n additional_layer_depth=fpn_num_filters,\n use_explicit_padding=use_explicit_padding)\n\n def test_extract_features_returns_correct_shapes_320_256(self):\n image_height = 320\n image_width = 256\n depth_multiplier = 1.0\n pad_to_multiple = 1\n expected_feature_map_shape = [(2, 40, 32, 48), (2, 20, 16, 48),\n (2, 10, 8, 48), (2, 5, 4, 48)]\n self.check_extract_features_returns_correct_shape(\n 2, image_height, image_width, depth_multiplier, pad_to_multiple,\n expected_feature_map_shape, use_explicit_padding=False)\n self.check_extract_features_returns_correct_shape(\n 2, image_height, image_width, depth_multiplier, pad_to_multiple,\n expected_feature_map_shape, use_explicit_padding=True)\n\n def test_extract_features_returns_correct_shapes_enforcing_min_depth(self):\n image_height = 256\n image_width = 256\n depth_multiplier = 0.5**12\n pad_to_multiple = 1\n expected_feature_map_shape = [(2, 32, 32, 16), (2, 16, 16, 16),\n (2, 8, 8, 16), (2, 4, 4, 16)]\n self.check_extract_features_returns_correct_shape(\n 2, image_height, image_width, depth_multiplier, pad_to_multiple,\n expected_feature_map_shape, use_explicit_padding=False)\n self.check_extract_features_returns_correct_shape(\n 2, image_height, image_width, depth_multiplier, pad_to_multiple,\n expected_feature_map_shape, use_explicit_padding=True)\n\n def test_preprocess_returns_correct_value_range(self):\n image_height = 320\n image_width = 320\n depth_multiplier = 1\n pad_to_multiple = 1\n test_image = np.random.rand(2, image_height, image_width, 3)\n feature_extractor = self._create_feature_extractor(depth_multiplier,\n pad_to_multiple)\n preprocessed_image = feature_extractor.preprocess(test_image)\n self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
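The expected feature-map shapes in the tests above follow directly from the MnasFPN output strides: four levels at strides 8, 16, 32 and 64 of the (padded) input, each with `additional_layer_depth` channels. A small helper (names illustrative) makes the arithmetic explicit:

def expected_fpn_shapes(batch, height, width, num_filters,
                        strides=(8, 16, 32, 64)):
  return [(batch, height // s, width // s, num_filters) for s in strides]

assert expected_fpn_shapes(2, 320, 256, 48) == [
    (2, 40, 32, 48), (2, 20, 16, 48), (2, 10, 8, 48), (2, 5, 4, 48)]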
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for meta_architectures.lstm_ssd_meta_arch.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport tf_slim as slim\n\nfrom lstm_object_detection.lstm import lstm_cells\nfrom lstm_object_detection.meta_architectures import lstm_ssd_meta_arch\nfrom object_detection.core import anchor_generator\nfrom object_detection.core import box_list\nfrom object_detection.core import losses\nfrom object_detection.core import post_processing\nfrom object_detection.core import region_similarity_calculator as sim_calc\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.core import target_assigner\nfrom object_detection.models import feature_map_generators\nfrom object_detection.utils import test_case\nfrom object_detection.utils import test_utils\n\n\nMAX_TOTAL_NUM_BOXES = 5\nNUM_CLASSES = 1\n\n\nclass FakeLSTMFeatureExtractor(\n lstm_ssd_meta_arch.LSTMSSDFeatureExtractor):\n\n def __init__(self):\n super(FakeLSTMFeatureExtractor, self).__init__(\n is_training=True,\n depth_multiplier=1.0,\n min_depth=0,\n pad_to_multiple=1,\n conv_hyperparams_fn=self.scope_fn)\n self._lstm_state_depth = 256\n\n def scope_fn(self):\n with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu6) as sc:\n return sc\n\n def create_lstm_cell(self):\n pass\n\n def extract_features(self, preprocessed_inputs, state_saver=None,\n state_name='lstm_state', unroll_length=5, scope=None):\n with tf.variable_scope('mock_model'):\n net = slim.conv2d(inputs=preprocessed_inputs, num_outputs=32,\n kernel_size=1, scope='layer1')\n image_features = {'last_layer': net}\n\n self._states_out = {}\n feature_map_layout = {\n 'from_layer': ['last_layer'],\n 'layer_depth': [-1],\n 'use_explicit_padding': self._use_explicit_padding,\n 'use_depthwise': self._use_depthwise,\n }\n feature_maps = feature_map_generators.multi_resolution_feature_maps(\n feature_map_layout=feature_map_layout,\n depth_multiplier=(self._depth_multiplier),\n min_depth=self._min_depth,\n insert_1x1_conv=True,\n image_features=image_features)\n return list(feature_maps.values())\n\n\nclass FakeLSTMInterleavedFeatureExtractor(\n lstm_ssd_meta_arch.LSTMSSDInterleavedFeatureExtractor):\n\n def __init__(self):\n super(FakeLSTMInterleavedFeatureExtractor, self).__init__(\n is_training=True,\n depth_multiplier=1.0,\n min_depth=0,\n pad_to_multiple=1,\n conv_hyperparams_fn=self.scope_fn)\n self._lstm_state_depth = 256\n\n def scope_fn(self):\n with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu6) as sc:\n return sc\n\n def create_lstm_cell(self):\n pass\n\n def extract_base_features_large(self, preprocessed_inputs):\n with tf.variable_scope('base_large'):\n net = 
slim.conv2d(inputs=preprocessed_inputs, num_outputs=32,\n kernel_size=1, scope='layer1')\n return net\n\n def extract_base_features_small(self, preprocessed_inputs):\n with tf.variable_scope('base_small'):\n net = slim.conv2d(inputs=preprocessed_inputs, num_outputs=32,\n kernel_size=1, scope='layer1')\n return net\n\n def extract_features(self, preprocessed_inputs, state_saver=None,\n state_name='lstm_state', unroll_length=5, scope=None):\n with tf.variable_scope('mock_model'):\n net_large = self.extract_base_features_large(preprocessed_inputs)\n net_small = self.extract_base_features_small(preprocessed_inputs)\n net = slim.conv2d(\n inputs=tf.concat([net_large, net_small], axis=3),\n num_outputs=32,\n kernel_size=1,\n scope='layer1')\n image_features = {'last_layer': net}\n\n self._states_out = {}\n feature_map_layout = {\n 'from_layer': ['last_layer'],\n 'layer_depth': [-1],\n 'use_explicit_padding': self._use_explicit_padding,\n 'use_depthwise': self._use_depthwise,\n }\n feature_maps = feature_map_generators.multi_resolution_feature_maps(\n feature_map_layout=feature_map_layout,\n depth_multiplier=(self._depth_multiplier),\n min_depth=self._min_depth,\n insert_1x1_conv=True,\n image_features=image_features)\n return list(feature_maps.values())\n\n\nclass MockAnchorGenerator2x2(anchor_generator.AnchorGenerator):\n \"\"\"Sets up a simple 2x2 anchor grid on the unit square.\"\"\"\n\n def name_scope(self):\n return 'MockAnchorGenerator'\n\n def num_anchors_per_location(self):\n return [1]\n\n def _generate(self, feature_map_shape_list, im_height, im_width):\n return [box_list.BoxList(\n tf.constant([[0, 0, .5, .5],\n [0, .5, .5, 1],\n [.5, 0, 1, .5],\n [1., 1., 1.5, 1.5] # Anchor that is outside clip_window.\n ], tf.float32))]\n\n def num_anchors(self):\n return 4\n\n\nclass LSTMSSDMetaArchTest(test_case.TestCase):\n\n def _create_model(self,\n interleaved=False,\n apply_hard_mining=True,\n normalize_loc_loss_by_codesize=False,\n add_background_class=True,\n random_example_sampling=False,\n use_expected_classification_loss_under_sampling=False,\n min_num_negative_samples=1,\n desired_negative_sampling_ratio=3,\n unroll_length=1):\n num_classes = NUM_CLASSES\n is_training = False\n mock_anchor_generator = MockAnchorGenerator2x2()\n mock_box_predictor = test_utils.MockBoxPredictor(is_training, num_classes)\n mock_box_coder = test_utils.MockBoxCoder()\n if interleaved:\n fake_feature_extractor = FakeLSTMInterleavedFeatureExtractor()\n else:\n fake_feature_extractor = FakeLSTMFeatureExtractor()\n mock_matcher = test_utils.MockMatcher()\n region_similarity_calculator = sim_calc.IouSimilarity()\n encode_background_as_zeros = False\n def image_resizer_fn(image):\n return [tf.identity(image), tf.shape(image)]\n\n classification_loss = losses.WeightedSigmoidClassificationLoss()\n localization_loss = losses.WeightedSmoothL1LocalizationLoss()\n non_max_suppression_fn = functools.partial(\n post_processing.batch_multiclass_non_max_suppression,\n score_thresh=-20.0,\n iou_thresh=1.0,\n max_size_per_class=5,\n max_total_size=MAX_TOTAL_NUM_BOXES)\n classification_loss_weight = 1.0\n localization_loss_weight = 1.0\n negative_class_weight = 1.0\n normalize_loss_by_num_matches = False\n\n hard_example_miner = None\n if apply_hard_mining:\n # This hard example miner is expected to be a no-op.\n hard_example_miner = losses.HardExampleMiner(\n num_hard_examples=None,\n iou_threshold=1.0)\n\n target_assigner_instance = target_assigner.TargetAssigner(\n region_similarity_calculator,\n mock_matcher,\n 
mock_box_coder,\n negative_class_weight=negative_class_weight)\n\n code_size = 4\n model = lstm_ssd_meta_arch.LSTMSSDMetaArch(\n is_training=is_training,\n anchor_generator=mock_anchor_generator,\n box_predictor=mock_box_predictor,\n box_coder=mock_box_coder,\n feature_extractor=fake_feature_extractor,\n encode_background_as_zeros=encode_background_as_zeros,\n image_resizer_fn=image_resizer_fn,\n non_max_suppression_fn=non_max_suppression_fn,\n score_conversion_fn=tf.identity,\n classification_loss=classification_loss,\n localization_loss=localization_loss,\n classification_loss_weight=classification_loss_weight,\n localization_loss_weight=localization_loss_weight,\n normalize_loss_by_num_matches=normalize_loss_by_num_matches,\n hard_example_miner=hard_example_miner,\n unroll_length=unroll_length,\n target_assigner_instance=target_assigner_instance,\n add_summaries=False)\n return model, num_classes, mock_anchor_generator.num_anchors(), code_size\n\n def _get_value_for_matching_key(self, dictionary, suffix):\n for key in dictionary.keys():\n if key.endswith(suffix):\n return dictionary[key]\n raise ValueError('key not found {}'.format(suffix))\n\n def test_predict_returns_correct_items_and_sizes(self):\n batch_size = 3\n height = width = 2\n num_unroll = 1\n\n graph = tf.Graph()\n with graph.as_default():\n model, num_classes, num_anchors, code_size = self._create_model()\n preprocessed_images = tf.random_uniform(\n [batch_size * num_unroll, height, width, 3],\n minval=-1.,\n maxval=1.)\n true_image_shapes = tf.tile(\n [[height, width, 3]], [batch_size, 1])\n prediction_dict = model.predict(preprocessed_images, true_image_shapes)\n\n\n self.assertIn('preprocessed_inputs', prediction_dict)\n self.assertIn('box_encodings', prediction_dict)\n self.assertIn('class_predictions_with_background', prediction_dict)\n self.assertIn('feature_maps', prediction_dict)\n self.assertIn('anchors', prediction_dict)\n self.assertAllEqual(\n [batch_size * num_unroll, height, width, 3],\n prediction_dict['preprocessed_inputs'].shape.as_list())\n self.assertAllEqual(\n [batch_size * num_unroll, num_anchors, code_size],\n prediction_dict['box_encodings'].shape.as_list())\n self.assertAllEqual(\n [batch_size * num_unroll, num_anchors, num_classes + 1],\n prediction_dict['class_predictions_with_background'].shape.as_list())\n self.assertAllEqual(\n [num_anchors, code_size],\n prediction_dict['anchors'].shape.as_list())\n\n def test_interleaved_predict_returns_correct_items_and_sizes(self):\n batch_size = 3\n height = width = 2\n num_unroll = 1\n\n graph = tf.Graph()\n with graph.as_default():\n model, num_classes, num_anchors, code_size = self._create_model(\n interleaved=True)\n preprocessed_images = tf.random_uniform(\n [batch_size * num_unroll, height, width, 3],\n minval=-1.,\n maxval=1.)\n true_image_shapes = tf.tile(\n [[height, width, 3]], [batch_size, 1])\n prediction_dict = model.predict(preprocessed_images, true_image_shapes)\n\n self.assertIn('preprocessed_inputs', prediction_dict)\n self.assertIn('box_encodings', prediction_dict)\n self.assertIn('class_predictions_with_background', prediction_dict)\n self.assertIn('feature_maps', prediction_dict)\n self.assertIn('anchors', prediction_dict)\n self.assertAllEqual(\n [batch_size * num_unroll, height, width, 3],\n prediction_dict['preprocessed_inputs'].shape.as_list())\n self.assertAllEqual(\n [batch_size * num_unroll, num_anchors, code_size],\n prediction_dict['box_encodings'].shape.as_list())\n self.assertAllEqual(\n [batch_size * num_unroll, 
num_anchors, num_classes + 1],\n prediction_dict['class_predictions_with_background'].shape.as_list())\n self.assertAllEqual(\n [num_anchors, code_size],\n prediction_dict['anchors'].shape.as_list())\n\nif __name__ == '__main__':\n tf.test.main()\n",
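The shape assertions in the two predict tests reduce to arithmetic over the mock setup: the 2x2 anchor grid plus one deliberately out-of-window anchor gives num_anchors == 4, and every prediction tensor's leading dimension is batch_size * unroll_length. A tiny helper (illustrative, mirroring the assertions above):

def predicted_shapes(batch_size, num_unroll, num_anchors=4,
                     num_classes=NUM_CLASSES, code_size=4):
  n = batch_size * num_unroll
  return {
      'box_encodings': [n, num_anchors, code_size],
      'class_predictions_with_background': [n, num_anchors, num_classes + 1],
      'anchors': [num_anchors, code_size],
  }

assert predicted_shapes(3, 1)['box_encodings'] == [3, 4, 4]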
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Box matcher.\"\"\"\n\n# Import libraries\nimport tensorflow as tf\n\nfrom official.vision.beta.ops import box_ops\n\n\[email protected]_keras_serializable(package='Vision')\nclass BoxMatcher(tf.keras.layers.Layer):\n \"\"\"Match boxes with groundtruth boxes.\"\"\"\n\n def __init__(self,\n foreground_iou_threshold=0.5,\n background_iou_high_threshold=0.5,\n background_iou_low_threshold=0,\n **kwargs):\n \"\"\"Initializes a box matcher.\n\n Args:\n foreground_iou_threshold: float, represent the IoU threshold for a box to\n be considered as positive (if >= `foreground_iou_threshold`).\n background_iou_high_threshold: float, represent the IoU threshold for a\n box to be considered as negative (if overlap in\n [`background_iou_low_threshold`, `background_iou_high_threshold`]).\n background_iou_low_threshold: float, represent the IoU threshold for a box\n to be considered as negative (if overlap in\n [`background_iou_low_threshold`, `background_iou_high_threshold`])\n **kwargs: other key word arguments passed to Layer.\n \"\"\"\n self._config_dict = {\n 'foreground_iou_threshold': foreground_iou_threshold,\n 'background_iou_high_threshold': background_iou_high_threshold,\n 'background_iou_low_threshold': background_iou_low_threshold,\n }\n super(BoxMatcher, self).__init__(**kwargs)\n\n def call(self, boxes, gt_boxes, gt_classes):\n \"\"\"Match boxes to groundtruth boxes.\n\n Given the proposal boxes and the groundtruth boxes and classes, perform the\n groundtruth matching by taking the argmax of the IoU between boxes and\n groundtruth boxes.\n\n Args:\n boxes: a tensor of shape of [batch_size, N, 4] representing the box\n coordianates to be matched to groundtruth boxes.\n gt_boxes: a tensor of shape of [batch_size, MAX_INSTANCES, 4] representing\n the groundtruth box coordinates. It is padded with -1s to indicate the\n invalid boxes.\n gt_classes: [batch_size, MAX_INSTANCES] representing the groundtruth box\n classes. It is padded with -1s to indicate the invalid classes.\n\n Returns:\n matched_gt_boxes: a tensor of shape of [batch, N, 4], representing\n the matched groundtruth box coordinates for each input box. The box is\n considered to match to a groundtruth box only if the IoU overlap is\n greater than `foreground_iou_threshold`. If the box is a negative match,\n or does not overlap with any groundtruth boxes, the matched boxes will\n be set to all 0s.\n matched_gt_classes: a tensor of shape of [batch, N], representing\n the matched groundtruth classes for each input box. If the box is a\n negative match or does not overlap with any groundtruth boxes, the\n matched classes of it will be set to 0, which corresponds to the\n background class.\n matched_gt_indices: a tensor of shape of [batch, N], representing the\n indices of the matched groundtruth boxes in the original gt_boxes\n tensor. 
If the box is a negative match or does not overlap with any\n groundtruth boxes, the index of the matched groundtruth will be set to\n -1.\n positive_matches: a bool tensor of shape of [batch, N], representing\n whether each box is a positive matches or not. A positive match is the\n case where IoU of a box with any groundtruth box is greater than\n `foreground_iou_threshold`.\n negative_matches: a bool tensor of shape of [batch, N], representing\n whether each box is a negative matches or not. A negative match is the\n case where IoU of a box with any groundtruth box is greater than\n `background_iou_low_threshold` and less than\n `background_iou_low_threshold`.\n ignored_matches: a bool tensor of shape of [batch, N], representing\n whether each box is an ignored matches or not. An ignored matches is the\n match that is neither positive or negative.\n \"\"\"\n matched_gt_boxes, matched_gt_classes, matched_gt_indices, matched_iou, _ = (\n box_ops.box_matching(boxes, gt_boxes, gt_classes))\n\n positive_matches = tf.greater(\n matched_iou, self._config_dict['foreground_iou_threshold'])\n negative_matches = tf.logical_and(\n tf.greater_equal(\n matched_iou, self._config_dict['background_iou_low_threshold']),\n tf.less(\n matched_iou, self._config_dict['background_iou_high_threshold']))\n ignored_matches = tf.logical_and(\n tf.less(matched_iou, 0.0),\n tf.greater_equal(\n matched_iou, self._config_dict['background_iou_high_threshold']))\n ignored_matches = tf.logical_and(\n ignored_matches,\n tf.less(\n matched_iou, self._config_dict['foreground_iou_threshold']))\n\n background_indicator = tf.logical_or(negative_matches, ignored_matches)\n\n # re-assign negatively matched boxes to the background class.\n matched_gt_boxes = tf.where(\n tf.tile(tf.expand_dims(background_indicator, -1), [1, 1, 4]),\n tf.zeros_like(matched_gt_boxes),\n matched_gt_boxes)\n matched_gt_classes = tf.where(\n background_indicator,\n tf.zeros_like(matched_gt_classes),\n matched_gt_classes)\n matched_gt_indices = tf.where(\n background_indicator,\n -tf.ones_like(matched_gt_indices),\n matched_gt_indices)\n\n return (matched_gt_boxes, matched_gt_classes, matched_gt_indices,\n positive_matches, negative_matches, ignored_matches)\n\n def get_config(self):\n return self._config_dict\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n",
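The three-way partition described in the call docstring, positive above `foreground_iou_threshold`, negative inside [`background_iou_low_threshold`, `background_iou_high_threshold`), and ignored otherwise, can be sketched in NumPy. This shows the intended semantics only, not the layer's exact op composition; the thresholds are hypothetical:

import numpy as np

fg, bg_hi, bg_lo = 0.5, 0.4, 0.1
matched_iou = np.array([0.7, 0.45, 0.25, 0.05])

positive = matched_iou > fg                                # matched to groundtruth
negative = (matched_iou >= bg_lo) & (matched_iou < bg_hi)  # background band
ignored = ~positive & ~negative                            # excluded from the loss
# positive: [ True False False False]
# negative: [False False  True False]
# ignored:  [False  True False  True]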
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Classification decoder and parser.\"\"\"\n# Import libraries\nimport tensorflow as tf\n\nfrom official.vision.beta.dataloaders import decoder\nfrom official.vision.beta.dataloaders import parser\nfrom official.vision.beta.ops import preprocess_ops\n\nMEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)\nSTDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)\n\n\nclass Decoder(decoder.Decoder):\n \"\"\"A tf.Example decoder for classification task.\"\"\"\n\n def __init__(self):\n self._keys_to_features = {\n 'image/encoded': tf.io.FixedLenFeature((), tf.string, default_value=''),\n 'image/class/label': (\n tf.io.FixedLenFeature((), tf.int64, default_value=-1))\n }\n\n def decode(self, serialized_example):\n return tf.io.parse_single_example(\n serialized_example, self._keys_to_features)\n\n\nclass Parser(parser.Parser):\n \"\"\"Parser to parse an image and its annotations into a dictionary of tensors.\"\"\"\n\n def __init__(self,\n output_size,\n num_classes,\n aug_rand_hflip=True,\n dtype='float32'):\n \"\"\"Initializes parameters for parsing annotations in the dataset.\n\n Args:\n output_size: `Tenssor` or `list` for [height, width] of output image. The\n output_size should be divided by the largest feature stride 2^max_level.\n num_classes: `float`, number of classes.\n aug_rand_hflip: `bool`, if True, augment training with random\n horizontal flip.\n dtype: `str`, cast output image in dtype. 
It can be 'float32', 'float16',\n or 'bfloat16'.\n \"\"\"\n self._output_size = output_size\n self._aug_rand_hflip = aug_rand_hflip\n self._num_classes = num_classes\n if dtype == 'float32':\n self._dtype = tf.float32\n elif dtype == 'float16':\n self._dtype = tf.float16\n elif dtype == 'bfloat16':\n self._dtype = tf.bfloat16\n else:\n raise ValueError('dtype {!r} is not supported!'.format(dtype))\n\n def _parse_train_data(self, decoded_tensors):\n \"\"\"Parses data for training.\"\"\"\n label = tf.cast(decoded_tensors['image/class/label'], dtype=tf.int32)\n\n image_bytes = decoded_tensors['image/encoded']\n image_shape = tf.image.extract_jpeg_shape(image_bytes)\n\n # Crops image.\n # TODO(pengchong): support image format other than JPEG.\n cropped_image = preprocess_ops.random_crop_image_v2(\n image_bytes, image_shape)\n image = tf.cond(\n tf.reduce_all(tf.equal(tf.shape(cropped_image), image_shape)),\n lambda: preprocess_ops.center_crop_image_v2(image_bytes, image_shape),\n lambda: cropped_image)\n\n if self._aug_rand_hflip:\n image = tf.image.random_flip_left_right(image)\n\n # Resizes image.\n image = tf.image.resize(\n image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)\n\n # Normalizes image with mean and std pixel values.\n image = preprocess_ops.normalize_image(image,\n offset=MEAN_RGB,\n scale=STDDEV_RGB)\n\n # Convert image to self._dtype.\n image = tf.image.convert_image_dtype(image, self._dtype)\n\n return image, label\n\n def _parse_eval_data(self, decoded_tensors):\n \"\"\"Parses data for evaluation.\"\"\"\n label = tf.cast(decoded_tensors['image/class/label'], dtype=tf.int32)\n image_bytes = decoded_tensors['image/encoded']\n image_shape = tf.image.extract_jpeg_shape(image_bytes)\n\n # Center crops and resizes image.\n image = preprocess_ops.center_crop_image_v2(image_bytes, image_shape)\n\n image = tf.image.resize(\n image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)\n\n image = tf.reshape(image, [self._output_size[0], self._output_size[1], 3])\n\n # Normalizes image with mean and std pixel values.\n image = preprocess_ops.normalize_image(image,\n offset=MEAN_RGB,\n scale=STDDEV_RGB)\n\n # Convert image to self._dtype.\n image = tf.image.convert_image_dtype(image, self._dtype)\n\n return image, label\n",
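A sketch of wiring the decoder and parser above into a tf.data pipeline. The file pattern and batch size are illustrative, and `_parse_train_data` is called directly only for demonstration; real pipelines go through the dataloader factory:

decoder = Decoder()
train_parser = Parser(output_size=[224, 224], num_classes=1001, dtype='float32')

dataset = (
    tf.data.TFRecordDataset(tf.io.gfile.glob('/tmp/train-*.tfrecord'))
    .map(decoder.decode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    .map(train_parser._parse_train_data,
         num_parallel_calls=tf.data.experimental.AUTOTUNE)
    .batch(64)
    .prefetch(tf.data.experimental.AUTOTUNE))
# Each element is an (image, label) pair: image [64, 224, 224, 3], label [64].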
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.utils.object_detection_evaluation.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\nfrom absl.testing import parameterized\nimport numpy as np\nimport six\nfrom six.moves import range\nimport tensorflow.compat.v1 as tf\nfrom object_detection import eval_util\nfrom object_detection.core import standard_fields\nfrom object_detection.utils import object_detection_evaluation\nfrom object_detection.utils import tf_version\n\n\nclass OpenImagesV2EvaluationTest(tf.test.TestCase):\n\n def test_returns_correct_metric_values(self):\n categories = [{\n 'id': 1,\n 'name': 'cat'\n }, {\n 'id': 2,\n 'name': 'dog'\n }, {\n 'id': 3,\n 'name': 'elephant'\n }]\n\n oiv2_evaluator = object_detection_evaluation.OpenImagesDetectionEvaluator(\n categories)\n image_key1 = 'img1'\n groundtruth_boxes1 = np.array(\n [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)\n groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)\n oiv2_evaluator.add_single_ground_truth_image_info(image_key1, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes1,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels1,\n standard_fields.InputDataFields.groundtruth_group_of:\n np.array([], dtype=bool)\n })\n image_key2 = 'img2'\n groundtruth_boxes2 = np.array(\n [[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)\n groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)\n groundtruth_is_group_of_list2 = np.array([False, True, False], dtype=bool)\n oiv2_evaluator.add_single_ground_truth_image_info(image_key2, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes2,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels2,\n standard_fields.InputDataFields.groundtruth_group_of:\n groundtruth_is_group_of_list2\n })\n image_key3 = 'img3'\n groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)\n groundtruth_class_labels3 = np.array([2], dtype=int)\n oiv2_evaluator.add_single_ground_truth_image_info(image_key3, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes3,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels3\n })\n # Add detections\n image_key = 'img2'\n detected_boxes = np.array(\n [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],\n dtype=float)\n detected_class_labels = np.array([1, 1, 3], dtype=int)\n detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)\n oiv2_evaluator.add_single_detected_image_info(image_key, {\n standard_fields.DetectionResultFields.detection_boxes:\n detected_boxes,\n standard_fields.DetectionResultFields.detection_scores:\n detected_scores,\n standard_fields.DetectionResultFields.detection_classes:\n 
detected_class_labels\n })\n metrics = oiv2_evaluator.evaluate()\n self.assertAlmostEqual(\n metrics['OpenImagesV2_PerformanceByCategory/[email protected]/dog'], 0.0)\n self.assertAlmostEqual(\n metrics['OpenImagesV2_PerformanceByCategory/[email protected]/elephant'], 0.0)\n self.assertAlmostEqual(\n metrics['OpenImagesV2_PerformanceByCategory/[email protected]/cat'], 0.16666666)\n self.assertAlmostEqual(metrics['OpenImagesV2_Precision/[email protected]'],\n 0.05555555)\n oiv2_evaluator.clear()\n self.assertFalse(oiv2_evaluator._image_ids)\n\n\nclass OpenImagesChallengeEvaluatorTest(tf.test.TestCase):\n\n def test_returns_correct_detection_metric_values(self):\n categories = [{\n 'id': 1,\n 'name': 'cat'\n }, {\n 'id': 2,\n 'name': 'dog'\n }, {\n 'id': 3,\n 'name': 'elephant'\n }]\n oivchallenge_evaluator = (\n object_detection_evaluation.OpenImagesChallengeEvaluator(\n categories, evaluate_masks=False, group_of_weight=0.5))\n\n image_key = 'img1'\n groundtruth_boxes = np.array(\n [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)\n groundtruth_class_labels = np.array([1, 3, 1], dtype=int)\n groundtruth_is_group_of_list = np.array([False, False, True], dtype=bool)\n groundtruth_verified_labels = np.array([1, 2, 3], dtype=int)\n oivchallenge_evaluator.add_single_ground_truth_image_info(\n image_key, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels,\n standard_fields.InputDataFields.groundtruth_group_of:\n groundtruth_is_group_of_list,\n standard_fields.InputDataFields.groundtruth_image_classes:\n groundtruth_verified_labels,\n })\n image_key = 'img2'\n groundtruth_boxes = np.array(\n [[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)\n groundtruth_class_labels = np.array([1, 1, 3], dtype=int)\n groundtruth_is_group_of_list = np.array([False, False, True], dtype=bool)\n oivchallenge_evaluator.add_single_ground_truth_image_info(\n image_key, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels,\n standard_fields.InputDataFields.groundtruth_group_of:\n groundtruth_is_group_of_list\n })\n image_key = 'img3'\n groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)\n groundtruth_class_labels = np.array([2], dtype=int)\n oivchallenge_evaluator.add_single_ground_truth_image_info(\n image_key, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels\n })\n image_key = 'img1'\n detected_boxes = np.array(\n [[10, 10, 11, 11], [100, 100, 120, 120]], dtype=float)\n detected_class_labels = np.array([2, 2], dtype=int)\n detected_scores = np.array([0.7, 0.8], dtype=float)\n oivchallenge_evaluator.add_single_detected_image_info(\n image_key, {\n standard_fields.DetectionResultFields.detection_boxes:\n detected_boxes,\n standard_fields.DetectionResultFields.detection_scores:\n detected_scores,\n standard_fields.DetectionResultFields.detection_classes:\n detected_class_labels\n })\n image_key = 'img2'\n detected_boxes = np.array(\n [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220],\n [10, 10, 11, 11]],\n dtype=float)\n detected_class_labels = np.array([1, 1, 2, 3], dtype=int)\n detected_scores = np.array([0.7, 0.8, 0.5, 0.9], dtype=float)\n oivchallenge_evaluator.add_single_detected_image_info(\n image_key, {\n 
standard_fields.DetectionResultFields.detection_boxes:\n detected_boxes,\n standard_fields.DetectionResultFields.detection_scores:\n detected_scores,\n standard_fields.DetectionResultFields.detection_classes:\n detected_class_labels\n })\n image_key = 'img3'\n detected_boxes = np.array([[0, 0, 1, 1]], dtype=float)\n detected_class_labels = np.array([2], dtype=int)\n detected_scores = np.array([0.5], dtype=float)\n oivchallenge_evaluator.add_single_detected_image_info(\n image_key, {\n standard_fields.DetectionResultFields.detection_boxes:\n detected_boxes,\n standard_fields.DetectionResultFields.detection_scores:\n detected_scores,\n standard_fields.DetectionResultFields.detection_classes:\n detected_class_labels\n })\n metrics = oivchallenge_evaluator.evaluate()\n expected_metric_name = 'OpenImagesDetectionChallenge'\n\n self.assertAlmostEqual(\n metrics[\n expected_metric_name + '_PerformanceByCategory/[email protected]/dog'],\n 0.3333333333)\n self.assertAlmostEqual(\n metrics[\n expected_metric_name + '_PerformanceByCategory/[email protected]/elephant'],\n 0.333333333333)\n self.assertAlmostEqual(\n metrics[\n expected_metric_name + '_PerformanceByCategory/[email protected]/cat'],\n 0.142857142857)\n self.assertAlmostEqual(\n metrics[expected_metric_name + '_Precision/[email protected]'],\n 0.269841269)\n\n oivchallenge_evaluator.clear()\n self.assertFalse(oivchallenge_evaluator._image_ids)\n\n def test_returns_correct_instance_segm_metric_values(self):\n categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]\n oivchallenge_evaluator = (\n object_detection_evaluation.OpenImagesChallengeEvaluator(\n categories, evaluate_masks=True))\n\n image_key = 'img1'\n groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],\n dtype=float)\n groundtruth_class_labels = np.array([1, 2, 1], dtype=int)\n groundtruth_is_group_of_list = np.array([False, False, True], dtype=bool)\n groundtruth_verified_labels = np.array([1, 2, 3], dtype=int)\n groundtruth_mask_0 = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n dtype=np.uint8)\n zero_mask = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n dtype=np.uint8)\n groundtruth_masks = np.stack([groundtruth_mask_0, zero_mask, zero_mask],\n axis=0)\n oivchallenge_evaluator.add_single_ground_truth_image_info(\n image_key, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels,\n standard_fields.InputDataFields.groundtruth_group_of:\n groundtruth_is_group_of_list,\n standard_fields.InputDataFields.groundtruth_image_classes:\n groundtruth_verified_labels,\n standard_fields.InputDataFields.groundtruth_instance_masks:\n groundtruth_masks\n })\n image_key = 'img3'\n groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)\n groundtruth_class_labels = np.array([2], dtype=int)\n groundtruth_mask_0 = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n dtype=np.uint8)\n groundtruth_masks = np.stack([groundtruth_mask_0], axis=0)\n oivchallenge_evaluator.add_single_ground_truth_image_info(\n image_key, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels,\n standard_fields.InputDataFields.groundtruth_instance_masks:\n groundtruth_masks\n })\n image_key = 'img1'\n detected_boxes = np.array([[0, 0, 2, 2], [2, 2, 3, 3]], dtype=float)\n detection_mask_0 = np.array([[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0]],\n dtype=np.uint8)\n 
detected_masks = np.stack([detection_mask_0, zero_mask], axis=0)\n detected_class_labels = np.array([2, 1], dtype=int)\n detected_scores = np.array([0.7, 0.8], dtype=float)\n oivchallenge_evaluator.add_single_detected_image_info(\n image_key, {\n standard_fields.DetectionResultFields.detection_boxes:\n detected_boxes,\n standard_fields.DetectionResultFields.detection_scores:\n detected_scores,\n standard_fields.DetectionResultFields.detection_classes:\n detected_class_labels,\n standard_fields.DetectionResultFields.detection_masks:\n detected_masks\n })\n image_key = 'img3'\n detected_boxes = np.array([[0, 0, 1, 1]], dtype=float)\n detected_class_labels = np.array([2], dtype=int)\n detected_scores = np.array([0.5], dtype=float)\n detected_mask_0 = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n dtype=np.uint8)\n detected_masks = np.stack([detected_mask_0], axis=0)\n oivchallenge_evaluator.add_single_detected_image_info(\n image_key, {\n standard_fields.DetectionResultFields.detection_boxes:\n detected_boxes,\n standard_fields.DetectionResultFields.detection_scores:\n detected_scores,\n standard_fields.DetectionResultFields.detection_classes:\n detected_class_labels,\n standard_fields.DetectionResultFields.detection_masks:\n detected_masks\n })\n metrics = oivchallenge_evaluator.evaluate()\n expected_metric_name = 'OpenImagesInstanceSegmentationChallenge'\n\n self.assertAlmostEqual(\n metrics[expected_metric_name + '_PerformanceByCategory/[email protected]/dog'],\n 1.0)\n self.assertAlmostEqual(\n metrics[\n expected_metric_name + '_PerformanceByCategory/[email protected]/cat'],\n 0)\n self.assertAlmostEqual(\n metrics[expected_metric_name + '_Precision/[email protected]'], 0.5)\n\n oivchallenge_evaluator.clear()\n self.assertFalse(oivchallenge_evaluator._image_ids)\n\n\nclass PascalEvaluationTest(tf.test.TestCase):\n\n def test_returns_correct_metric_values_on_boxes(self):\n categories = [{'id': 1, 'name': 'cat'},\n {'id': 2, 'name': 'dog'},\n {'id': 3, 'name': 'elephant'}]\n # Add groundtruth\n pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(\n categories)\n image_key1 = 'img1'\n groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],\n dtype=float)\n groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)\n pascal_evaluator.add_single_ground_truth_image_info(\n image_key1,\n {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels1,\n standard_fields.InputDataFields.groundtruth_difficult:\n np.array([], dtype=bool)})\n image_key2 = 'img2'\n groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],\n [10, 10, 12, 12]], dtype=float)\n groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)\n groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)\n pascal_evaluator.add_single_ground_truth_image_info(\n image_key2,\n {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes2,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels2,\n standard_fields.InputDataFields.groundtruth_difficult:\n groundtruth_is_difficult_list2})\n image_key3 = 'img3'\n groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)\n groundtruth_class_labels3 = np.array([2], dtype=int)\n pascal_evaluator.add_single_ground_truth_image_info(\n image_key3,\n {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes3,\n standard_fields.InputDataFields.groundtruth_classes:\n 
groundtruth_class_labels3})\n\n # Add detections\n image_key = 'img2'\n detected_boxes = np.array(\n [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],\n dtype=float)\n detected_class_labels = np.array([1, 1, 3], dtype=int)\n detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)\n pascal_evaluator.add_single_detected_image_info(\n image_key,\n {standard_fields.DetectionResultFields.detection_boxes: detected_boxes,\n standard_fields.DetectionResultFields.detection_scores:\n detected_scores,\n standard_fields.DetectionResultFields.detection_classes:\n detected_class_labels})\n\n metrics = pascal_evaluator.evaluate()\n self.assertAlmostEqual(\n metrics['PascalBoxes_PerformanceByCategory/[email protected]/dog'], 0.0)\n self.assertAlmostEqual(\n metrics['PascalBoxes_PerformanceByCategory/[email protected]/elephant'], 0.0)\n self.assertAlmostEqual(\n metrics['PascalBoxes_PerformanceByCategory/[email protected]/cat'], 0.16666666)\n self.assertAlmostEqual(metrics['PascalBoxes_Precision/[email protected]'],\n 0.05555555)\n pascal_evaluator.clear()\n self.assertFalse(pascal_evaluator._image_ids)\n\n def test_returns_correct_metric_values_on_masks(self):\n categories = [{'id': 1, 'name': 'cat'},\n {'id': 2, 'name': 'dog'},\n {'id': 3, 'name': 'elephant'}]\n # Add groundtruth\n pascal_evaluator = (\n object_detection_evaluation.PascalInstanceSegmentationEvaluator(\n categories))\n image_key1 = 'img1'\n groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],\n dtype=float)\n groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)\n groundtruth_masks_1_0 = np.array([[1, 0, 0, 0],\n [1, 0, 0, 0],\n [1, 0, 0, 0]], dtype=np.uint8)\n groundtruth_masks_1_1 = np.array([[0, 0, 1, 0],\n [0, 0, 1, 0],\n [0, 0, 1, 0]], dtype=np.uint8)\n groundtruth_masks_1_2 = np.array([[0, 1, 0, 0],\n [0, 1, 0, 0],\n [0, 1, 0, 0]], dtype=np.uint8)\n groundtruth_masks1 = np.stack(\n [groundtruth_masks_1_0, groundtruth_masks_1_1, groundtruth_masks_1_2],\n axis=0)\n\n pascal_evaluator.add_single_ground_truth_image_info(\n image_key1, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes1,\n standard_fields.InputDataFields.groundtruth_instance_masks:\n groundtruth_masks1,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels1,\n standard_fields.InputDataFields.groundtruth_difficult:\n np.array([], dtype=bool)\n })\n image_key2 = 'img2'\n groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],\n [10, 10, 12, 12]], dtype=float)\n groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)\n groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)\n groundtruth_masks_2_0 = np.array([[1, 1, 1, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=np.uint8)\n groundtruth_masks_2_1 = np.array([[0, 0, 0, 0],\n [1, 1, 1, 1],\n [0, 0, 0, 0]], dtype=np.uint8)\n groundtruth_masks_2_2 = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [1, 1, 1, 1]], dtype=np.uint8)\n groundtruth_masks2 = np.stack(\n [groundtruth_masks_2_0, groundtruth_masks_2_1, groundtruth_masks_2_2],\n axis=0)\n pascal_evaluator.add_single_ground_truth_image_info(\n image_key2, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes2,\n standard_fields.InputDataFields.groundtruth_instance_masks:\n groundtruth_masks2,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels2,\n standard_fields.InputDataFields.groundtruth_difficult:\n groundtruth_is_difficult_list2\n })\n image_key3 = 'img3'\n groundtruth_boxes3 = np.array([[0, 0, 1, 
1]], dtype=float)\n groundtruth_class_labels3 = np.array([2], dtype=int)\n groundtruth_masks_3_0 = np.array([[1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1]], dtype=np.uint8)\n groundtruth_masks3 = np.stack([groundtruth_masks_3_0], axis=0)\n pascal_evaluator.add_single_ground_truth_image_info(\n image_key3, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes3,\n standard_fields.InputDataFields.groundtruth_instance_masks:\n groundtruth_masks3,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels3\n })\n\n # Add detections\n image_key = 'img2'\n detected_boxes = np.array(\n [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],\n dtype=float)\n detected_class_labels = np.array([1, 1, 3], dtype=int)\n detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)\n detected_masks_0 = np.array([[1, 1, 1, 1],\n [0, 0, 1, 0],\n [0, 0, 0, 0]], dtype=np.uint8)\n detected_masks_1 = np.array([[1, 0, 0, 0],\n [1, 1, 0, 0],\n [0, 0, 0, 0]], dtype=np.uint8)\n detected_masks_2 = np.array([[0, 1, 0, 0],\n [0, 1, 1, 0],\n [0, 1, 0, 0]], dtype=np.uint8)\n detected_masks = np.stack(\n [detected_masks_0, detected_masks_1, detected_masks_2], axis=0)\n\n pascal_evaluator.add_single_detected_image_info(\n image_key, {\n standard_fields.DetectionResultFields.detection_boxes:\n detected_boxes,\n standard_fields.DetectionResultFields.detection_masks:\n detected_masks,\n standard_fields.DetectionResultFields.detection_scores:\n detected_scores,\n standard_fields.DetectionResultFields.detection_classes:\n detected_class_labels\n })\n\n metrics = pascal_evaluator.evaluate()\n\n self.assertAlmostEqual(\n metrics['PascalMasks_PerformanceByCategory/[email protected]/dog'], 0.0)\n self.assertAlmostEqual(\n metrics['PascalMasks_PerformanceByCategory/[email protected]/elephant'], 0.0)\n self.assertAlmostEqual(\n metrics['PascalMasks_PerformanceByCategory/[email protected]/cat'], 0.16666666)\n self.assertAlmostEqual(metrics['PascalMasks_Precision/[email protected]'],\n 0.05555555)\n pascal_evaluator.clear()\n self.assertFalse(pascal_evaluator._image_ids)\n\n def test_value_error_on_duplicate_images(self):\n categories = [{'id': 1, 'name': 'cat'},\n {'id': 2, 'name': 'dog'},\n {'id': 3, 'name': 'elephant'}]\n # Add groundtruth\n pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(\n categories)\n image_key1 = 'img1'\n groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],\n dtype=float)\n groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)\n pascal_evaluator.add_single_ground_truth_image_info(\n image_key1,\n {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels1})\n with self.assertRaises(ValueError):\n pascal_evaluator.add_single_ground_truth_image_info(\n image_key1,\n {standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes1,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels1})\n\n\nclass WeightedPascalEvaluationTest(tf.test.TestCase):\n\n def setUp(self):\n self.categories = [{'id': 1, 'name': 'cat'},\n {'id': 2, 'name': 'dog'},\n {'id': 3, 'name': 'elephant'}]\n\n def create_and_add_common_ground_truth(self):\n # Add groundtruth\n self.wp_eval = (\n object_detection_evaluation.WeightedPascalDetectionEvaluator(\n self.categories))\n\n image_key1 = 'img1'\n groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],\n dtype=float)\n groundtruth_class_labels1 = 
np.array([1, 3, 1], dtype=int)\n self.wp_eval.add_single_ground_truth_image_info(\n image_key1,\n {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels1})\n # add 'img2' separately\n image_key3 = 'img3'\n groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)\n groundtruth_class_labels3 = np.array([2], dtype=int)\n self.wp_eval.add_single_ground_truth_image_info(\n image_key3,\n {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes3,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels3})\n\n def add_common_detected(self):\n image_key = 'img2'\n detected_boxes = np.array(\n [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],\n dtype=float)\n detected_class_labels = np.array([1, 1, 3], dtype=int)\n detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)\n self.wp_eval.add_single_detected_image_info(\n image_key,\n {standard_fields.DetectionResultFields.detection_boxes: detected_boxes,\n standard_fields.DetectionResultFields.detection_scores:\n detected_scores,\n standard_fields.DetectionResultFields.detection_classes:\n detected_class_labels})\n\n def test_returns_correct_metric_values(self):\n self.create_and_add_common_ground_truth()\n image_key2 = 'img2'\n groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],\n [10, 10, 12, 12]], dtype=float)\n groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)\n self.wp_eval.add_single_ground_truth_image_info(\n image_key2,\n {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes2,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels2\n })\n self.add_common_detected()\n\n metrics = self.wp_eval.evaluate()\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/dog'], 0.0)\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/elephant'], 0.0)\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/cat'], 0.5 / 4)\n self.assertAlmostEqual(metrics[self.wp_eval._metric_prefix +\n 'Precision/[email protected]'],\n 1. 
/ (4 + 1 + 2) / 3)\n self.wp_eval.clear()\n self.assertFalse(self.wp_eval._image_ids)\n\n def test_returns_correct_metric_values_with_difficult_list(self):\n self.create_and_add_common_ground_truth()\n image_key2 = 'img2'\n groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],\n [10, 10, 12, 12]], dtype=float)\n groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)\n groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)\n self.wp_eval.add_single_ground_truth_image_info(\n image_key2,\n {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes2,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels2,\n standard_fields.InputDataFields.groundtruth_difficult:\n groundtruth_is_difficult_list2\n })\n self.add_common_detected()\n\n metrics = self.wp_eval.evaluate()\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/dog'], 0.0)\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/elephant'], 0.0)\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/cat'], 0.5 / 3)\n self.assertAlmostEqual(metrics[self.wp_eval._metric_prefix +\n 'Precision/[email protected]'],\n 1. / (3 + 1 + 2) / 3)\n self.wp_eval.clear()\n self.assertFalse(self.wp_eval._image_ids)\n\n def test_value_error_on_duplicate_images(self):\n # Add groundtruth\n self.wp_eval = (\n object_detection_evaluation.WeightedPascalDetectionEvaluator(\n self.categories))\n image_key1 = 'img1'\n groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],\n dtype=float)\n groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)\n self.wp_eval.add_single_ground_truth_image_info(\n image_key1,\n {standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels1})\n with self.assertRaises(ValueError):\n self.wp_eval.add_single_ground_truth_image_info(\n image_key1,\n {standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes1,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels1})\n\n\nclass PrecisionAtRecallEvaluationTest(tf.test.TestCase):\n\n def setUp(self):\n self.categories = [{\n 'id': 1,\n 'name': 'cat'\n }, {\n 'id': 2,\n 'name': 'dog'\n }, {\n 'id': 3,\n 'name': 'elephant'\n }]\n\n def create_and_add_common_ground_truth(self):\n # Add groundtruth\n self.wp_eval = (\n object_detection_evaluation.PrecisionAtRecallDetectionEvaluator(\n self.categories, recall_lower_bound=0.0, recall_upper_bound=0.5))\n\n image_key1 = 'img1'\n groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],\n dtype=float)\n groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)\n self.wp_eval.add_single_ground_truth_image_info(\n image_key1, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes1,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels1\n })\n # add 'img2' separately\n image_key3 = 'img3'\n groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)\n groundtruth_class_labels3 = np.array([2], dtype=int)\n self.wp_eval.add_single_ground_truth_image_info(\n image_key3, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes3,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels3\n })\n\n def add_common_detected(self):\n image_key = 'img2'\n 
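# All detections below belong to 'img2'; each test adds the 'img2' ground\n # truth itself, on top of the shared 'img1'/'img3' ground truth from\n # create_and_add_common_ground_truth().\n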
detected_boxes = np.array(\n [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],\n dtype=float)\n detected_class_labels = np.array([1, 1, 3], dtype=int)\n detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)\n self.wp_eval.add_single_detected_image_info(\n image_key, {\n standard_fields.DetectionResultFields.detection_boxes:\n detected_boxes,\n standard_fields.DetectionResultFields.detection_scores:\n detected_scores,\n standard_fields.DetectionResultFields.detection_classes:\n detected_class_labels\n })\n\n def test_returns_correct_metric_values(self):\n self.create_and_add_common_ground_truth()\n image_key2 = 'img2'\n groundtruth_boxes2 = np.array(\n [[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)\n groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)\n self.wp_eval.add_single_ground_truth_image_info(\n image_key2, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes2,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels2\n })\n self.add_common_detected()\n\n metrics = self.wp_eval.evaluate()\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/dog'], 0.0)\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/elephant'], 0.0)\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/cat'], 0.5 / 4)\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'Precision/[email protected]@[0.0,0.5]Recall'], 1. / (3 + 1 + 2) / 4)\n self.wp_eval.clear()\n self.assertFalse(self.wp_eval._image_ids)\n\n def test_returns_correct_metric_values_with_difficult_list(self):\n self.create_and_add_common_ground_truth()\n image_key2 = 'img2'\n groundtruth_boxes2 = np.array(\n [[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)\n groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)\n groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)\n self.wp_eval.add_single_ground_truth_image_info(\n image_key2, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes2,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels2,\n standard_fields.InputDataFields.groundtruth_difficult:\n groundtruth_is_difficult_list2\n })\n self.add_common_detected()\n\n metrics = self.wp_eval.evaluate()\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/dog'], 0.0)\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/elephant'], 0.0)\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'PerformanceByCategory/[email protected]/cat'], 0.5 / 3)\n self.assertAlmostEqual(\n metrics[self.wp_eval._metric_prefix +\n 'Precision/[email protected]@[0.0,0.5]Recall'], 1. 
/ (3 + 1 + 2) / 3)\n self.wp_eval.clear()\n self.assertFalse(self.wp_eval._image_ids)\n\n def test_value_error_on_duplicate_images(self):\n # Add groundtruth\n self.wp_eval = (\n object_detection_evaluation.PrecisionAtRecallDetectionEvaluator(\n self.categories, recall_lower_bound=0.0, recall_upper_bound=0.5))\n image_key1 = 'img1'\n groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],\n dtype=float)\n groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)\n self.wp_eval.add_single_ground_truth_image_info(\n image_key1, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes1,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels1\n })\n with self.assertRaises(ValueError):\n self.wp_eval.add_single_ground_truth_image_info(\n image_key1, {\n standard_fields.InputDataFields.groundtruth_boxes:\n groundtruth_boxes1,\n standard_fields.InputDataFields.groundtruth_classes:\n groundtruth_class_labels1\n })\n\n\nclass ObjectDetectionEvaluationTest(tf.test.TestCase):\n\n def setUp(self):\n num_groundtruth_classes = 3\n self.od_eval = object_detection_evaluation.ObjectDetectionEvaluation(\n num_groundtruth_classes)\n\n image_key1 = 'img1'\n groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],\n dtype=float)\n groundtruth_class_labels1 = np.array([0, 2, 0], dtype=int)\n self.od_eval.add_single_ground_truth_image_info(\n image_key1, groundtruth_boxes1, groundtruth_class_labels1)\n image_key2 = 'img2'\n groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],\n [10, 10, 12, 12]], dtype=float)\n groundtruth_class_labels2 = np.array([0, 0, 2], dtype=int)\n groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)\n groundtruth_is_group_of_list2 = np.array([False, False, True], dtype=bool)\n self.od_eval.add_single_ground_truth_image_info(\n image_key2, groundtruth_boxes2, groundtruth_class_labels2,\n groundtruth_is_difficult_list2, groundtruth_is_group_of_list2)\n\n image_key3 = 'img3'\n groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)\n groundtruth_class_labels3 = np.array([1], dtype=int)\n self.od_eval.add_single_ground_truth_image_info(\n image_key3, groundtruth_boxes3, groundtruth_class_labels3)\n\n image_key = 'img2'\n detected_boxes = np.array(\n [[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],\n dtype=float)\n detected_class_labels = np.array([0, 0, 2], dtype=int)\n detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)\n self.od_eval.add_single_detected_image_info(\n image_key, detected_boxes, detected_scores, detected_class_labels)\n\n def test_value_error_on_zero_classes(self):\n with self.assertRaises(ValueError):\n object_detection_evaluation.ObjectDetectionEvaluation(\n num_groundtruth_classes=0)\n\n def test_add_single_ground_truth_image_info(self):\n expected_num_gt_instances_per_class = np.array([3, 1, 1], dtype=int)\n expected_num_gt_imgs_per_class = np.array([2, 1, 2], dtype=int)\n self.assertTrue(np.array_equal(expected_num_gt_instances_per_class,\n self.od_eval.num_gt_instances_per_class))\n self.assertTrue(np.array_equal(expected_num_gt_imgs_per_class,\n self.od_eval.num_gt_imgs_per_class))\n groundtruth_boxes2 = np.array([[10, 10, 11, 11], [500, 500, 510, 510],\n [10, 10, 12, 12]], dtype=float)\n self.assertTrue(np.allclose(self.od_eval.groundtruth_boxes['img2'],\n groundtruth_boxes2))\n groundtruth_is_difficult_list2 = np.array([False, True, False], dtype=bool)\n self.assertTrue(np.allclose(\n 
self.od_eval.groundtruth_is_difficult_list['img2'],\n groundtruth_is_difficult_list2))\n groundtruth_is_group_of_list2 = np.array([False, False, True], dtype=bool)\n self.assertTrue(\n np.allclose(self.od_eval.groundtruth_is_group_of_list['img2'],\n groundtruth_is_group_of_list2))\n\n groundtruth_class_labels1 = np.array([0, 2, 0], dtype=int)\n self.assertTrue(np.array_equal(self.od_eval.groundtruth_class_labels[\n 'img1'], groundtruth_class_labels1))\n\n def test_add_single_detected_image_info(self):\n expected_scores_per_class = [[np.array([0.8, 0.7], dtype=float)], [],\n [np.array([0.9], dtype=float)]]\n expected_tp_fp_labels_per_class = [[np.array([0, 1], dtype=bool)], [],\n [np.array([0], dtype=bool)]]\n expected_num_images_correctly_detected_per_class = np.array([0, 0, 0],\n dtype=int)\n for i in range(self.od_eval.num_class):\n for j in range(len(expected_scores_per_class[i])):\n self.assertTrue(np.allclose(expected_scores_per_class[i][j],\n self.od_eval.scores_per_class[i][j]))\n self.assertTrue(np.array_equal(expected_tp_fp_labels_per_class[i][\n j], self.od_eval.tp_fp_labels_per_class[i][j]))\n self.assertTrue(np.array_equal(\n expected_num_images_correctly_detected_per_class,\n self.od_eval.num_images_correctly_detected_per_class))\n\n def test_evaluate(self):\n (average_precision_per_class, mean_ap, precisions_per_class,\n recalls_per_class, corloc_per_class,\n mean_corloc) = self.od_eval.evaluate()\n expected_precisions_per_class = [np.array([0, 0.5], dtype=float),\n np.array([], dtype=float),\n np.array([0], dtype=float)]\n expected_recalls_per_class = [\n np.array([0, 1. / 3.], dtype=float), np.array([], dtype=float),\n np.array([0], dtype=float)\n ]\n expected_average_precision_per_class = np.array([1. / 6., 0, 0],\n dtype=float)\n expected_corloc_per_class = np.array([0, 0, 0], dtype=float)\n expected_mean_ap = 1. 
/ 18\n expected_mean_corloc = 0.0\n for i in range(self.od_eval.num_class):\n self.assertTrue(np.allclose(expected_precisions_per_class[i],\n precisions_per_class[i]))\n self.assertTrue(np.allclose(expected_recalls_per_class[i],\n recalls_per_class[i]))\n self.assertTrue(np.allclose(expected_average_precision_per_class,\n average_precision_per_class))\n self.assertTrue(np.allclose(expected_corloc_per_class, corloc_per_class))\n self.assertAlmostEqual(expected_mean_ap, mean_ap)\n self.assertAlmostEqual(expected_mean_corloc, mean_corloc)\n\n def test_merge_internal_state(self):\n # Test that if initial state is merged, the results of the evaluation are\n # the same.\n od_eval_state = self.od_eval.get_internal_state()\n copy_od_eval = object_detection_evaluation.ObjectDetectionEvaluation(\n self.od_eval.num_class)\n copy_od_eval.merge_internal_state(od_eval_state)\n\n (average_precision_per_class, mean_ap, precisions_per_class,\n recalls_per_class, corloc_per_class,\n mean_corloc) = self.od_eval.evaluate()\n\n (copy_average_precision_per_class, copy_mean_ap, copy_precisions_per_class,\n copy_recalls_per_class, copy_corloc_per_class,\n copy_mean_corloc) = copy_od_eval.evaluate()\n\n for i in range(self.od_eval.num_class):\n self.assertTrue(\n np.allclose(copy_precisions_per_class[i], precisions_per_class[i]))\n self.assertTrue(\n np.allclose(copy_recalls_per_class[i], recalls_per_class[i]))\n self.assertTrue(\n np.allclose(copy_average_precision_per_class,\n average_precision_per_class))\n self.assertTrue(np.allclose(copy_corloc_per_class, corloc_per_class))\n self.assertAlmostEqual(copy_mean_ap, mean_ap)\n self.assertAlmostEqual(copy_mean_corloc, mean_corloc)\n\n\[email protected](tf_version.is_tf2(), 'Eval Metrics ops are supported in TF1.X '\n 'only.')\nclass ObjectDetectionEvaluatorTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n self.categories = [{\n 'id': 1,\n 'name': 'person'\n }, {\n 'id': 2,\n 'name': 'dog'\n }, {\n 'id': 3,\n 'name': 'cat'\n }]\n self.od_eval = object_detection_evaluation.ObjectDetectionEvaluator(\n categories=self.categories)\n\n def _make_evaluation_dict(self,\n resized_groundtruth_masks=False,\n batch_size=1,\n max_gt_boxes=None,\n scale_to_absolute=False):\n input_data_fields = standard_fields.InputDataFields\n detection_fields = standard_fields.DetectionResultFields\n\n image = tf.zeros(shape=[batch_size, 20, 20, 3], dtype=tf.uint8)\n if batch_size == 1:\n key = tf.constant('image1')\n else:\n key = tf.constant([str(i) for i in range(batch_size)])\n detection_boxes = tf.concat([\n tf.tile(\n tf.constant([[[0., 0., 1., 1.]]]), multiples=[batch_size - 1, 1, 1\n ]),\n tf.constant([[[0., 0., 0.5, 0.5]]])\n ],\n axis=0)\n detection_scores = tf.concat([\n tf.tile(tf.constant([[0.5]]), multiples=[batch_size - 1, 1]),\n tf.constant([[0.8]])\n ],\n axis=0)\n detection_classes = tf.tile(tf.constant([[0]]), multiples=[batch_size, 1])\n detection_masks = tf.tile(\n tf.ones(shape=[1, 2, 20, 20], dtype=tf.float32),\n multiples=[batch_size, 1, 1, 1])\n groundtruth_boxes = tf.constant([[0., 0., 1., 1.]])\n groundtruth_classes = tf.constant([1])\n groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8)\n num_detections = tf.ones([batch_size])\n if resized_groundtruth_masks:\n groundtruth_instance_masks = tf.ones(shape=[1, 10, 10], dtype=tf.uint8)\n\n if batch_size > 1:\n groundtruth_boxes = tf.tile(\n tf.expand_dims(groundtruth_boxes, 0), multiples=[batch_size, 1, 1])\n groundtruth_classes = tf.tile(\n tf.expand_dims(groundtruth_classes, 
0), multiples=[batch_size, 1])\n groundtruth_instance_masks = tf.tile(\n tf.expand_dims(groundtruth_instance_masks, 0),\n multiples=[batch_size, 1, 1, 1])\n\n detections = {\n detection_fields.detection_boxes: detection_boxes,\n detection_fields.detection_scores: detection_scores,\n detection_fields.detection_classes: detection_classes,\n detection_fields.detection_masks: detection_masks,\n detection_fields.num_detections: num_detections\n }\n groundtruth = {\n input_data_fields.groundtruth_boxes:\n groundtruth_boxes,\n input_data_fields.groundtruth_classes:\n groundtruth_classes,\n input_data_fields.groundtruth_instance_masks:\n groundtruth_instance_masks,\n }\n if batch_size > 1:\n return eval_util.result_dict_for_batched_example(\n image,\n key,\n detections,\n groundtruth,\n scale_to_absolute=scale_to_absolute,\n max_gt_boxes=max_gt_boxes)\n else:\n return eval_util.result_dict_for_single_example(\n image,\n key,\n detections,\n groundtruth,\n scale_to_absolute=scale_to_absolute)\n\n @parameterized.parameters({\n 'batch_size': 1,\n 'expected_map': 0,\n 'max_gt_boxes': None,\n 'scale_to_absolute': True\n }, {\n 'batch_size': 8,\n 'expected_map': 0.765625,\n 'max_gt_boxes': [1],\n 'scale_to_absolute': True\n }, {\n 'batch_size': 1,\n 'expected_map': 0,\n 'max_gt_boxes': None,\n 'scale_to_absolute': False\n }, {\n 'batch_size': 8,\n 'expected_map': 0.765625,\n 'max_gt_boxes': [1],\n 'scale_to_absolute': False\n })\n def test_get_estimator_eval_metric_ops(self,\n batch_size=1,\n expected_map=1,\n max_gt_boxes=None,\n scale_to_absolute=False):\n\n eval_dict = self._make_evaluation_dict(\n batch_size=batch_size,\n max_gt_boxes=max_gt_boxes,\n scale_to_absolute=scale_to_absolute)\n tf.logging.info('eval_dict: {}'.format(eval_dict))\n metric_ops = self.od_eval.get_estimator_eval_metric_ops(eval_dict)\n _, update_op = metric_ops['Precision/[email protected]']\n\n with self.test_session() as sess:\n metrics = {}\n for key, (value_op, _) in six.iteritems(metric_ops):\n metrics[key] = value_op\n sess.run(update_op)\n metrics = sess.run(metrics)\n self.assertAlmostEqual(expected_map, metrics['Precision/[email protected]'])\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Main function to train various object detection models.\"\"\"\n\nimport functools\nimport pprint\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.common import distribute_utils\nfrom official.modeling.hyperparams import params_dict\nfrom official.modeling.training import distributed_executor as executor\nfrom official.utils import hyperparams_flags\nfrom official.utils.flags import core as flags_core\nfrom official.utils.misc import keras_utils\nfrom official.vision.detection.configs import factory as config_factory\nfrom official.vision.detection.dataloader import input_reader\nfrom official.vision.detection.dataloader import mode_keys as ModeKeys\nfrom official.vision.detection.executor.detection_executor import DetectionDistributedExecutor\nfrom official.vision.detection.modeling import factory as model_factory\n\nhyperparams_flags.initialize_common_flags()\nflags_core.define_log_steps()\n\nflags.DEFINE_bool('enable_xla', default=False, help='Enable XLA for GPU')\n\nflags.DEFINE_string(\n 'mode', default='train', help='Mode to run: `train` or `eval`.')\n\nflags.DEFINE_string(\n 'model', default='retinanet',\n help='Model to run: `retinanet`, `mask_rcnn` or `shapemask`.')\n\nflags.DEFINE_string('training_file_pattern', None,\n 'Location of the train data.')\n\nflags.DEFINE_string('eval_file_pattern', None, 'Location of ther eval data')\n\nflags.DEFINE_string(\n 'checkpoint_path', None,\n 'The checkpoint path to eval. 
Only used in eval_once mode.')\n\nFLAGS = flags.FLAGS\n\n\ndef run_executor(params,\n mode,\n checkpoint_path=None,\n train_input_fn=None,\n eval_input_fn=None,\n callbacks=None,\n prebuilt_strategy=None):\n \"\"\"Runs the object detection model on distribution strategy defined by the user.\"\"\"\n\n if params.architecture.use_bfloat16:\n policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(\n 'mixed_bfloat16')\n tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)\n\n model_builder = model_factory.model_generator(params)\n\n if prebuilt_strategy is not None:\n strategy = prebuilt_strategy\n else:\n strategy_config = params.strategy_config\n distribute_utils.configure_cluster(strategy_config.worker_hosts,\n strategy_config.task_index)\n strategy = distribute_utils.get_distribution_strategy(\n distribution_strategy=params.strategy_type,\n num_gpus=strategy_config.num_gpus,\n all_reduce_alg=strategy_config.all_reduce_alg,\n num_packs=strategy_config.num_packs,\n tpu_address=strategy_config.tpu)\n\n num_workers = int(strategy.num_replicas_in_sync + 7) // 8\n is_multi_host = (int(num_workers) >= 2)\n\n if mode == 'train':\n\n def _model_fn(params):\n return model_builder.build_model(params, mode=ModeKeys.TRAIN)\n\n logging.info(\n 'Train num_replicas_in_sync %d num_workers %d is_multi_host %s',\n strategy.num_replicas_in_sync, num_workers, is_multi_host)\n\n dist_executor = DetectionDistributedExecutor(\n strategy=strategy,\n params=params,\n model_fn=_model_fn,\n loss_fn=model_builder.build_loss_fn,\n is_multi_host=is_multi_host,\n predict_post_process_fn=model_builder.post_processing,\n trainable_variables_filter=model_builder\n .make_filter_trainable_variables_fn())\n\n if is_multi_host:\n train_input_fn = functools.partial(\n train_input_fn,\n batch_size=params.train.batch_size // strategy.num_replicas_in_sync)\n\n return dist_executor.train(\n train_input_fn=train_input_fn,\n model_dir=params.model_dir,\n iterations_per_loop=params.train.iterations_per_loop,\n total_steps=params.train.total_steps,\n init_checkpoint=model_builder.make_restore_checkpoint_fn(),\n custom_callbacks=callbacks,\n save_config=True)\n elif mode == 'eval' or mode == 'eval_once':\n\n def _model_fn(params):\n return model_builder.build_model(params, mode=ModeKeys.PREDICT_WITH_GT)\n\n logging.info('Eval num_replicas_in_sync %d num_workers %d is_multi_host %s',\n strategy.num_replicas_in_sync, num_workers, is_multi_host)\n\n if is_multi_host:\n eval_input_fn = functools.partial(\n eval_input_fn,\n batch_size=params.eval.batch_size // strategy.num_replicas_in_sync)\n\n dist_executor = DetectionDistributedExecutor(\n strategy=strategy,\n params=params,\n model_fn=_model_fn,\n loss_fn=model_builder.build_loss_fn,\n is_multi_host=is_multi_host,\n predict_post_process_fn=model_builder.post_processing,\n trainable_variables_filter=model_builder\n .make_filter_trainable_variables_fn())\n\n if mode == 'eval':\n results = dist_executor.evaluate_from_model_dir(\n model_dir=params.model_dir,\n eval_input_fn=eval_input_fn,\n eval_metric_fn=model_builder.eval_metrics,\n eval_timeout=params.eval.eval_timeout,\n min_eval_interval=params.eval.min_eval_interval,\n total_steps=params.train.total_steps)\n else:\n # Run evaluation once for a single checkpoint.\n if not checkpoint_path:\n raise ValueError('checkpoint_path cannot be empty.')\n if tf.io.gfile.isdir(checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)\n summary_writer = executor.SummaryWriter(params.model_dir, 'eval')\n 
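# A directory argument was resolved to its latest checkpoint above, so a\n # single fixed checkpoint is evaluated and its metrics are logged below.\n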
results, _ = dist_executor.evaluate_checkpoint(\n checkpoint_path=checkpoint_path,\n eval_input_fn=eval_input_fn,\n eval_metric_fn=model_builder.eval_metrics,\n summary_writer=summary_writer)\n for k, v in results.items():\n logging.info('Final eval metric %s: %f', k, v)\n return results\n else:\n raise ValueError('Mode not found: %s.' % mode)\n\n\ndef run(callbacks=None):\n keras_utils.set_session_config(enable_xla=FLAGS.enable_xla)\n\n params = config_factory.config_generator(FLAGS.model)\n\n params = params_dict.override_params_dict(\n params, FLAGS.config_file, is_strict=True)\n\n params = params_dict.override_params_dict(\n params, FLAGS.params_override, is_strict=True)\n params.override(\n {\n 'strategy_type': FLAGS.strategy_type,\n 'model_dir': FLAGS.model_dir,\n 'strategy_config': executor.strategy_flags_dict(),\n },\n is_strict=False)\n\n # Make sure use_tpu and strategy_type are in sync.\n params.use_tpu = (params.strategy_type == 'tpu')\n\n if not params.use_tpu:\n params.override({\n 'architecture': {\n 'use_bfloat16': False,\n },\n 'norm_activation': {\n 'use_sync_bn': False,\n },\n }, is_strict=True)\n\n params.validate()\n params.lock()\n pp = pprint.PrettyPrinter()\n params_str = pp.pformat(params.as_dict())\n logging.info('Model Parameters: %s', params_str)\n\n train_input_fn = None\n eval_input_fn = None\n training_file_pattern = FLAGS.training_file_pattern or params.train.train_file_pattern\n eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern\n if not training_file_pattern and not eval_file_pattern:\n raise ValueError('Must provide at least one of training_file_pattern and '\n 'eval_file_pattern.')\n\n if training_file_pattern:\n # Use global batch size for single host.\n train_input_fn = input_reader.InputFn(\n file_pattern=training_file_pattern,\n params=params,\n mode=input_reader.ModeKeys.TRAIN,\n batch_size=params.train.batch_size)\n\n if eval_file_pattern:\n eval_input_fn = input_reader.InputFn(\n file_pattern=eval_file_pattern,\n params=params,\n mode=input_reader.ModeKeys.PREDICT_WITH_GT,\n batch_size=params.eval.batch_size,\n num_examples=params.eval.eval_samples)\n\n if callbacks is None:\n callbacks = []\n\n if FLAGS.log_steps:\n callbacks.append(\n keras_utils.TimeHistory(\n batch_size=params.train.batch_size,\n log_steps=FLAGS.log_steps,\n ))\n\n return run_executor(\n params,\n FLAGS.mode,\n checkpoint_path=FLAGS.checkpoint_path,\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n callbacks=callbacks)\n\n\ndef main(argv):\n del argv # Unused.\n\n run()\n\n\nif __name__ == '__main__':\n tf.config.set_soft_device_placement(True)\n app.run(main)\n",
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"SSD Keras-based MobilenetV2 FPN Feature Extractor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import range\nimport tensorflow.compat.v1 as tf\n\nfrom object_detection.meta_architectures import ssd_meta_arch\nfrom object_detection.models import feature_map_generators\nfrom object_detection.models.keras_models import mobilenet_v2\nfrom object_detection.models.keras_models import model_utils\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\n\n# Total number of blocks in Mobilenet_V2 base network.\nNUM_LAYERS = 19\n\n\n# A modified config of mobilenet v2 that makes it more detection friendly.\ndef _create_modified_mobilenet_config():\n last_conv = model_utils.ConvDefs(conv_name='Conv_1', filters=256)\n return [last_conv]\n\n\nclass SSDMobileNetV2FpnKerasFeatureExtractor(\n ssd_meta_arch.SSDKerasFeatureExtractor):\n \"\"\"SSD Feature Extractor using Keras-based MobilenetV2 FPN features.\"\"\"\n\n def __init__(self,\n is_training,\n depth_multiplier,\n min_depth,\n pad_to_multiple,\n conv_hyperparams,\n freeze_batchnorm,\n inplace_batchnorm_update,\n fpn_min_level=3,\n fpn_max_level=7,\n additional_layer_depth=256,\n reuse_weights=None,\n use_explicit_padding=False,\n use_depthwise=False,\n use_native_resize_op=False,\n override_base_feature_extractor_hyperparams=False,\n name=None):\n \"\"\"SSD Keras based FPN feature extractor Mobilenet v2 architecture.\n\n Args:\n is_training: whether the network is in training mode.\n depth_multiplier: float depth multiplier for feature extractor.\n min_depth: minimum feature extractor depth.\n pad_to_multiple: the nearest multiple to zero pad the input height and\n width dimensions to.\n conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object\n containing convolution hyperparameters for the layers added on top of\n the base feature extractor.\n freeze_batchnorm: whether to freeze batch norm parameters during\n training or not. When training with a small batch size (e.g. 1), it is\n desirable to freeze batch norm update and use pretrained batch norm\n params.\n inplace_batchnorm_update: whether to update batch norm moving average\n values inplace. When this is false train op must add a control\n dependency on tf.graphkeys.UPDATE_OPS collection in order to update\n batch norm statistics.\n fpn_min_level: the highest resolution feature map to use in FPN. The valid\n values are {2, 3, 4, 5} which map to MobileNet v2 layers\n {layer_4, layer_7, layer_14, layer_19}, respectively.\n fpn_max_level: the smallest resolution feature map to construct or use in\n FPN. FPN constructions uses features maps starting from fpn_min_level\n upto the fpn_max_level. 
In the case that there are not enough feature\n maps in the backbone network, additional feature maps are created by\n applying stride 2 convolutions until we get the desired number of fpn\n levels.\n additional_layer_depth: additional feature map layer channel depth.\n reuse_weights: whether to reuse variables. Default is None.\n use_explicit_padding: Whether to use explicit padding when extracting\n features. Default is False.\n use_depthwise: Whether to use depthwise convolutions. Default is False.\n use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize\n to do upsampling in FPN. Default is false.\n override_base_feature_extractor_hyperparams: Whether to override\n hyperparameters of the base feature extractor with the one from\n `conv_hyperparams`.\n name: a string name scope to assign to the model. If 'None', Keras\n will auto-generate one from the class name.\n \"\"\"\n super(SSDMobileNetV2FpnKerasFeatureExtractor, self).__init__(\n is_training=is_training,\n depth_multiplier=depth_multiplier,\n min_depth=min_depth,\n pad_to_multiple=pad_to_multiple,\n conv_hyperparams=conv_hyperparams,\n freeze_batchnorm=freeze_batchnorm,\n inplace_batchnorm_update=inplace_batchnorm_update,\n use_explicit_padding=use_explicit_padding,\n use_depthwise=use_depthwise,\n override_base_feature_extractor_hyperparams=\n override_base_feature_extractor_hyperparams,\n name=name)\n self._fpn_min_level = fpn_min_level\n self._fpn_max_level = fpn_max_level\n self._additional_layer_depth = additional_layer_depth\n self._conv_defs = None\n if self._use_depthwise:\n self._conv_defs = _create_modified_mobilenet_config()\n self._use_native_resize_op = use_native_resize_op\n self._feature_blocks = ['layer_4', 'layer_7', 'layer_14', 'layer_19']\n self.classification_backbone = None\n self._fpn_features_generator = None\n self._coarse_feature_layers = []\n\n def build(self, input_shape):\n full_mobilenet_v2 = mobilenet_v2.mobilenet_v2(\n batchnorm_training=(self._is_training and not self._freeze_batchnorm),\n conv_hyperparams=(self._conv_hyperparams\n if self._override_base_feature_extractor_hyperparams\n else None),\n weights=None,\n use_explicit_padding=self._use_explicit_padding,\n alpha=self._depth_multiplier,\n min_depth=self._min_depth,\n include_top=False)\n layer_names = [layer.name for layer in full_mobilenet_v2.layers]\n outputs = []\n for layer_idx in [4, 7, 14]:\n add_name = 'block_{}_add'.format(layer_idx - 2)\n project_name = 'block_{}_project_BN'.format(layer_idx - 2)\n output_layer_name = add_name if add_name in layer_names else project_name\n outputs.append(full_mobilenet_v2.get_layer(output_layer_name).output)\n layer_19 = full_mobilenet_v2.get_layer(name='out_relu').output\n outputs.append(layer_19)\n self.classification_backbone = tf.keras.Model(\n inputs=full_mobilenet_v2.inputs,\n outputs=outputs)\n # pylint:disable=g-long-lambda\n self._depth_fn = lambda d: max(\n int(d * self._depth_multiplier), self._min_depth)\n self._base_fpn_max_level = min(self._fpn_max_level, 5)\n self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level\n self._fpn_features_generator = (\n feature_map_generators.KerasFpnTopDownFeatureMaps(\n num_levels=self._num_levels,\n depth=self._depth_fn(self._additional_layer_depth),\n use_depthwise=self._use_depthwise,\n use_explicit_padding=self._use_explicit_padding,\n use_native_resize_op=self._use_native_resize_op,\n is_training=self._is_training,\n conv_hyperparams=self._conv_hyperparams,\n freeze_batchnorm=self._freeze_batchnorm,\n 
name='FeatureMaps'))\n # Construct coarse feature layers\n padding = 'VALID' if self._use_explicit_padding else 'SAME'\n kernel_size = 3\n stride = 2\n for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1):\n coarse_feature_layers = []\n if self._use_explicit_padding:\n def fixed_padding(features, kernel_size=kernel_size):\n return ops.fixed_padding(features, kernel_size)\n coarse_feature_layers.append(tf.keras.layers.Lambda(\n fixed_padding, name='fixed_padding'))\n layer_name = 'bottom_up_Conv2d_{}'.format(\n i - self._base_fpn_max_level + NUM_LAYERS)\n conv_block = feature_map_generators.create_conv_block(\n self._use_depthwise, kernel_size, padding, stride, layer_name,\n self._conv_hyperparams, self._is_training, self._freeze_batchnorm,\n self._depth_fn(self._additional_layer_depth))\n coarse_feature_layers.extend(conv_block)\n self._coarse_feature_layers.append(coarse_feature_layers)\n self.built = True\n\n def preprocess(self, resized_inputs):\n \"\"\"SSD preprocessing.\n\n Maps pixel values to the range [-1, 1].\n\n Args:\n resized_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n \"\"\"\n return (2.0 / 255.0) * resized_inputs - 1.0\n\n def _extract_features(self, preprocessed_inputs):\n \"\"\"Extract features from preprocessed inputs.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n feature_maps: a list of tensors where the ith tensor has shape\n [batch, height_i, width_i, depth_i]\n \"\"\"\n preprocessed_inputs = shape_utils.check_min_image_dim(\n 33, preprocessed_inputs)\n\n image_features = self.classification_backbone(\n ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))\n\n feature_block_list = []\n for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):\n feature_block_list.append(self._feature_blocks[level - 2])\n\n feature_start_index = len(self._feature_blocks) - self._num_levels\n fpn_input_image_features = [\n (key, image_features[feature_start_index + index])\n for index, key in enumerate(feature_block_list)]\n fpn_features = self._fpn_features_generator(fpn_input_image_features)\n\n feature_maps = []\n for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):\n feature_maps.append(fpn_features['top_down_{}'.format(\n self._feature_blocks[level - 2])])\n last_feature_map = fpn_features['top_down_{}'.format(\n self._feature_blocks[self._base_fpn_max_level - 2])]\n\n for coarse_feature_layers in self._coarse_feature_layers:\n for layer in coarse_feature_layers:\n last_feature_map = layer(last_feature_map)\n feature_maps.append(last_feature_map)\n return feature_maps\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for XLNet classifier network.\"\"\"\n\nfrom absl.testing import parameterized\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import\nfrom official.nlp.modeling import networks\nfrom official.nlp.modeling.models import xlnet\n\n\ndef _get_xlnet_base() -> tf.keras.layers.Layer:\n \"\"\"Returns a trivial base XLNet model.\"\"\"\n return networks.XLNetBase(\n vocab_size=100,\n num_layers=2,\n hidden_size=4,\n num_attention_heads=2,\n head_size=2,\n inner_size=2,\n dropout_rate=0.,\n attention_dropout_rate=0.,\n attention_type='bi',\n bi_data=True,\n initializer=tf.keras.initializers.RandomNormal(stddev=0.1),\n two_stream=False,\n tie_attention_biases=True,\n reuse_length=0,\n inner_activation='relu')\n\n\n# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It\n# guarantees forward compatibility of this code for the V2 switchover.\n@keras_parameterized.run_all_keras_modes\nclass XLNetClassifierTest(keras_parameterized.TestCase):\n\n def test_xlnet_trainer(self):\n \"\"\"Validate that the Keras object can be created.\"\"\"\n num_classes = 2\n seq_length = 4\n # Build a simple XLNet based network to use with the XLNet trainer.\n xlnet_base = _get_xlnet_base()\n\n # Create an XLNet trainer with the created network.\n xlnet_trainer_model = xlnet.XLNetClassifier(\n network=xlnet_base,\n num_classes=num_classes,\n initializer=tf.keras.initializers.RandomNormal(stddev=0.1),\n summary_type='last',\n dropout_rate=0.1)\n inputs = dict(\n input_ids=tf.keras.layers.Input(\n shape=(seq_length,), dtype=tf.int32, name='input_word_ids'),\n segment_ids=tf.keras.layers.Input(\n shape=(seq_length,), dtype=tf.int32, name='segment_ids'),\n input_mask=tf.keras.layers.Input(\n shape=(seq_length,), dtype=tf.float32, name='input_mask'),\n permutation_mask=tf.keras.layers.Input(\n shape=(seq_length, seq_length,), dtype=tf.float32,\n name='permutation_mask'),\n masked_tokens=tf.keras.layers.Input(\n shape=(seq_length,), dtype=tf.float32, name='masked_tokens'))\n\n logits, _ = xlnet_trainer_model(inputs)\n\n expected_classification_shape = [None, num_classes]\n self.assertAllEqual(expected_classification_shape, logits.shape.as_list())\n\n @parameterized.parameters(1, 2)\n def test_xlnet_tensor_call(self, num_classes):\n \"\"\"Validates that the Keras object can be invoked.\"\"\"\n seq_length = 4\n batch_size = 2\n # Build a simple XLNet based network to use with the XLNet trainer.\n xlnet_base = _get_xlnet_base()\n\n # Create an XLNet trainer with the created network.\n xlnet_trainer_model = xlnet.XLNetClassifier(\n network=xlnet_base,\n num_classes=num_classes,\n initializer=tf.keras.initializers.RandomNormal(stddev=0.1),\n summary_type='last',\n dropout_rate=0.1)\n\n sequence_shape = (batch_size, 
seq_length)\n inputs = dict(\n input_ids=np.random.randint(10, size=sequence_shape, dtype='int32'),\n segment_ids=np.random.randint(2, size=sequence_shape, dtype='int32'),\n input_mask=np.random.randint(2, size=sequence_shape).astype('float32'),\n permutation_mask=np.random.randint(\n 2, size=(batch_size, seq_length, seq_length)).astype('float32'),\n masked_tokens=tf.random.uniform(shape=sequence_shape))\n xlnet_trainer_model(inputs)\n\n def test_serialize_deserialize(self):\n \"\"\"Validates that the XLNet trainer can be serialized and deserialized.\"\"\"\n # Build a simple XLNet based network to use with the XLNet trainer.\n xlnet_base = _get_xlnet_base()\n\n # Create an XLNet trainer with the created network.\n xlnet_trainer_model = xlnet.XLNetClassifier(\n network=xlnet_base,\n num_classes=2,\n initializer=tf.keras.initializers.RandomNormal(stddev=0.1),\n summary_type='last',\n dropout_rate=0.1)\n\n # Create another XLNet trainer via serialization and deserialization.\n config = xlnet_trainer_model.get_config()\n new_xlnet_trainer_model = xlnet.XLNetClassifier.from_config(\n config)\n\n # Validate that the config can be forced to JSON.\n _ = new_xlnet_trainer_model.to_json()\n\n # If serialization was successful, then the new config should match the old.\n self.assertAllEqual(xlnet_trainer_model.get_config(),\n new_xlnet_trainer_model.get_config())\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2020 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# python3\n\"\"\"Common layer creator.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow.python.training import moving_averages # pylint: disable=g-direct-tensorflow-import\n\n\nclass CommonLayers(object):\n \"\"\"A base class that defines TfLite compatible NN layers.\"\"\"\n\n def __init__(self,\n mode,\n regularizer_scale=0.0,\n weights_initializer=tf.keras.initializers.glorot_uniform(),\n quantization_enabled=True):\n \"\"\"PoDLayers constructor.\n\n Args:\n mode: Graph creation mode.\n regularizer_scale: Optional regularizer for the weights.\n weights_initializer: Optional initializer for the weights.\n quantization_enabled: Enables quantization of weights and activation in\n the DNN.\n \"\"\"\n self._mode = mode\n self._regularizer_scale = regularizer_scale\n self._weights_initializer = weights_initializer\n self._quantization_enabled = quantization_enabled\n # Batch normalization is the default normalization scheme.\n self._normalizer = self.batch_normalization\n self._moment_fn = None\n\n def qrange_sigmoid(self, tensor):\n \"\"\"Quantize the tensor in sigmoid range (0.0, 1.0).\"\"\"\n return tf.fake_quant_with_min_max_args(\n tensor, 0.0, 1.0) if self._quantization_enabled else tensor\n\n def qrange_tanh(self, tensor):\n \"\"\"Quantize the tensor in tanh range (-1.0, 1.0).\"\"\"\n return tf.fake_quant_with_min_max_args(\n tensor, -1.0, 1.0) if self._quantization_enabled else tensor\n\n def _quantized_tanh(self, tensor):\n \"\"\"Apply tanh op and quantize in the range (-1.0, 1.0).\"\"\"\n return self.qrange_tanh(tf.tanh(tensor))\n\n def _quantized_sigmoid(self, tensor):\n \"\"\"Apply sigmoid op and quantize in the range (0.0, 1.0).\"\"\"\n return self.qrange_sigmoid(tf.sigmoid(tensor))\n\n def set_moment_fn(self, moment_fn):\n \"\"\"Set a moment function that will be used by batch norm.\"\"\"\n self._moment_fn = moment_fn\n\n def set_regularizer_scale(self, regularizer_scale):\n \"\"\"Override / set a new weights regularizer scale.\"\"\"\n self._regularizer_scale = regularizer_scale\n\n def set_variable_length_moment_fn(self, sequence_length, max_sequence_length):\n \"\"\"Set variable length moment function for use in batch norm.\n\n Args:\n sequence_length: An vector of sequence lengths.\n max_sequence_length: Padding length for the batch.\n\n Returns:\n Returns sequence mask.\n \"\"\"\n mask = tf.sequence_mask(\n sequence_length, maxlen=max_sequence_length, dtype=tf.float32)\n mask = tf.expand_dims(mask, 2)\n\n mask_r4 = tf.expand_dims(mask, 3)\n mask_r2 = tf.reshape(mask, [-1, 1])\n inverse_numsteps = tf.math.reciprocal(tf.reduce_sum(mask))\n\n def _varlen_moment_fn(input_tensor, axes):\n \"\"\"Moment function to use with batch normalization.\"\"\"\n 
input_tensor_shape = input_tensor.get_shape().as_list()\n input_tensor_rank = len(input_tensor_shape)\n if input_tensor_rank == 2:\n input_tensor = mask_r2 * input_tensor\n elif input_tensor_rank == 4:\n assert input_tensor_shape[2] == 1\n input_tensor = mask_r4 * input_tensor\n else:\n assert False, \"Supports rank2 and rank4 tensors.\"\n ex = tf.reduce_sum(input_tensor, axis=axes) * inverse_numsteps\n exx = tf.reduce_sum(\n input_tensor * input_tensor, axis=axes) * inverse_numsteps\n return ex, (exx - ex * ex)\n\n self._moment_fn = _varlen_moment_fn\n return mask\n\n def batch_normalization(self, input_tensor, decay=0.999):\n \"\"\"Add batch normalization network structure after input_tensor.\n\n It performs batch normalization of the input tensor. This routine is\n verified to works for rank 4 or 2 tensors.\n\n Args:\n input_tensor: Input tensor that needs to be normalized.\n decay: Moving average decay\n\n Returns:\n A tensor that is normalized.\n \"\"\"\n input_tensor_shape = input_tensor.get_shape().as_list()\n nstat = input_tensor_shape[-1]\n reduce_dims = list(range(len(input_tensor_shape) - 1))\n\n with tf.variable_scope(name_or_scope=None, default_name=\"batch_norm\"):\n offset = tf.get_variable(\n \"offset\",\n shape=[nstat],\n initializer=tf.zeros_initializer,\n trainable=True)\n scale = tf.get_variable(\n \"scale\",\n shape=[nstat],\n initializer=tf.ones_initializer,\n trainable=True)\n moving_mean = tf.get_variable(\n \"moving_mean\",\n shape=[nstat],\n initializer=tf.zeros_initializer,\n trainable=False)\n moving_var = tf.get_variable(\n \"moving_variance\",\n shape=[nstat],\n initializer=tf.ones_initializer,\n trainable=False)\n\n if self._mode == tf.estimator.ModeKeys.TRAIN:\n # During training compute summay stats, update them to moving average\n # variables and use the summary stas for batch normalization.\n moment_fn = self._moment_fn or tf.nn.moments\n mean_mom, var_mom = moment_fn(input_tensor, reduce_dims)\n with tf.control_dependencies([\n moving_averages.assign_moving_average(\n moving_mean, mean_mom, decay, name=\"mean_op\"),\n moving_averages.assign_moving_average(\n moving_var, var_mom, decay, name=\"variance_op\")\n ]):\n tensor = tf.nn.batch_normalization(\n input_tensor,\n mean_mom,\n var_mom,\n offset,\n scale,\n 1e-9,\n name=\"batch_norm_core\")\n else:\n # During eval/inference use the moving average variable for batch\n # normalization. The variables would be frozen to constants before\n # saving graph.\n tensor = tf.nn.batch_normalization(\n input_tensor,\n moving_mean,\n moving_var,\n offset,\n scale,\n 1e-9,\n name=\"batch_norm_core\")\n return tensor\n\n def get_quantization_ranges(self, tensor, ema_decay=0.99):\n \"\"\"Perform fake quantization of the tensor.\n\n The method computes ranges for quantization by first computing the\n batch min/max and then computing a moving average of the min/max across\n batches. The moving average of min/max is used for quantization during\n inference. 
During training the batch min/maxs are used directly.\n\n Args:\n tensor: Input tensor that needs to be quantized.\n ema_decay: Moving average decay\n\n Returns:\n Min/Max for fake quantization.\n \"\"\"\n # If neither quantization is enabled, nor are we calculating ranges for\n # floating point models, this method is a no-op.\n if not self._quantization_enabled:\n return None, None\n\n # Calculate min/max for the tensor.\n min_var = tf.get_variable(\"min\", initializer=0.0, trainable=False)\n max_var = tf.get_variable(\"max\", initializer=1.0, trainable=False)\n\n if self._mode == tf.estimator.ModeKeys.TRAIN:\n # During training estimate moving average for min/max. Use the min/max\n # values directly for quantization.\n ops = []\n batch_min = tf.reduce_min(tensor, name=\"BatchMin\")\n # Toco expects 0.0 to be part of the quantization range.\n batch_min = tf.minimum(batch_min, 0.0)\n ops.append(\n moving_averages.assign_moving_average(min_var, batch_min, ema_decay))\n\n batch_max = tf.reduce_max(tensor, name=\"BatchMax\")\n # Toco expects 0.0 to be part of the quantization range.\n batch_max = tf.maximum(batch_max, 0.0)\n ops.append(\n moving_averages.assign_moving_average(max_var, batch_max, ema_decay))\n\n with tf.control_dependencies(ops):\n return tf.identity(batch_min), tf.identity(batch_max)\n else:\n # During inference/eval use the moving average min/maxs for\n # quantization.\n return min_var, max_var\n\n def quantization(self, tensor, ema_decay=0.99, num_bits=8):\n \"\"\"Perform fake quantization of the tensor.\n\n The method performs fake quantization of the tensor by first computing the\n batch min/max and then computing a moving average of the min/max across\n batches. The moving average of min/max is used for quantization during\n inference. During training the batch min/maxs are used directly.\n\n Args:\n tensor: Input tensor that needs to be quantized.\n ema_decay: Moving average decay\n num_bits: Number of bits used for quantization\n\n Returns:\n Quantized tensor.\n \"\"\"\n with tf.variable_scope(\n name_or_scope=None, default_name=\"MovingAvgQuantize\"):\n min_tensor, max_tensor = self.get_quantization_ranges(tensor, ema_decay)\n if min_tensor is None or max_tensor is None:\n return tensor\n else:\n return tf.fake_quant_with_min_max_vars(\n tensor, min_tensor, max_tensor, num_bits=num_bits)\n\n def _weight_quantization(self, tensor, num_bits=8):\n \"\"\"Quantize weights when enabled.\"\"\"\n if not self._quantization_enabled:\n return tensor\n\n # For infer mode, toco computes the min/max from the weights offline to\n # quantize it. During train/eval this is computed from the current value\n # in the session by the graph itself.\n modes = set([tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL])\n if self._mode in modes:\n batch_min = tf.reduce_min(tensor, name=\"BatchMin\")\n # Toco expects 0.0 to be part of the quantization range.\n batch_min = tf.minimum(batch_min, 0.0)\n\n batch_max = tf.reduce_max(tensor, name=\"BatchMax\")\n # Toco expects 0.0 to be part of the quantization range.\n batch_max = tf.maximum(batch_max, 0.0)\n\n return tf.fake_quant_with_min_max_vars(\n tensor, batch_min, batch_max, num_bits=num_bits)\n else:\n return tensor\n\n def _get_weight(self, shape, num_bits=8):\n \"\"\"Return a weight variable for the given shape.\n\n The disable_pruning flag overrides the global pruning_obj object. 
When set\n to True, the returned weight tensor is not pruned.\n Args:\n shape: Shape of the weight tensor\n num_bits: Number of bits to use for the variable.\n\n Returns:\n Quantized tensor with the mask and threshold variables needed for pruning.\n\n \"\"\"\n weight = tf.get_variable(\n \"weight\", shape, initializer=self._weights_initializer)\n if self._regularizer_scale > 0.0:\n reg_loss = tf.nn.l2_loss(weight) * tf.convert_to_tensor(\n self._regularizer_scale)\n tf.losses.add_loss(\n reg_loss, loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES)\n return self._weight_quantization(weight, num_bits=num_bits)\n\n def _get_bias(self, shape):\n weight = tf.get_variable(\"bias\", shape, initializer=tf.zeros_initializer())\n if self._regularizer_scale > 0.0:\n reg_loss = tf.nn.l2_loss(weight) * tf.convert_to_tensor(\n self._regularizer_scale)\n tf.losses.add_loss(\n reg_loss, loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES)\n return weight\n\n def zero_beyond_sequence_length(self, sequence_length, gate):\n \"\"\"Generate a binary mask for the sequence based on the timestep's validity.\n\n Args:\n sequence_length: The sequence length tensor of [batch size] elements.\n gate: A gate tensor used by the QuasiRNN cell to infer shape from it.\n\n Returns:\n Mask tensor with one for valid time and zero for invalid timestep.\n \"\"\"\n mask = tf.sequence_mask(\n sequence_length, maxlen=tf.shape(gate)[1], dtype=tf.float32)\n return tf.expand_dims(mask, 2)\n\n def _convolution2d(self,\n inputs,\n kernel_size,\n filters,\n stride,\n padding,\n dilations=None,\n weight_mask=None,\n scope=\"convolution2d\"):\n \"\"\"Linear part of the convolution layer.\"\"\"\n if isinstance(stride, int):\n strides = [1, stride, stride, 1]\n else:\n if not isinstance(stride, list) or len(stride) != 2:\n raise ValueError(\"`Stride` should be an integer or a list of length 2\")\n strides = [1, stride[0], stride[1], 1]\n if dilations is not None:\n if not isinstance(dilations, list) or len(dilations) != 2:\n raise ValueError(\"`Dilations` should be an integer list of length 2\")\n dilations = [1, dilations[0], dilations[1], 1]\n else:\n dilations = [1, 1, 1, 1]\n\n with tf.variable_scope(name_or_scope=None, default_name=scope):\n input_channels = inputs.get_shape().as_list()[-1]\n kernel_shape = kernel_size + [input_channels, filters]\n weight = self._get_weight(kernel_shape)\n if weight_mask is not None:\n # Tensor multiply for disabling backprop\n weight = weight * weight_mask\n bias = self._get_bias([filters])\n\n features = tf.nn.conv2d(\n inputs, weight, strides, padding, dilations=dilations)\n return tf.nn.bias_add(features, bias)\n\n def convolution2d(self,\n inputs,\n kernel_size,\n filters,\n scope=\"convolution2d\",\n stride=1,\n padding=\"SAME\",\n dilations=None,\n weight_mask=None,\n activation=tf.nn.relu,\n normalization=True):\n \"\"\"Creates a 2d convolution layer.\n\n Performs batch normalization to the tensor pre activation and fake\n quantization post activation.\n\n Args:\n inputs: Input tensor, that is expected to be a rank 4 tensor.\n kernel_size: 2D convolution kernel size (2 tuple).\n filters: Number of output channels (integer).\n scope: A string that would be used as variable scope for the layer.\n stride: Convolution stride, can be a constant or a 2 tuple.\n padding: Padding to use for the convolution.\n dilations: tuple of size 2 specifying the dilation rates for input height\n and width respectively. 
Refer to tf.nn.conv2d API for more details.\n weight_mask: A floating point numpy array or constant tensor mask to turn\n off weights in the convolution kernel.\n activation: Activation function to be used, Relu is used by default.\n normalization: A boolean flag indicating if batchnorm should be performed.\n\n Returns:\n Tensor result of the convolution layer.\n\n Raises:\n ValueError: If inputs is not a rank 4 tensor\n ValueError: If kernel_size is not a list or tuple of length 2\n \"\"\"\n if len(inputs.get_shape().as_list()) != 4:\n raise ValueError(\"`inputs` should be a rank 4 tensor. \"\n \"Was: {}.\".format(len(inputs.get_shape().as_list())))\n\n kernel_size = list(kernel_size)\n if len(kernel_size) != 2:\n raise ValueError(\"`kernel_size` should be a tuple or list of length 2. \"\n \"Was: {}.\".format(kernel_size))\n\n features_rank4 = self._convolution2d(\n inputs,\n kernel_size,\n filters,\n stride,\n padding,\n dilations,\n weight_mask=weight_mask,\n scope=scope)\n\n if normalization and self._normalizer:\n features_rank4 = self._normalizer(features_rank4)\n if activation is not None:\n features_rank4 = activation(features_rank4)\n\n return self.quantization(features_rank4)\n\n def _fully_connected(self,\n features,\n output_size,\n scope=\"fully_connected\",\n use_bias=True):\n \"\"\"Performs fully connected operation.\"\"\"\n with tf.variable_scope(name_or_scope=None, default_name=scope):\n weight = self._get_weight(\n [features.get_shape().as_list()[-1], output_size])\n bias = self._get_bias([output_size])\n features = tf.matmul(features, weight)\n return tf.nn.bias_add(features, bias) if use_bias else features\n\n def fully_connected(self,\n features,\n output_size,\n scope=\"fully_connected\",\n activation=tf.nn.relu,\n normalization=True,\n use_bias=True):\n \"\"\"Creates a fully connected layer.\n\n Performs batch normalization to the tensor pre activation and fake\n quantization post activation.\n\n Args:\n features: Input features to the fully connected layer.\n output_size: Number of output features.\n scope: A variable scope for the connected layer.\n activation: activation function to be used, Relu is used by default.\n normalization: A flag indicating if batchnorm should be performed.\n use_bias: If True, bias is added to the result\n\n Returns:\n Tensor result of the fully connected layer.\n\n Raises:\n ValueError: If last dimension of features is dynamic (shape = None).\n \"\"\"\n input_shape = features.get_shape().as_list()\n if not input_shape[-1]:\n raise ValueError(\"Last dimension of features should be static\")\n\n need_reshape = len(input_shape) > 2\n input_tensor = features\n if need_reshape:\n features = tf.reshape(features, [-1, input_shape[-1]])\n\n features = self._fully_connected(\n features, output_size, scope=scope, use_bias=use_bias)\n\n if normalization and self._normalizer:\n features = self._normalizer(features)\n\n if activation:\n # Batch normalization is done pre activation as suggested in the original\n # paper. Quantization is done post activation because the range will\n # change after applying the squashing function.\n features = activation(features)\n features = self.quantization(features)\n if not need_reshape:\n return features\n else:\n # The fully connected layer changes the last dimension to output_size.\n # If a reshape was done before applying the fully connected layer, change\n # it back to the right rank. 
If the input dimensions are known use the\n # static shape otherwise use the shape tensor.\n if sum([val is None for val in input_shape]) <= 1:\n # Just one dynamic shape, we can reshape with -1\n output_shape = [-1 if val is None else val for val in input_shape]\n else:\n input_shape_tensor = tf.shape(input_tensor)\n output_shape = [\n shape or input_shape_tensor[index]\n for index, shape in enumerate(input_shape)\n ]\n output_shape[-1] = output_size\n return tf.reshape(features, output_shape)\n",
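The quantization path in CommonLayers keeps an exponential moving average of per-batch min/max (clamped so 0.0 stays inside the range, as Toco expects) and uses those ranges for fake quantization. Here is a small numpy sketch of that idea, assuming the standard assign_moving_average update var = var * decay + value * (1 - decay); it illustrates the range tracking and quantize-dequantize step, not the exact nudged-zero-point behavior of tf.fake_quant_with_min_max_vars.

import numpy as np

def update_ranges(batch, min_var, max_var, ema_decay=0.99):
    batch_min = min(batch.min(), 0.0)   # keep 0.0 inside the range
    batch_max = max(batch.max(), 0.0)
    min_var = ema_decay * min_var + (1.0 - ema_decay) * batch_min
    max_var = ema_decay * max_var + (1.0 - ema_decay) * batch_max
    return min_var, max_var

def fake_quant(x, qmin, qmax, num_bits=8):
    # Quantize to 2^num_bits - 1 levels over [qmin, qmax], then dequantize.
    levels = 2 ** num_bits - 1
    scale = (qmax - qmin) / levels
    q = np.round((np.clip(x, qmin, qmax) - qmin) / scale)
    return q * scale + qmin

mn, mx = update_ranges(np.random.randn(32, 8), 0.0, 1.0)
y = fake_quant(np.random.randn(4), mn, mx)

During training the batch min/max feed both the EMA update and the quantizer directly; at inference only the frozen EMA variables are used, which is why the class branches on the estimator mode.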
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains definitions for the original form of Residual Networks.\n\nThe 'v1' residual networks (ResNets) implemented in this module were proposed\nby:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n\nOther variants were introduced in:\n[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Identity Mappings in Deep Residual Networks. arXiv: 1603.05027\n\nThe networks defined in this module utilize the bottleneck building block of\n[1] with projection shortcuts only for increasing depths. They employ batch\nnormalization *after* every weight layer. This is the architecture used by\nMSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and\nResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'\narchitecture and the alternative 'v2' architecture of [2] which uses batch\nnormalization *before* every weight layer in the so-called full pre-activation\nunits.\n\nTypical use:\n\n from tf_slim.nets import resnet_v1\n\nResNet-101 for image classification into 1000 classes:\n\n # inputs has shape [batch, 224, 224, 3]\n with slim.arg_scope(resnet_v1.resnet_arg_scope()):\n net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)\n\nResNet-101 for semantic segmentation into 21 classes:\n\n # inputs has shape [batch, 513, 513, 3]\n with slim.arg_scope(resnet_v1.resnet_arg_scope()):\n net, end_points = resnet_v1.resnet_v1_101(inputs,\n 21,\n is_training=False,\n global_pool=False,\n output_stride=16)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\nimport tf_slim as slim\n\nfrom nets import resnet_utils\n\n\nresnet_arg_scope = resnet_utils.resnet_arg_scope\n\n\nclass NoOpScope(object):\n \"\"\"No-op context manager.\"\"\"\n\n def __enter__(self):\n return None\n\n def __exit__(self, exc_type, exc_value, traceback):\n return False\n\n\[email protected]_arg_scope\ndef bottleneck(inputs,\n depth,\n depth_bottleneck,\n stride,\n rate=1,\n outputs_collections=None,\n scope=None,\n use_bounded_activations=False):\n \"\"\"Bottleneck residual unit variant with BN after convolutions.\n\n This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for\n its definition. Note that we use here the bottleneck variant which has an\n extra bottleneck layer.\n\n When putting together two consecutive ResNet blocks that use this unit, one\n should use stride = 2 in the last unit of the first block.\n\n Args:\n inputs: A tensor of size [batch, height, width, channels].\n depth: The depth of the ResNet unit output.\n depth_bottleneck: The depth of the bottleneck layers.\n stride: The ResNet unit's stride. 
Determines the amount of downsampling of\n the units output compared to its input.\n rate: An integer, rate for atrous convolution.\n outputs_collections: Collection to add the ResNet unit output.\n scope: Optional variable_scope.\n use_bounded_activations: Whether or not to use bounded activations. Bounded\n activations better lend themselves to quantized inference.\n\n Returns:\n The ResNet unit's output.\n \"\"\"\n with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n if depth == depth_in:\n shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')\n else:\n shortcut = slim.conv2d(\n inputs,\n depth, [1, 1],\n stride=stride,\n activation_fn=tf.nn.relu6 if use_bounded_activations else None,\n scope='shortcut')\n\n residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n if use_bounded_activations:\n # Use clip_by_value to simulate bandpass activation.\n residual = tf.clip_by_value(residual, -6.0, 6.0)\n output = tf.nn.relu6(shortcut + residual)\n else:\n output = tf.nn.relu(shortcut + residual)\n\n return slim.utils.collect_named_outputs(outputs_collections,\n sc.name,\n output)\n\n\ndef resnet_v1(inputs,\n blocks,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n include_root_block=True,\n spatial_squeeze=True,\n store_non_strided_activations=False,\n reuse=None,\n scope=None):\n \"\"\"Generator for v1 ResNet models.\n\n This function generates a family of ResNet v1 models. See the resnet_v1_*()\n methods for specific model instantiations, obtained by selecting different\n block instantiations that produce ResNets of various depths.\n\n Training for image classification on Imagenet is usually done with [224, 224]\n inputs, resulting in [7, 7] feature maps at the output of the last ResNet\n block for the ResNets defined in [1] that have nominal stride equal to 32.\n However, for dense prediction tasks we advise that one uses inputs with\n spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In\n this case the feature maps at the ResNet output will have spatial shape\n [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]\n and corners exactly aligned with the input image corners, which greatly\n facilitates alignment of the features to the image. Using as input [225, 225]\n images results in [8, 8] feature maps at the output of the last ResNet block.\n\n For dense prediction tasks, the ResNet needs to run in fully-convolutional\n (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all\n have nominal stride equal to 32 and a good choice in FCN mode is to use\n output_stride=16 in order to increase the density of the computed features at\n small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.\n\n Args:\n inputs: A tensor of size [batch, height_in, width_in, channels].\n blocks: A list of length equal to the number of ResNet blocks. Each element\n is a resnet_utils.Block object describing the units in the block.\n num_classes: Number of predicted classes for classification tasks.\n If 0 or None, we return the features before the logit layer.\n is_training: whether batch_norm layers are in training mode. 
If this is set\n to None, the callers can specify slim.batch_norm's is_training parameter\n from an outer slim.arg_scope.\n global_pool: If True, we perform global average pooling before computing the\n logits. Set to True for image classification, False for dense prediction.\n output_stride: If None, then the output will be computed at the nominal\n network stride. If output_stride is not None, it specifies the requested\n ratio of input to output spatial resolution.\n include_root_block: If True, include the initial convolution followed by\n max-pooling, if False excludes it.\n spatial_squeeze: if True, logits is of shape [B, C], if false logits is\n of shape [B, 1, 1, C], where B is batch_size and C is number of classes.\n To use this parameter, the input images must be smaller than 300x300\n pixels, in which case the output logit layer does not contain spatial\n information and can be removed.\n store_non_strided_activations: If True, we compute non-strided (undecimated)\n activations at the last unit of each block and store them in the\n `outputs_collections` before subsampling them. This gives us access to\n higher resolution intermediate activations which are useful in some\n dense prediction problems but increases 4x the computation and memory cost\n at the last unit of each block.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n\n Returns:\n net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].\n If global_pool is False, then height_out and width_out are reduced by a\n factor of output_stride compared to the respective height_in and width_in,\n else both height_out and width_out equal one. If num_classes is 0 or None,\n then net is the output of the last ResNet block, potentially after global\n average pooling. 
If num_classes a non-zero integer, net contains the\n pre-softmax activations.\n end_points: A dictionary from components of the network to the corresponding\n activation.\n\n Raises:\n ValueError: If the target output_stride is not valid.\n \"\"\"\n with tf.variable_scope(\n scope, 'resnet_v1', [inputs], reuse=reuse) as sc:\n end_points_collection = sc.original_name_scope + '_end_points'\n with slim.arg_scope([slim.conv2d, bottleneck,\n resnet_utils.stack_blocks_dense],\n outputs_collections=end_points_collection):\n with (slim.arg_scope([slim.batch_norm], is_training=is_training)\n if is_training is not None else NoOpScope()):\n net = inputs\n if include_root_block:\n if output_stride is not None:\n if output_stride % 4 != 0:\n raise ValueError('The output_stride needs to be a multiple of 4.')\n output_stride /= 4\n net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')\n net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')\n net = resnet_utils.stack_blocks_dense(net, blocks, output_stride,\n store_non_strided_activations)\n # Convert end_points_collection into a dictionary of end_points.\n end_points = slim.utils.convert_collection_to_dict(\n end_points_collection)\n\n if global_pool:\n # Global average pooling.\n net = tf.reduce_mean(\n input_tensor=net, axis=[1, 2], name='pool5', keepdims=True)\n end_points['global_pool'] = net\n if num_classes:\n net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, scope='logits')\n end_points[sc.name + '/logits'] = net\n if spatial_squeeze:\n net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')\n end_points[sc.name + '/spatial_squeeze'] = net\n end_points['predictions'] = slim.softmax(net, scope='predictions')\n return net, end_points\nresnet_v1.default_image_size = 224\n\n\ndef resnet_v1_block(scope, base_depth, num_units, stride):\n \"\"\"Helper function for creating a resnet_v1 bottleneck block.\n\n Args:\n scope: The scope of the block.\n base_depth: The depth of the bottleneck layer for each unit.\n num_units: The number of units in the block.\n stride: The stride of the block, implemented as a stride in the last unit.\n All other units have stride=1.\n\n Returns:\n A resnet_v1 bottleneck block.\n \"\"\"\n return resnet_utils.Block(scope, bottleneck, [{\n 'depth': base_depth * 4,\n 'depth_bottleneck': base_depth,\n 'stride': 1\n }] * (num_units - 1) + [{\n 'depth': base_depth * 4,\n 'depth_bottleneck': base_depth,\n 'stride': stride\n }])\n\n\ndef resnet_v1_50(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n spatial_squeeze=True,\n store_non_strided_activations=False,\n min_base_depth=8,\n depth_multiplier=1,\n reuse=None,\n scope='resnet_v1_50'):\n \"\"\"ResNet-50 model of [1]. 
See resnet_v1() for arg and return description.\"\"\"\n depth_func = lambda d: max(int(d * depth_multiplier), min_base_depth)\n blocks = [\n resnet_v1_block('block1', base_depth=depth_func(64), num_units=3,\n stride=2),\n resnet_v1_block('block2', base_depth=depth_func(128), num_units=4,\n stride=2),\n resnet_v1_block('block3', base_depth=depth_func(256), num_units=6,\n stride=2),\n resnet_v1_block('block4', base_depth=depth_func(512), num_units=3,\n stride=1),\n ]\n return resnet_v1(inputs, blocks, num_classes, is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, spatial_squeeze=spatial_squeeze,\n store_non_strided_activations=store_non_strided_activations,\n reuse=reuse, scope=scope)\nresnet_v1_50.default_image_size = resnet_v1.default_image_size\n\n\ndef resnet_v1_101(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n spatial_squeeze=True,\n store_non_strided_activations=False,\n min_base_depth=8,\n depth_multiplier=1,\n reuse=None,\n scope='resnet_v1_101'):\n \"\"\"ResNet-101 model of [1]. See resnet_v1() for arg and return description.\"\"\"\n depth_func = lambda d: max(int(d * depth_multiplier), min_base_depth)\n blocks = [\n resnet_v1_block('block1', base_depth=depth_func(64), num_units=3,\n stride=2),\n resnet_v1_block('block2', base_depth=depth_func(128), num_units=4,\n stride=2),\n resnet_v1_block('block3', base_depth=depth_func(256), num_units=23,\n stride=2),\n resnet_v1_block('block4', base_depth=depth_func(512), num_units=3,\n stride=1),\n ]\n return resnet_v1(inputs, blocks, num_classes, is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, spatial_squeeze=spatial_squeeze,\n store_non_strided_activations=store_non_strided_activations,\n reuse=reuse, scope=scope)\nresnet_v1_101.default_image_size = resnet_v1.default_image_size\n\n\ndef resnet_v1_152(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n store_non_strided_activations=False,\n spatial_squeeze=True,\n min_base_depth=8,\n depth_multiplier=1,\n reuse=None,\n scope='resnet_v1_152'):\n \"\"\"ResNet-152 model of [1]. See resnet_v1() for arg and return description.\"\"\"\n depth_func = lambda d: max(int(d * depth_multiplier), min_base_depth)\n blocks = [\n resnet_v1_block('block1', base_depth=depth_func(64), num_units=3,\n stride=2),\n resnet_v1_block('block2', base_depth=depth_func(128), num_units=8,\n stride=2),\n resnet_v1_block('block3', base_depth=depth_func(256), num_units=36,\n stride=2),\n resnet_v1_block('block4', base_depth=depth_func(512), num_units=3,\n stride=1),\n ]\n return resnet_v1(inputs, blocks, num_classes, is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, spatial_squeeze=spatial_squeeze,\n store_non_strided_activations=store_non_strided_activations,\n reuse=reuse, scope=scope)\nresnet_v1_152.default_image_size = resnet_v1.default_image_size\n\n\ndef resnet_v1_200(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n store_non_strided_activations=False,\n spatial_squeeze=True,\n min_base_depth=8,\n depth_multiplier=1,\n reuse=None,\n scope='resnet_v1_200'):\n \"\"\"ResNet-200 model of [2]. 
See resnet_v1() for arg and return description.\"\"\"\n depth_func = lambda d: max(int(d * depth_multiplier), min_base_depth)\n blocks = [\n resnet_v1_block('block1', base_depth=depth_func(64), num_units=3,\n stride=2),\n resnet_v1_block('block2', base_depth=depth_func(128), num_units=24,\n stride=2),\n resnet_v1_block('block3', base_depth=depth_func(256), num_units=36,\n stride=2),\n resnet_v1_block('block4', base_depth=depth_func(512), num_units=3,\n stride=1),\n ]\n return resnet_v1(inputs, blocks, num_classes, is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, spatial_squeeze=spatial_squeeze,\n store_non_strided_activations=store_non_strided_activations,\n reuse=reuse, scope=scope)\nresnet_v1_200.default_image_size = resnet_v1.default_image_size\n",
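The resnet_v1() docstring's sizing rule is easy to verify by hand: for inputs whose spatial size is a multiple of 32 plus 1, the output feature map has size (input - 1) / output_stride + 1 with corners aligned to the image. A quick arithmetic check:

def feature_map_size(input_size, output_stride):
    # Holds when (input_size - 1) divides evenly by the stride.
    assert (input_size - 1) % output_stride == 0
    return (input_size - 1) // output_stride + 1

print(feature_map_size(321, 16))  # -> 21, the dense-prediction setting
print(feature_map_size(225, 32))  # -> 8, matching the [8, 8] maps noted above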
"# Lint as: python2, python3\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for ssd resnet v1 FPN feature extractors.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport numpy as np\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\n\nfrom object_detection.models import ssd_feature_extractor_test\nfrom object_detection.utils import test_utils\n\n\nclass SSDResnetFPNFeatureExtractorTestBase(\n ssd_feature_extractor_test.SsdFeatureExtractorTestBase):\n \"\"\"Helper test class for SSD Resnet v1 FPN feature extractors.\"\"\"\n\n @abc.abstractmethod\n def _resnet_scope_name(self):\n pass\n\n @abc.abstractmethod\n def _fpn_scope_name(self):\n return 'fpn'\n\n @abc.abstractmethod\n def _create_feature_extractor(self,\n depth_multiplier,\n pad_to_multiple,\n use_explicit_padding=False,\n min_depth=32,\n use_keras=False):\n pass\n\n def test_extract_features_returns_correct_shapes_256(self):\n image_height = 256\n image_width = 256\n depth_multiplier = 1.0\n pad_to_multiple = 1\n expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),\n (2, 8, 8, 256), (2, 4, 4, 256),\n (2, 2, 2, 256)]\n self.check_extract_features_returns_correct_shape(\n 2, image_height, image_width, depth_multiplier, pad_to_multiple,\n expected_feature_map_shape, use_keras=self.is_tf2())\n\n def test_extract_features_returns_correct_shapes_with_dynamic_inputs(\n self):\n image_height = 256\n image_width = 256\n depth_multiplier = 1.0\n pad_to_multiple = 1\n expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),\n (2, 8, 8, 256), (2, 4, 4, 256),\n (2, 2, 2, 256)]\n self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(\n 2, image_height, image_width, depth_multiplier, pad_to_multiple,\n expected_feature_map_shape, use_keras=self.is_tf2())\n\n def test_extract_features_returns_correct_shapes_with_depth_multiplier(\n self):\n image_height = 256\n image_width = 256\n depth_multiplier = 0.5\n expected_num_channels = int(256 * depth_multiplier)\n pad_to_multiple = 1\n expected_feature_map_shape = [(2, 32, 32, expected_num_channels),\n (2, 16, 16, expected_num_channels),\n (2, 8, 8, expected_num_channels),\n (2, 4, 4, expected_num_channels),\n (2, 2, 2, expected_num_channels)]\n self.check_extract_features_returns_correct_shape(\n 2, image_height, image_width, depth_multiplier, pad_to_multiple,\n expected_feature_map_shape, use_keras=self.is_tf2())\n\n def test_extract_features_returns_correct_shapes_with_min_depth(\n self):\n image_height = 256\n image_width = 256\n depth_multiplier = 1.0\n pad_to_multiple = 1\n min_depth = 320\n expected_feature_map_shape = [(2, 32, 32, min_depth),\n (2, 16, 16, min_depth),\n (2, 8, 8, min_depth),\n (2, 4, 4, min_depth),\n (2, 2, 2, min_depth)]\n\n with test_utils.GraphContextOrNone() as g:\n image_tensor = 
tf.random.uniform([2, image_height, image_width, 3])\n feature_extractor = self._create_feature_extractor(\n depth_multiplier, pad_to_multiple, min_depth=min_depth,\n use_keras=self.is_tf2())\n\n def graph_fn():\n if self.is_tf2():\n return feature_extractor(image_tensor)\n return feature_extractor.extract_features(image_tensor)\n\n feature_maps = self.execute(graph_fn, [], graph=g)\n for feature_map, expected_shape in zip(feature_maps,\n expected_feature_map_shape):\n self.assertAllEqual(feature_map.shape, expected_shape)\n\n def test_extract_features_returns_correct_shapes_with_pad_to_multiple(\n self):\n image_height = 254\n image_width = 254\n depth_multiplier = 1.0\n pad_to_multiple = 32\n expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),\n (2, 8, 8, 256), (2, 4, 4, 256),\n (2, 2, 2, 256)]\n\n self.check_extract_features_returns_correct_shape(\n 2, image_height, image_width, depth_multiplier, pad_to_multiple,\n expected_feature_map_shape, use_keras=self.is_tf2())\n\n def test_extract_features_raises_error_with_invalid_image_size(\n self):\n image_height = 32\n image_width = 32\n depth_multiplier = 1.0\n pad_to_multiple = 1\n self.check_extract_features_raises_error_with_invalid_image_size(\n image_height, image_width, depth_multiplier, pad_to_multiple,\n use_keras=self.is_tf2())\n\n def test_preprocess_returns_correct_value_range(self):\n image_height = 128\n image_width = 128\n depth_multiplier = 1\n pad_to_multiple = 1\n test_image_np = np.random.rand(4, image_height, image_width, 3)\n with test_utils.GraphContextOrNone() as g:\n test_image = tf.constant(test_image_np)\n feature_extractor = self._create_feature_extractor(\n depth_multiplier, pad_to_multiple, use_keras=self.is_tf2())\n\n def graph_fn():\n preprocessed_image = feature_extractor.preprocess(test_image)\n return preprocessed_image\n\n preprocessed_image_out = self.execute(graph_fn, [], graph=g)\n self.assertAllClose(preprocessed_image_out,\n test_image_np - [[123.68, 116.779, 103.939]])\n\n def test_variables_only_created_in_scope(self):\n if self.is_tf2():\n self.skipTest('test_variables_only_created_in_scope is only tf1')\n depth_multiplier = 1\n pad_to_multiple = 1\n scope_name = self._resnet_scope_name()\n self.check_feature_extractor_variables_under_scope(\n depth_multiplier,\n pad_to_multiple,\n scope_name,\n use_keras=self.is_tf2())\n\n def test_variable_count(self):\n if self.is_tf2():\n self.skipTest('test_variable_count is only tf1')\n depth_multiplier = 1\n pad_to_multiple = 1\n variables = self.get_feature_extractor_variables(\n depth_multiplier,\n pad_to_multiple,\n use_keras=self.is_tf2())\n # The number of expected variables in resnet_v1_50, resnet_v1_101,\n # and resnet_v1_152 is 279, 534, and 789 respectively.\n expected_variables_len = 279\n scope_name = self._resnet_scope_name()\n if scope_name in ('ResNet101V1_FPN', 'resnet_v1_101'):\n expected_variables_len = 534\n elif scope_name in ('ResNet152V1_FPN', 'resnet_v1_152'):\n expected_variables_len = 789\n self.assertEqual(len(variables), expected_variables_len)\n",
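The depth_multiplier and min_depth shape tests above are consistent with the depth function commonly used inside SSD feature extractors, channels = max(int(base * multiplier), min_depth). That internal formula is an assumption here (the extractor body is not shown), but it reproduces both expected channel counts:

def depth_fn(d, depth_multiplier, min_depth):
    # Assumed extractor rule: scale the base depth, but never go below min_depth.
    return max(int(d * depth_multiplier), min_depth)

assert depth_fn(256, 0.5, 32) == 128    # depth_multiplier = 0.5 test case
assert depth_fn(256, 1.0, 320) == 320   # min_depth = 320 test case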
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base box coder.\n\nBox coders convert between coordinate frames, namely image-centric\n(with (0,0) on the top left of image) and anchor-centric (with (0,0) being\ndefined by a specific anchor).\n\nUsers of a BoxCoder can call two methods:\n encode: which encodes a box with respect to a given anchor\n (or rather, a tensor of boxes wrt a corresponding tensor of anchors) and\n decode: which inverts this encoding with a decode operation.\nIn both cases, the arguments are assumed to be in 1-1 correspondence already;\nit is not the job of a BoxCoder to perform matching.\n\"\"\"\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom abc import abstractproperty\n\nimport tensorflow as tf\n\n# Box coder types.\nFASTER_RCNN = 'faster_rcnn'\nKEYPOINT = 'keypoint'\nMEAN_STDDEV = 'mean_stddev'\nSQUARE = 'square'\n\n\nclass BoxCoder(object):\n \"\"\"Abstract base class for box coder.\"\"\"\n __metaclass__ = ABCMeta\n\n @abstractproperty\n def code_size(self):\n \"\"\"Return the size of each code.\n\n This number is a constant and should agree with the output of the `encode`\n op (e.g. if rel_codes is the output of self.encode(...), then it should have\n shape [N, code_size()]). 
This abstractproperty should be overridden by\n implementations.\n\n Returns:\n an integer constant\n \"\"\"\n pass\n\n def encode(self, boxes, anchors):\n \"\"\"Encode a box list relative to an anchor collection.\n\n Args:\n boxes: BoxList holding N boxes to be encoded\n anchors: BoxList of N anchors\n\n Returns:\n a tensor representing N relative-encoded boxes\n \"\"\"\n with tf.name_scope('Encode'):\n return self._encode(boxes, anchors)\n\n def decode(self, rel_codes, anchors):\n \"\"\"Decode boxes that are encoded relative to an anchor collection.\n\n Args:\n rel_codes: a tensor representing N relative-encoded boxes\n anchors: BoxList of anchors\n\n Returns:\n boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,\n with corners y_min, x_min, y_max, x_max)\n \"\"\"\n with tf.name_scope('Decode'):\n return self._decode(rel_codes, anchors)\n\n @abstractmethod\n def _encode(self, boxes, anchors):\n \"\"\"Method to be overriden by implementations.\n\n Args:\n boxes: BoxList holding N boxes to be encoded\n anchors: BoxList of N anchors\n\n Returns:\n a tensor representing N relative-encoded boxes\n \"\"\"\n pass\n\n @abstractmethod\n def _decode(self, rel_codes, anchors):\n \"\"\"Method to be overriden by implementations.\n\n Args:\n rel_codes: a tensor representing N relative-encoded boxes\n anchors: BoxList of anchors\n\n Returns:\n boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,\n with corners y_min, x_min, y_max, x_max)\n \"\"\"\n pass\n\n\ndef batch_decode(encoded_boxes, box_coder, anchors):\n \"\"\"Decode a batch of encoded boxes.\n\n This op takes a batch of encoded bounding boxes and transforms\n them to a batch of bounding boxes specified by their corners in\n the order of [y_min, x_min, y_max, x_max].\n\n Args:\n encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,\n code_size] representing the location of the objects.\n box_coder: a BoxCoder object.\n anchors: a BoxList of anchors used to encode `encoded_boxes`.\n\n Returns:\n decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,\n coder_size] representing the corners of the objects in the order\n of [y_min, x_min, y_max, x_max].\n\n Raises:\n ValueError: if batch sizes of the inputs are inconsistent, or if\n the number of anchors inferred from encoded_boxes and anchors are\n inconsistent.\n \"\"\"\n encoded_boxes.get_shape().assert_has_rank(3)\n if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static():\n raise ValueError(\n 'The number of anchors inferred from encoded_boxes'\n ' and anchors are inconsistent: shape[1] of encoded_boxes'\n ' %s should be equal to the number of anchors: %s.' %\n (encoded_boxes.get_shape()[1].value, anchors.num_boxes_static()))\n\n decoded_boxes = tf.stack([\n box_coder.decode(boxes, anchors).get()\n for boxes in tf.unstack(encoded_boxes)\n ])\n return decoded_boxes\n",
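To make the BoxCoder contract concrete, here is a toy, self-contained coder in the spirit of the interface above. It uses plain numpy arrays instead of BoxList/TF and a deliberately trivial offset encoding, so it is illustrative only; the point is that decode() must exactly invert encode() for corresponding boxes and anchors.

import numpy as np

class OffsetBoxCoder:
    @property
    def code_size(self):
        return 4

    def encode(self, boxes, anchors):
        return boxes - anchors          # [N, 4] relative codes

    def decode(self, rel_codes, anchors):
        return rel_codes + anchors      # back to corner coordinates

coder = OffsetBoxCoder()
boxes = np.array([[10., 10., 20., 15.]])
anchors = np.array([[15., 12., 30., 18.]])
assert np.allclose(coder.decode(coder.encode(boxes, anchors), anchors), boxes)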
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.box_coder.faster_rcnn_box_coder.\"\"\"\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom object_detection.box_coders import faster_rcnn_box_coder\nfrom object_detection.core import box_list\nfrom object_detection.utils import test_case\n\n\nclass FasterRcnnBoxCoderTest(test_case.TestCase):\n\n def test_get_correct_relative_codes_after_encoding(self):\n boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]],\n np.float32)\n anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],\n np.float32)\n expected_rel_codes = [[-0.5, -0.416666, -0.405465, -0.182321],\n [-0.083333, -0.222222, -0.693147, -1.098612]]\n def graph_fn(boxes, anchors):\n boxes = box_list.BoxList(boxes)\n anchors = box_list.BoxList(anchors)\n coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()\n rel_codes = coder.encode(boxes, anchors)\n return rel_codes\n rel_codes_out = self.execute(graph_fn, [boxes, anchors])\n self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,\n atol=1e-04)\n\n def test_get_correct_relative_codes_after_encoding_with_scaling(self):\n boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]],\n np.float32)\n anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],\n np.float32)\n expected_rel_codes = [[-1., -1.25, -1.62186, -0.911608],\n [-0.166667, -0.666667, -2.772588, -5.493062]]\n def graph_fn(boxes, anchors):\n scale_factors = [2, 3, 4, 5]\n boxes = box_list.BoxList(boxes)\n anchors = box_list.BoxList(anchors)\n coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(\n scale_factors=scale_factors)\n rel_codes = coder.encode(boxes, anchors)\n return rel_codes\n rel_codes_out = self.execute(graph_fn, [boxes, anchors])\n self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,\n atol=1e-04)\n\n def test_get_correct_boxes_after_decoding(self):\n anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],\n np.float32)\n rel_codes = np.array([[-0.5, -0.416666, -0.405465, -0.182321],\n [-0.083333, -0.222222, -0.693147, -1.098612]],\n np.float32)\n expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]\n def graph_fn(rel_codes, anchors):\n anchors = box_list.BoxList(anchors)\n coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()\n boxes = coder.decode(rel_codes, anchors)\n return boxes.get()\n boxes_out = self.execute(graph_fn, [rel_codes, anchors])\n self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04,\n atol=1e-04)\n\n def test_get_correct_boxes_after_decoding_with_scaling(self):\n anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]],\n np.float32)\n rel_codes = np.array([[-1., -1.25, -1.62186, -0.911608],\n [-0.166667, -0.666667, -2.772588, -5.493062]],\n np.float32)\n expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]\n def graph_fn(rel_codes, 
anchors):\n scale_factors = [2, 3, 4, 5]\n anchors = box_list.BoxList(anchors)\n coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(\n scale_factors=scale_factors)\n boxes = coder.decode(rel_codes, anchors).get()\n return boxes\n boxes_out = self.execute(graph_fn, [rel_codes, anchors])\n self.assertAllClose(expected_boxes, boxes_out, rtol=1e-04,\n atol=1e-04)\n\n def test_very_small_Width_nan_after_encoding(self):\n boxes = np.array([[10.0, 10.0, 10.0000001, 20.0]], np.float32)\n anchors = np.array([[15.0, 12.0, 30.0, 18.0]], np.float32)\n expected_rel_codes = [[-0.833333, 0., -21.128731, 0.510826]]\n def graph_fn(boxes, anchors):\n boxes = box_list.BoxList(boxes)\n anchors = box_list.BoxList(anchors)\n coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()\n rel_codes = coder.encode(boxes, anchors)\n return rel_codes\n rel_codes_out = self.execute(graph_fn, [boxes, anchors])\n self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,\n atol=1e-04)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
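The expected_rel_codes in these tests follow the standard Faster R-CNN parameterization: ty = (y - ya) / ha, tx = (x - xa) / wa, th = log(h / ha), tw = log(w / wa), with boxes given as [ymin, xmin, ymax, xmax]. Hand-checking the first row of the unscaled encoding test:

import numpy as np

def encode(box, anchor):
    ymin, xmin, ymax, xmax = box
    ymin_a, xmin_a, ymax_a, xmax_a = anchor
    h, w = ymax - ymin, xmax - xmin
    ha, wa = ymax_a - ymin_a, xmax_a - xmin_a
    y, x = ymin + h / 2, xmin + w / 2
    ya, xa = ymin_a + ha / 2, xmin_a + wa / 2
    return [(y - ya) / ha, (x - xa) / wa, np.log(h / ha), np.log(w / wa)]

print(encode([10., 10., 20., 15.], [15., 12., 30., 18.]))
# -> [-0.5, -0.41666..., -0.40546..., -0.18232...], matching the test values

The scaled variant simply multiplies the four components by scale_factors = [2, 3, 4, 5], which is why the second test's expected codes are elementwise multiples of the first.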
] | [
[
"tensorflow.io.gfile.GFile",
"tensorflow.summary.scalar",
"tensorflow.summary.create_file_writer"
],
[
"tensorflow.sparse.to_dense",
"tensorflow.zeros",
"tensorflow.io.decode_png",
"tensorflow.stack",
"tensorflow.shape",
"tensorflow.io.parse_single_example",
"tensorflow.cast",
"tensorflow.io.decode_image",
"tensorflow.io.VarLenFeature",
"tensorflow.io.FixedLenFeature",
"tensorflow.zeros_like",
"tensorflow.map_fn",
"tensorflow.size"
],
[
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.identity",
"tensorflow.tools.graph_transforms.TransformGraph",
"numpy.stack",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.gfile.GFile",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"tensorflow.compat.v1.gfile.MakeDirs",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.name_scope"
],
[
"tensorflow.compat.v1.nn.softmax",
"tensorflow.compat.v1.test.is_gpu_available",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.variable_scope",
"tensorflow.contrib.training.HParams"
],
[
"tensorflow.test.main"
],
[
"tensorflow.compat.v1.keras.applications.ResNet101V2",
"tensorflow.compat.v1.keras.layers.Conv2D",
"tensorflow.compat.v1.keras.applications.resnet_v2.preprocess_input",
"tensorflow.compat.v1.keras.models.Model",
"tensorflow.compat.v1.keras.layers.Conv2DTranspose",
"tensorflow.compat.v1.keras.layers.ReLU",
"tensorflow.compat.v1.keras.applications.ResNet50V2",
"tensorflow.compat.v1.keras.layers.BatchNormalization"
],
[
"tensorflow.constant",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.shape",
"tensorflow.ones",
"tensorflow.expand_dims",
"tensorflow.where"
],
[
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.image.resize_bilinear"
],
[
"numpy.abs",
"numpy.random.rand",
"tensorflow.compat.v1.test.main"
],
[
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.test.main",
"tensorflow.compat.v1.tile",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.random_uniform",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.identity"
],
[
"tensorflow.less",
"tensorflow.logical_or",
"tensorflow.greater",
"tensorflow.ones_like",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.expand_dims",
"tensorflow.zeros_like",
"tensorflow.greater_equal"
],
[
"tensorflow.image.random_flip_left_right",
"tensorflow.shape",
"tensorflow.cast",
"tensorflow.io.parse_single_example",
"tensorflow.reshape",
"tensorflow.image.extract_jpeg_shape",
"tensorflow.io.FixedLenFeature",
"tensorflow.image.resize",
"tensorflow.image.convert_image_dtype"
],
[
"tensorflow.compat.v1.ones",
"numpy.allclose",
"numpy.array_equal",
"tensorflow.compat.v1.test.main",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.zeros",
"numpy.stack",
"numpy.array",
"tensorflow.compat.v1.constant"
],
[
"tensorflow.io.gfile.isdir",
"tensorflow.train.latest_checkpoint",
"tensorflow.compat.v2.keras.mixed_precision.experimental.Policy",
"tensorflow.compat.v2.keras.mixed_precision.experimental.set_policy",
"tensorflow.config.set_soft_device_placement"
],
[
"tensorflow.compat.v1.keras.layers.Lambda",
"tensorflow.compat.v1.keras.Model"
],
[
"tensorflow.keras.layers.Input",
"tensorflow.random.uniform",
"tensorflow.test.main",
"tensorflow.keras.initializers.RandomNormal",
"numpy.random.randint"
],
[
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.fake_quant_with_min_max_vars",
"tensorflow.compat.v1.sequence_mask",
"tensorflow.compat.v1.identity",
"tensorflow.python.training.moving_averages.assign_moving_average",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.sigmoid",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.keras.initializers.glorot_uniform",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.losses.add_loss",
"tensorflow.compat.v1.nn.batch_normalization",
"tensorflow.compat.v1.tanh",
"tensorflow.compat.v1.nn.conv2d",
"tensorflow.compat.v1.minimum",
"tensorflow.compat.v1.reduce_max",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.reduce_min",
"tensorflow.compat.v1.fake_quant_with_min_max_args",
"tensorflow.compat.v1.nn.l2_loss",
"tensorflow.compat.v1.nn.bias_add"
],
[
"tensorflow.compat.v1.nn.relu6",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.nn.relu",
"tensorflow.compat.v1.variable_scope"
],
[
"numpy.random.rand",
"tensorflow.compat.v1.random.uniform",
"tensorflow.compat.v1.constant"
],
[
"tensorflow.name_scope",
"tensorflow.unstack"
],
[
"numpy.array",
"tensorflow.compat.v1.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cns-iu/ccf-research | [
"e029c8985a249c1caec925e95f5286c505c706ea"
] | [
"hackathon/annotation_compare_viz.py"
] | [
"import json\nfrom re import split\nimport shutil\nimport os\nimport sys\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nfrom skimage import io\nfrom shapely.geometry import Polygon\n\nImage.MAX_IMAGE_PIXELS = None\n\n\ndef make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n else:\n shutil.rmtree(path)\n os.makedirs(path)\n\n\ndef dice(a, b):\n return 2 * a.intersection(b).area / (a.area + b.area)\n\n\ndef recall(a, b):\n return a.intersection(b).area / b.area\n\n\ndef precision(a, b):\n return a.intersection(b).area / a.area\n\n\ndef find_diff(dice_thred=0.5, draw_preview=True, log_score=True):\n # A - new json\n with open(file_A_path) as data_file:\n data = json.load(data_file)\n\n average_area = sum(\n [Polygon(item[\"geometry\"][\"coordinates\"][0]).area for item in data]\n ) / len(data)\n area_threshold = average_area / 50\n print(\"average area size: \", average_area)\n print(\"size threshold: \", area_threshold)\n\n coor_list_a = []\n\n for item in data:\n coor = item[\"geometry\"][\"coordinates\"]\n poly = Polygon(coor[0])\n if poly.area > area_threshold:\n coor_list_a.extend(item[\"geometry\"][\"coordinates\"])\n else:\n print(\"A ignore\", poly.area)\n A_x_list = [[xy[0] for xy in coor] for coor in coor_list_a]\n A_y_list = [[xy[1] for xy in coor] for coor in coor_list_a]\n A_id_list = [i for i in range(len(coor_list_a))]\n\n # B - old json\n with open(file_B_path) as data_file:\n data = json.load(data_file)\n\n coor_list_b = []\n\n for item in data:\n coor = item[\"geometry\"][\"coordinates\"]\n coor = [\n [[xy[1], xy[0]] for xy in coor[0]]\n ] # for some json. Comment this line if needed\n poly = Polygon(coor[0])\n if poly.area > area_threshold:\n coor_list_b.extend(coor)\n else:\n print(\"B ignore\", poly.area)\n B_x_list = [[xy[0] for xy in coor] for coor in coor_list_b]\n B_y_list = [[xy[1] for xy in coor] for coor in coor_list_b]\n\n # find difference\n center_list_new = []\n for i in range(len(A_x_list)):\n mean_x = (sum(A_x_list[i]) - A_x_list[i][-1]) / (len(A_x_list[i]) - 1)\n mean_y = (sum(A_y_list[i]) - A_y_list[i][-1]) / (len(A_y_list[i]) - 1)\n center_list_new.append((mean_x, mean_y))\n\n center_list_old = []\n for i in range(len(B_x_list)):\n mean_x = (sum(B_x_list[i]) - B_x_list[i][-1]) / (len(B_x_list[i]) - 1)\n mean_y = (sum(B_y_list[i]) - B_y_list[i][-1]) / (len(B_y_list[i]) - 1)\n center_list_old.append((mean_x, mean_y))\n\n new_added_list = []\n new_added_f1_list = []\n new_same_list = []\n new_revised_list = []\n f1_list = []\n\n positon_threshold = 500\n dice_threshold = dice_thred\n\n ignore_count = 0\n for i in A_id_list:\n x, y = center_list_new[i]\n new_p = Polygon(coor_list_a[i])\n min_f1 = 0\n min_j = -1\n _recall, _precision = -1, -1\n for j in range(len(center_list_old)):\n _x, _y = center_list_old[j]\n old_p = Polygon(coor_list_b[j])\n if (x - _x) ** 2 + (y - _y) ** 2 <= positon_threshold ** 2:\n f1 = dice(new_p, old_p)\n if f1 > min_f1:\n min_f1 = f1\n min_j = j\n _recall = recall(new_p, old_p)\n _precision = precision(new_p, old_p)\n\n if min_f1 >= 0.999:\n _flag = f\"Same\\t{min_f1}\"\n new_same_list.append(i)\n elif min_f1 >= dice_threshold:\n _flag = f\"Revised\\t{min_f1}\"\n new_revised_list.append(i)\n f1_list.append((min_f1, _recall, _precision))\n else:\n _flag = f\"Added\\t{min_f1}\"\n new_added_list.append(i)\n new_added_f1_list.append(min_f1)\n # print(min_f1)\n if _flag.startswith(\"Same\") or _flag.startswith(\"Revised\"):\n if min_j != -1:\n coor_list_b.pop(min_j)\n center_list_old.pop(min_j)\n # 
print(i, _flag)\n\n removed_count = len(center_list_old)\n print(f\"A\\tB\\tsame\\tmatch\\tadded\\tdeleted\")\n print(\n f\"{len(A_x_list)}\\t{len(B_x_list)}\\t{len(new_same_list)}\\t{len(new_revised_list)}\"\n f\"\\t{len(new_added_list)}\\t{removed_count}\"\n )\n print(f\"[FP: {len(new_added_list)}/{len(A_x_list)}]\")\n print(f\"[FN: {removed_count}/{len(B_x_list)}]\")\n # print(f\"{len(new_same_list)} same\")\n # print(f\"{len(new_revised_list)} revised\")\n # print(f\"{len(new_added_list)} added\")\n # print(f\"{removed_count} deleted\")\n\n # draw visualization\n if draw_preview:\n ref_image = io.imread(image_ref_path)\n background = np.zeros(shape=ref_image.shape, dtype=np.uint8)\n img = Image.fromarray(background, \"L\")\n img = img.convert(\"RGB\")\n font_path = r\"c:\\windows\\fonts\\bahnschrift.ttf\"\n font = ImageFont.truetype(font_path, size=48)\n title_font = ImageFont.truetype(font_path, size=72)\n ImageDraw.Draw(img).text(\n (100, 400),\n text=f\"DICE Threshold = {dice_thred}\",\n font=title_font,\n fill=\"white\",\n )\n ImageDraw.Draw(img).text(\n (100, 480),\n text=f\"PREDICTION [FP: {len(new_added_list)}/{len(A_x_list)}]\",\n font=title_font,\n fill=\"yellow\",\n )\n ImageDraw.Draw(img).text(\n (100, 560),\n text=f\"GROUND TRUTH [FN: {removed_count}/{len(B_x_list)}]\",\n font=title_font,\n fill=\"red\",\n )\n\n for i in new_added_list:\n coor_tuple = [(xy[1], xy[0]) for xy in coor_list_a[i]]\n # print(coor_tuple)\n ImageDraw.Draw(img).line(coor_tuple, fill=\"yellow\", width=6)\n # text\n f1 = new_added_f1_list[new_added_list.index(i)]\n if f1 > 0:\n text = \"{:.3f}\".format(f1) # + f\",{Polygon(coor_list_a[i]).area}\"\n ImageDraw.Draw(img).text(\n (center_list_new[i][1] - 40, center_list_new[i][0] + 60),\n text,\n font=font,\n )\n\n for coor_b in coor_list_b:\n coor_tuple = [(xy[1], xy[0]) for xy in coor_b]\n # print(coor_tuple)\n ImageDraw.Draw(img).line(coor_tuple, fill=\"red\", width=6)\n # text = f\",{Polygon(coor_b).area}\"\n # ImageDraw.Draw(img).text(\n # (coor_tuple[0][0], coor_tuple[0][1]),\n # text,\n # font=font,\n # )\n img = np.array(img).astype(\"uint8\")\n output_path = image_ref_path.replace(\n \".png\", f'_{str(dice_thred).replace(\".\",\"_\")}.png'\n )\n io.imsave(output_path, img)\n print(f\"Image saved to {output_path}\")\n\n # write score\n if log_score:\n txt_path = file_A_path.replace(\"json\", \"txt\")\n with open(txt_path, \"w\") as f:\n for item in f1_list:\n f.write(f\"{item[0]},{item[1]},{item[2]}\\n\")\n\n\nif __name__ == \"__main__\":\n file_A_path = (\n r\"C:\\Users\\yiju\\Desktop\\Copy\\Scripts\\masks\\1-tom-new-kidney\\pred_00a67c839.json\"\n )\n file_B_path = r\"C:\\Users\\yiju\\Desktop\\Copy\\Data\\hubmap-kidney-segmentation\\test\\00a67c839.json\"\n\n if len(sys.argv) >= 3:\n file_A_path = sys.argv[1]\n file_B_path = sys.argv[2]\n image_ref_path = file_A_path.replace(\"json\", \"png\")\n\n A_name = file_A_path.split(\"\\\\\")[-1].split(\".\")[0]\n B_name = file_B_path.split(\"\\\\\")[-1].split(\".\")[0]\n print(\"A: \", A_name)\n print(\"B: \", B_name)\n\n for d in [0.5]: # [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:\n find_diff(dice_thred=d, draw_preview=True, log_score=True)\n"
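The dice/recall/precision helpers at the top of this script are plain shapely-area ratios. A tiny sanity check on two unit squares overlapping in a 0.5 x 1 strip (shapely assumed installed, as in the script itself):

from shapely.geometry import Polygon

a = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
b = Polygon([(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1)])
inter = a.intersection(b).area                    # 0.5
print(2 * inter / (a.area + b.area))              # dice      = 0.5
print(inter / b.area)                             # recall    = 0.5
print(inter / a.area)                             # precision = 0.5

Note that dice is symmetric in a and b, while recall and precision swap roles when prediction and ground truth are exchanged; the matching loop above exploits this by treating file A as prediction and file B as ground truth.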
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
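Note: the matching score used by find_diff in the row above is the plain Dice coefficient over shapely polygon areas. A minimal standalone sketch (hypothetical test data, assuming only that shapely is installed):

# Two axis-aligned unit squares offset by 0.5: intersection area is 0.5,
# so dice = 2 * 0.5 / (1 + 1) = 0.5.
from shapely.geometry import Polygon

def dice(a, b):
    return 2 * a.intersection(b).area / (a.area + b.area)

a = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
b = Polygon([(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1)])
print(dice(a, b))  # 0.5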
yizhe-ang/MMSceneGraph | [
"d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba",
"d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba",
"d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba",
"d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba"
] | [
"mmdet/models/relation_heads/imp_head.py",
"tools/gradcheck.py",
"mmdet/models/relational_caption_heads/triplelstm_head.py",
"mmdet/models/mask_heads/transfer_mask_head.py"
] | [
"# ---------------------------------------------------------------\r\n# imp_head.py\r\n# Set-up time: 2020/5/21 下午11:22\r\n# Copyright (c) 2020 ICT\r\n# Licensed under The MIT License [see LICENSE for details]\r\n# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT\r\n# Contact: [email protected] [OR] [email protected]\r\n# ---------------------------------------------------------------\r\n\r\nfrom ..registry import HEADS\r\nimport torch\r\nfrom .relation_head import RelationHead\r\nfrom .approaches import IMPContext\r\nfrom mmdet.core import bbox2roi\r\n\r\n\r\[email protected]_module\r\nclass IMPHead(RelationHead):\r\n def __init__(self, **kwargs):\r\n super(IMPHead, self).__init__(**kwargs)\r\n\r\n self.context_layer = IMPContext(self.head_config, self.obj_classes, self.rel_classes)\r\n\r\n def forward(self,\r\n img,\r\n img_meta,\r\n det_result,\r\n gt_result=None,\r\n is_testing=False,\r\n ignore_classes=None):\r\n \"\"\"\r\n Obtain the relation prediction results based on detection results.\r\n Args:\r\n img (Tensor): of shape (N, C, H, W) encoding input images.\r\n Typically these should be mean centered and std scaled.\r\n\r\n img_meta (list[dict]): list of image info dict where each dict has:\r\n 'img_shape', 'scale_factor', 'flip', and may also contain\r\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\r\n For details on the values of these keys see\r\n `mmdet/datasets/pipelines/formatting.py:Collect`.\r\n det_result: (Result): Result containing bbox, label, mask, point, rels,\r\n etc. According to different mode, all the contents have been\r\n set correctly. Feel free to use it.\r\n gt_result : (Result): The ground truth information.\r\n is_testing:\r\n\r\n Returns:\r\n det_result with the following newly added keys:\r\n refine_scores (list[Tensor]): logits of object\r\n rel_scores (list[Tensor]): logits of relation\r\n rel_pair_idxes (list[Tensor]): (num_rel, 2) index of subject and object\r\n relmaps (list[Tensor]): (num_obj, num_obj):\r\n target_rel_labels (list[Tensor]): the target relation label.\r\n \"\"\"\r\n roi_feats, union_feats, det_result = self.frontend_features(img, img_meta, det_result, gt_result)\r\n if roi_feats.shape[0] == 0:\r\n return det_result\r\n\r\n refine_obj_scores, rel_scores = self.context_layer(roi_feats, union_feats, det_result)\r\n\r\n num_rels = [r.shape[0] for r in det_result.rel_pair_idxes]\r\n num_objs = [len(b) for b in det_result.bboxes]\r\n assert len(num_rels) == len(num_objs)\r\n\r\n if self.use_bias:\r\n obj_preds = refine_obj_scores.max(-1)[1]\r\n obj_preds = obj_preds.split(num_objs, dim=0)\r\n\r\n pair_preds = []\r\n for pair_idx, obj_pred in zip(det_result.rel_pair_idxes, obj_preds):\r\n pair_preds.append(torch.stack((obj_pred[pair_idx[:, 0]], obj_pred[pair_idx[:, 1]]), dim=1))\r\n pair_pred = torch.cat(pair_preds, dim=0)\r\n\r\n rel_scores = rel_scores + self.freq_bias.index_with_labels(pair_pred.long())\r\n\r\n # make some changes: list to tensor or tensor to tuple\r\n if self.training:\r\n det_result.target_labels = torch.cat(det_result.target_labels, dim=-1)\r\n det_result.target_rel_labels = torch.cat(det_result.target_rel_labels, dim=-1)\r\n else:\r\n refine_obj_scores = refine_obj_scores.split(num_objs, dim=0)\r\n rel_scores = rel_scores.split(num_rels, dim=0)\r\n\r\n det_result.refine_scores = refine_obj_scores\r\n det_result.rel_scores = rel_scores\r\n return det_result\r\n\r\n",
"# ---------------------------------------------------------------\r\n# gradcheck.py\r\n# Set-up time: 2020/4/16 下午9:57\r\n# Copyright (c) 2020 ICT\r\n# Licensed under The MIT License [see LICENSE for details]\r\n# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT\r\n# Contact: [email protected] [OR] [email protected]\r\n# ---------------------------------------------------------------\r\n\r\nimport os.path as osp\r\nimport sys\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch.autograd import gradcheck\r\n\r\n\r\n # noqa: E402, isort:skip\r\nfrom mmdet.ops.shape_aware_roi_align import ShapeAwareRoIAlign\r\n\r\nfeat_size = 15\r\nspatial_scale = 1.0 / 8\r\nimg_size = feat_size / spatial_scale\r\nnum_imgs = 2\r\nnum_rois = 20\r\n\r\nbatch_ind = np.random.randint(num_imgs, size=(num_rois, 1))\r\nrois = (np.random.rand(num_rois, 4) * img_size * 0.5).astype(np.int32)\r\nrois[:, 2:] += int(img_size * 0.5)\r\nrois = np.hstack((batch_ind, rois))\r\n\r\nroi_heights = rois[:, 4] - rois[:, 2] + 1\r\nroi_widths = rois[:, 3] - rois[:, 1] + 1\r\nmasks = []\r\nfor i in range(num_rois):\r\n masks.append(torch.from_numpy(np.random.rand(roi_heights[i], roi_widths[i])).float().cuda())\r\n\r\nfeat = torch.randn(\r\n num_imgs, 16, feat_size, feat_size, requires_grad=True, device='cuda:0')\r\nrois = torch.from_numpy(rois).float().cuda()\r\ninputs = (feat, rois, masks)\r\nprint('Gradcheck for roi align...')\r\ntest = gradcheck(ShapeAwareRoIAlign(3, spatial_scale), inputs, atol=1e-3, eps=1e-3)\r\nprint(test)\r\ntest = gradcheck(ShapeAwareRoIAlign(3, spatial_scale, 2), inputs, atol=1e-3, eps=1e-3)\r\nprint(test)\r\n",
"# ---------------------------------------------------------------\r\n# triplelstm_head.py\r\n# Set-up time: 2021/2/2 上午11:41\r\n# Copyright (c) 2020 ICT\r\n# Licensed under The MIT License [see LICENSE for details]\r\n# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT\r\n# Contact: [email protected] [OR] [email protected]\r\n# ---------------------------------------------------------------\r\n\r\nfrom ..registry import HEADS\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom .. import builder\r\nfrom ..losses import accuracy\r\nfrom mmdet.datasets import build_dataset\r\nimport os\r\nfrom mmdet.models.relation_heads.approaches import (FrequencyBias, RelationSampler, PostProcessor)\r\nfrom mmdet.core import force_fp32\r\nfrom mmdet.core import get_classes, get_predicates, get_attributes, get_tokens\r\nimport numpy as np\r\nimport mmcv\r\nfrom mmdet.core import bbox2roi\r\nimport itertools\r\nimport copy\r\nfrom mmcv.cnn import xavier_init, normal_init, kaiming_init\r\n\r\nfrom .relational_caption_head import RelationalCaptionHead\r\nfrom mmdet.models.relation_heads.approaches.motif_util import block_orthogonal\r\nfrom mmdet.models.captioners.utils import activation, expand_tensor\r\n\r\n\r\[email protected]_module\r\nclass TripleLSTMHead(RelationalCaptionHead):\r\n def __init__(self, **kwargs):\r\n super(TripleLSTMHead, self).__init__(**kwargs)\r\n\r\n rnn_input_size = self.head_config.rnn_input_dim + self.word_embed_config.word_embed_dim\r\n self.subj_lstm = nn.LSTMCell(rnn_input_size, self.head_config.hidden_dim)\r\n self.obj_lstm = nn.LSTMCell(rnn_input_size, self.head_config.hidden_dim)\r\n self.union_lstm = nn.LSTMCell(rnn_input_size, self.head_config.hidden_dim)\r\n\r\n self.relu = nn.ReLU(inplace=True)\r\n self.dropout = nn.Dropout(0.5)\r\n\r\n self.proj_union = nn.Linear(self.head_config.union_feat_dim + self.head_config.union_spatial_dim,\r\n self.head_config.rnn_input_dim)\r\n self.proj_subj = nn.Linear(self.head_config.single_feat_dim, self.head_config.rnn_input_dim)\r\n self.proj_obj = nn.Linear(self.head_config.single_feat_dim, self.head_config.rnn_input_dim)\r\n\r\n # REM module\r\n if self.head_config.with_rem:\r\n self.W = nn.Linear(self.head_config.single_feat_dim, 3 * self.head_config.context_hidden_dim)\r\n self.Wz = nn.Linear(self.head_config.context_hidden_dim, self.head_config.single_feat_dim)\r\n\r\n # rel caption module\r\n sequential = [nn.Embedding(self.vocab_size, self.word_embed_config.word_embed_dim)]\r\n sequential.append(activation(self.word_embed_config.word_embed_act, elu_alpha=self.word_embed_config.elu_alpha))\r\n if self.word_embed_config.word_embed_norm:\r\n sequential.append(nn.LayerNorm(self.word_embed_config.word_embed_dim))\r\n if self.word_embed_config.dropout_word_embed > 0:\r\n sequential.append(nn.Dropout(self.word_embed_config.dropout_word_embed))\r\n self.word_embed = nn.Sequential(*sequential)\r\n\r\n self.logit = nn.Linear(self.head_config.hidden_dim * 3, self.vocab_size)\r\n\r\n def init_weights(self):\r\n super(TripleLSTMHead, self).init_weights()\r\n\r\n kaiming_init(self.proj_union, distribution='uniform', a=1)\r\n kaiming_init(self.proj_subj, distribution='uniform', a=1)\r\n kaiming_init(self.proj_obj, distribution='uniform', a=1)\r\n if self.head_config.with_rem:\r\n kaiming_init(self.Wz, distribution='uniform', a=1)\r\n block_orthogonal(self.W.weight.data,\r\n [self.head_config.context_hidden_dim, self.head_config.single_feat_dim])\r\n\r\n def relcaption_forward(self, roi_subj_feats, roi_obj_feats, 
union_feats, state, wt):\r\n xt = self.word_embed(wt)\r\n h_subj, c_subj = self.subj_lstm(torch.cat((roi_subj_feats, xt), -1), (state[0][0], state[1][0]))\r\n h_obj, c_obj = self.obj_lstm(torch.cat((roi_obj_feats, xt), -1), (state[0][1], state[1][1]))\r\n h_union, c_union = self.union_lstm(torch.cat((union_feats, xt), -1), (state[0][2], state[1][2]))\r\n\r\n logit = torch.cat((h_subj, h_obj, h_union), -1)\r\n state = [torch.stack([h_subj, h_obj, h_union]), torch.stack([c_subj, c_obj, c_union])]\r\n\r\n return logit, state\r\n\r\n def get_relcaption_logprobs_state(self, state, wt, roi_subj_feats, roi_obj_feats, union_feats):\r\n output, state = self.relcaption_forward(roi_subj_feats, roi_obj_feats, union_feats, state, wt)\r\n logprobs = F.log_softmax(self.logit(output), dim=1)\r\n return logprobs, state\r\n\r\n def init_relcap_hidden(self, batch_size, device):\r\n return [torch.zeros(3, batch_size, self.head_config.hidden_dim).to(device),\r\n torch.zeros(3, batch_size, self.head_config.hidden_dim).to(device)]\r\n\r\n def decode_beam(self, beam_size, batch_size, device, inference_func, **input_vars):\r\n seq_logprob = torch.zeros((batch_size, 1, 1)).to(device)\r\n log_probs = []\r\n selected_words = None\r\n seq_mask = torch.ones((batch_size, beam_size, 1)).to(device)\r\n\r\n state = self.init_relcap_hidden(batch_size, device=device)\r\n wt = torch.zeros(batch_size, dtype=torch.long).to(device)\r\n\r\n outputs = []\r\n for t in range(self.seq_len):\r\n cur_beam_size = 1 if t == 0 else beam_size\r\n word_logprob, state = inference_func(state, wt, **input_vars)\r\n word_logprob = word_logprob.view(batch_size, cur_beam_size, -1)\r\n candidate_logprob = seq_logprob + word_logprob\r\n\r\n # Mask sequence if it reaches EOS\r\n if t > 0:\r\n mask = (selected_words.view(batch_size, cur_beam_size) != 0).float().unsqueeze(-1)\r\n seq_mask = seq_mask * mask\r\n word_logprob = word_logprob * seq_mask.expand_as(word_logprob)\r\n old_seq_logprob = seq_logprob.expand_as(candidate_logprob).contiguous()\r\n old_seq_logprob[:, :, 1:] = -999\r\n candidate_logprob = seq_mask * candidate_logprob + old_seq_logprob * (1 - seq_mask)\r\n\r\n selected_idx, selected_logprob = self.select(batch_size, beam_size, t, candidate_logprob)\r\n selected_beam = selected_idx / candidate_logprob.shape[-1]\r\n selected_words = selected_idx - selected_beam * candidate_logprob.shape[-1]\r\n\r\n for s in range(len(state)):\r\n state[s] = self._expand_state(batch_size, beam_size, cur_beam_size, state[s], selected_beam)\r\n\r\n seq_logprob = selected_logprob.unsqueeze(-1)\r\n seq_mask = torch.gather(seq_mask, 1, selected_beam.unsqueeze(-1))\r\n outputs = list(torch.gather(o, 1, selected_beam.unsqueeze(-1)) for o in outputs)\r\n outputs.append(selected_words.unsqueeze(-1))\r\n\r\n this_word_logprob = torch.gather(word_logprob, 1,\r\n selected_beam.unsqueeze(-1).expand(batch_size, beam_size,\r\n word_logprob.shape[-1]))\r\n this_word_logprob = torch.gather(this_word_logprob, 2, selected_words.unsqueeze(-1))\r\n\r\n log_probs = list(\r\n torch.gather(o, 1, selected_beam.unsqueeze(-1).expand(batch_size, beam_size, 1)) for o in log_probs)\r\n log_probs.append(this_word_logprob)\r\n selected_words = selected_words.view(-1, 1)\r\n wt = selected_words.squeeze(-1)\r\n\r\n if t == 0:\r\n for k, v in input_vars.items():\r\n input_vars[k] = expand_tensor(v, beam_size)\r\n\r\n seq_logprob, sort_idxs = torch.sort(seq_logprob, 1, descending=True)\r\n outputs = torch.cat(outputs, -1)\r\n outputs = torch.gather(outputs, 1, sort_idxs.expand(batch_size, 
beam_size, self.seq_len))\r\n log_probs = torch.cat(log_probs, -1)\r\n log_probs = torch.gather(log_probs, 1, sort_idxs.expand(batch_size, beam_size, self.seq_len))\r\n\r\n outputs = outputs.contiguous()[:, 0]\r\n log_probs = log_probs.contiguous()[:, 0]\r\n\r\n return outputs, log_probs\r\n\r\n def forward(self,\r\n img,\r\n img_meta,\r\n det_result,\r\n gt_result=None,\r\n is_testing=False,\r\n beam_size=3):\r\n \"\"\"\r\n Obtain the relation prediction results based on detection results.\r\n Args:\r\n img (Tensor): of shape (N, C, H, W) encoding input images.\r\n Typically these should be mean centered and std scaled.\r\n\r\n img_meta (list[dict]): list of image info dict where each dict has:\r\n 'img_shape', 'scale_factor', 'flip', and may also contain\r\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\r\n For details on the values of these keys see\r\n `mmdet/datasets/pipelines/formatting.py:Collect`.\r\n det_result: (Result): Result containing bbox, label, mask, point, rels,\r\n etc. According to different mode, all the contents have been\r\n set correctly. Feel free to use it.\r\n gt_result : (Result): The ground truth information.\r\n is_testing:\r\n\r\n Returns:\r\n det_result with the following newly added keys:\r\n refine_scores (list[Tensor]): logits of object\r\n rel_scores (list[Tensor]): logits of relation\r\n rel_pair_idxes (list[Tensor]): (num_rel, 2) index of subject and object\r\n relmaps (list[Tensor]): (num_obj, num_obj):\r\n target_rel_labels (list[Tensor]): the target relation label.\r\n \"\"\"\r\n roi_feats, union_feats, det_result = self.frontend_features(img, det_result, gt_result)\r\n if roi_feats.shape[0] == 0:\r\n return det_result\r\n\r\n num_rels = [r.shape[0] for r in det_result.rel_pair_idxes]\r\n num_objs = [len(b) for b in det_result.bboxes]\r\n\r\n num_images = img[0].size(0)\r\n assert num_images == len(det_result.rel_pair_idxes)\r\n rel_pair_index = []\r\n acc_obj = 0\r\n for i, num_obj in enumerate(num_objs):\r\n rel_pair_idx_i = det_result.rel_pair_idxes[i].clone()\r\n rel_pair_idx_i[:, 0] += acc_obj\r\n rel_pair_idx_i[:, 1] += acc_obj\r\n acc_obj += num_obj\r\n rel_pair_index.append(rel_pair_idx_i)\r\n rel_pair_index = torch.cat(rel_pair_index, 0)\r\n\r\n # prepare the pairwise subj, obj feats\r\n\r\n # forward the REM:\r\n if self.head_config.with_rem:\r\n split_roi_feats = roi_feats.split(num_objs)\r\n new_roi_feats = []\r\n for X in split_roi_feats:\r\n xgate = self.relu(self.W(X))\r\n Xa, Xb, Xc = xgate[:, :self.head_config.context_hidden_dim], \\\r\n xgate[:, self.head_config.context_hidden_dim: 2 * self.head_config.context_hidden_dim], \\\r\n xgate[:, 2 * self.head_config.context_hidden_dim: 3 * self.head_config.context_hidden_dim]\r\n\r\n R = F.softmax(torch.mm(Xa, Xb.transpose(0, 1)), -1)\r\n A = self.Wz(torch.mm(R, Xc))\r\n X = X + A\r\n new_roi_feats.append(X)\r\n roi_feats = torch.cat(new_roi_feats, 0)\r\n\r\n roi_subj_feats, roi_obj_feats = roi_feats[rel_pair_index[:, 0], :], roi_feats[rel_pair_index[:, 1], :]\r\n roi_subj_feats = self.dropout(self.relu(self.proj_subj(roi_subj_feats)))\r\n roi_obj_feats = self.dropout(self.relu(self.proj_obj(roi_obj_feats)))\r\n union_feats = self.dropout(self.relu(self.proj_union(union_feats)))\r\n\r\n # Relational Captioning Part\r\n if self.with_relcaption:\r\n batch_size = roi_subj_feats.size(0)\r\n # init hidden states\r\n state = self.init_relcap_hidden(batch_size, roi_subj_feats.device)\r\n\r\n if not is_testing:\r\n tgt_rel_inputs, tgt_rel_targets, tgt_rel_ipts = 
det_result.tgt_rel_cap_inputs, det_result.tgt_rel_cap_targets, det_result.tgt_rel_ipts\r\n tgt_rel_inputs = torch.cat(tgt_rel_inputs, 0)\r\n tgt_rel_targets = torch.cat(tgt_rel_targets, 0)\r\n assert tgt_rel_inputs is not None and tgt_rel_targets is not None\r\n tgt_rel_inputs, tgt_rel_targets = self.preprocess_seq(tgt_rel_inputs, tgt_rel_targets)\r\n det_result.tgt_rel_cap_inputs = tgt_rel_inputs\r\n det_result.tgt_rel_cap_targets = tgt_rel_targets\r\n\r\n rel_cap_scores = torch.zeros(batch_size, tgt_rel_inputs.size(1), self.vocab_size).to(\r\n roi_obj_feats.device)\r\n for t in range(tgt_rel_inputs.size(1)):\r\n if t >= 1 and self.ss_prob > 0:\r\n prob = torch.empty(batch_size).cuda().uniform_(0, 1)\r\n mask = prob < self.ss_prob\r\n if mask.sum() == 0:\r\n wt = tgt_rel_inputs[:, t].clone()\r\n else:\r\n ind = mask.nonzero().view(-1)\r\n wt = tgt_rel_inputs[:, t].clone()\r\n prob_prev = torch.exp(rel_cap_scores[:, t - 1].detach())\r\n wt.index_copy_(0, ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, ind))\r\n else:\r\n wt = tgt_rel_inputs[:, t].clone()\r\n\r\n if t >= 1 and tgt_rel_inputs[:, t].max() == 0:\r\n break\r\n\r\n logit, state = self.relcaption_forward(roi_subj_feats, roi_obj_feats, union_feats, state, wt)\r\n logit = self.dropout(self.logit(logit))\r\n rel_cap_scores[:, t] = logit\r\n\r\n det_result.rel_cap_scores = rel_cap_scores\r\n\r\n else:\r\n # beam search\r\n outputs, log_probs = self.decode_beam(beam_size, batch_size, roi_subj_feats.device,\r\n self.get_relcaption_logprobs_state,\r\n roi_subj_feats=roi_subj_feats,\r\n roi_obj_feats=roi_obj_feats,\r\n union_feats=union_feats)\r\n det_result.rel_cap_scores = log_probs\r\n det_result.rel_cap_seqs = outputs\r\n\r\n return det_result\r\n",
"import mmcv\r\nimport numpy as np\r\nimport pycocotools.mask as mask_util\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.nn.modules.utils import _pair\r\n\r\nfrom mmdet.core import auto_fp16, force_fp32, mask_target, get_point_from_mask\r\nfrom mmdet.ops import ConvModule, build_upsample_layer\r\nfrom mmdet.ops.carafe import CARAFEPack\r\nfrom ..builder import build_loss\r\nfrom ..registry import HEADS\r\n\r\n\r\[email protected]_module\r\nclass TransferMaskHead(nn.Module):\r\n\r\n def __init__(self,\r\n num_convs=4,\r\n roi_feat_size=14,\r\n in_channels=256,\r\n conv_kernel_size=3,\r\n conv_out_channels=256,\r\n num_classes=81,\r\n class_agnostic=False,\r\n upsample_cfg=dict(type='deconv', scale_factor=2),\r\n transfer_cfg=dict(num_fc=2, fc_in=5120, hidden_neurons=[1024, 256], relu='LeakyReLU', mlp_fusion=True),\r\n conv_cfg=None,\r\n norm_cfg=None,\r\n loss_mask=dict(\r\n type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):\r\n super(TransferMaskHead, self).__init__()\r\n self.upsample_cfg = upsample_cfg.copy()\r\n if self.upsample_cfg['type'] not in [\r\n None, 'deconv', 'nearest', 'bilinear', 'carafe'\r\n ]:\r\n raise ValueError(\r\n 'Invalid upsample method {}, accepted methods '\r\n 'are \"deconv\", \"nearest\", \"bilinear\", \"carafe\"'.format(\r\n self.upsample_cfg['type']))\r\n\r\n self.transfer_cfg = transfer_cfg.copy()\r\n if self.transfer_cfg['relu'] not in [None, 'ReLU', 'LeakyReLU']:\r\n raise ValueError('Invalid activation method {}, accpeted methods are \"ReLU\", \"LeakyReLU\"'.format(\r\n self.transfer_cfg['relu']))\r\n assert len(self.transfer_cfg['hidden_neurons']) == self.transfer_cfg['num_fc']\r\n\r\n self.num_convs = num_convs\r\n # WARN: roi_feat_size is reserved and not used\r\n self.roi_feat_size = _pair(roi_feat_size)\r\n self.in_channels = in_channels\r\n self.conv_kernel_size = conv_kernel_size\r\n self.conv_out_channels = conv_out_channels\r\n self.upsample_method = self.upsample_cfg.get('type')\r\n self.scale_factor = self.upsample_cfg.pop('scale_factor')\r\n self.num_classes = num_classes\r\n self.class_agnostic = class_agnostic\r\n self.conv_cfg = conv_cfg\r\n self.norm_cfg = norm_cfg\r\n self.fp16_enabled = False\r\n self.loss_mask = build_loss(loss_mask)\r\n\r\n self.convs = nn.ModuleList()\r\n for i in range(self.num_convs):\r\n in_channels = (\r\n self.in_channels if i == 0 else self.conv_out_channels)\r\n padding = (self.conv_kernel_size - 1) // 2\r\n self.convs.append(\r\n ConvModule(\r\n in_channels,\r\n self.conv_out_channels,\r\n self.conv_kernel_size,\r\n padding=padding,\r\n conv_cfg=conv_cfg,\r\n norm_cfg=norm_cfg))\r\n upsample_in_channels = (\r\n self.conv_out_channels if self.num_convs > 0 else in_channels)\r\n upsample_cfg_ = self.upsample_cfg.copy()\r\n if self.upsample_method is None:\r\n self.upsample = None\r\n elif self.upsample_method == 'deconv':\r\n upsample_cfg_.update(\r\n in_channels=upsample_in_channels,\r\n out_channels=self.conv_out_channels,\r\n kernel_size=self.scale_factor,\r\n stride=self.scale_factor)\r\n elif self.upsample_method == 'carafe':\r\n upsample_cfg_.update(\r\n channels=upsample_in_channels, scale_factor=self.scale_factor)\r\n else:\r\n # suppress warnings\r\n align_corners = (None\r\n if self.upsample_method == 'nearest' else False)\r\n upsample_cfg_.update(\r\n scale_factor=self.scale_factor,\r\n mode=self.upsample_method,\r\n align_corners=align_corners)\r\n self.upsample = build_upsample_layer(upsample_cfg_)\r\n\r\n transfer_modules = []\r\n for i in range(self.transfer_cfg.get('num_fc', 
2)):\r\n if i == 0:\r\n feat_in = self.transfer_cfg.get('fc_in', 5120)\r\n else:\r\n feat_in = self.transfer_cfg['hidden_neurons'][i - 1]\r\n feat_out = self.transfer_cfg['hidden_neurons'][i]\r\n transfer_modules.append(nn.Linear(feat_in, feat_out))\r\n if self.transfer_cfg['relu'] == 'ReLU':\r\n relu = nn.ReLU(inplace=True)\r\n elif self.transfer_cfg['relu'] == 'LeakyReLU':\r\n relu = nn.LeakyReLU(inplace=True)\r\n transfer_modules.append(relu)\r\n self.transfer = nn.Sequential(*transfer_modules)\r\n\r\n if self.transfer_cfg['mlp_fusion']:\r\n if self.upsample is not None:\r\n feat_in = self.conv_out_channels * (roi_feat_size * 2) ** 2\r\n feat_out = (roi_feat_size * 2) ** 2\r\n else:\r\n feat_in = self.conv_out_channels * (roi_feat_size ** 2)\r\n feat_out = roi_feat_size ** 2\r\n self.fusion_mlp = nn.Linear(feat_in, feat_out)\r\n else:\r\n self.fusion_mlp = None\r\n\r\n self.relu = nn.ReLU(inplace=True)\r\n self.debug_imgs = None\r\n\r\n def init_weights(self):\r\n for m in [self.upsample]:\r\n if m is None:\r\n continue\r\n elif isinstance(m, CARAFEPack):\r\n m.init_weights()\r\n else:\r\n nn.init.kaiming_normal_(\r\n m.weight, mode='fan_out', nonlinearity='relu')\r\n nn.init.constant_(m.bias, 0)\r\n\r\n for module_list in [self.transfer]:\r\n for m in module_list.modules():\r\n if isinstance(m, nn.Linear):\r\n nn.init.xavier_uniform_(m.weight)\r\n nn.init.constant_(m.bias, 0)\r\n if self.fusion_mlp is not None:\r\n nn.init.xavier_uniform_(self.fusion_mlp.weight)\r\n nn.init.constant_(self.fusion_mlp.bias, 0)\r\n\r\n @auto_fp16()\r\n def forward(self, x, det_weight):\r\n for conv in self.convs:\r\n x = conv(x)\r\n if self.upsample is not None:\r\n x = self.upsample(x)\r\n if self.upsample_method == 'deconv':\r\n x = self.relu(x)\r\n batch_size, conv_out_channels, conv_out_h, conv_out_w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]\r\n\r\n if self.fusion_mlp is not None:\r\n mlp_x = self.fusion_mlp(x.view(batch_size, -1))\r\n mlp_x = mlp_x.view(batch_size, 1, conv_out_h, conv_out_w)\r\n\r\n mask_weight = self.transfer(det_weight).T\r\n mask_opt = torch.matmul(x.permute(0, 2, 3, 1).contiguous().view(-1, conv_out_channels), mask_weight)\r\n x = mask_opt.view(-1, conv_out_h, conv_out_w, self.num_classes)\r\n x = x.permute(0, 3, 1, 2).contiguous()\r\n\r\n if self.fusion_mlp is not None:\r\n x = x + mlp_x\r\n mask_pred = x\r\n return mask_pred\r\n\r\n def get_target(self, sampling_results, gt_masks, rcnn_train_cfg):\r\n pos_proposals = [res.pos_bboxes for res in sampling_results]\r\n pos_assigned_gt_inds = [\r\n res.pos_assigned_gt_inds for res in sampling_results\r\n ]\r\n mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,\r\n gt_masks, rcnn_train_cfg)\r\n return mask_targets\r\n\r\n @force_fp32(apply_to=('mask_pred',))\r\n def loss(self, mask_pred, mask_targets, labels):\r\n loss = dict()\r\n if self.class_agnostic:\r\n loss_mask = self.loss_mask(mask_pred, mask_targets,\r\n torch.zeros_like(labels))\r\n else:\r\n loss_mask = self.loss_mask(mask_pred, mask_targets, labels)\r\n loss['loss_mask'] = loss_mask\r\n return loss\r\n\r\n def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,\r\n ori_shape, scale_factor, rescale, with_point=False):\r\n \"\"\"Get segmentation masks from mask_pred and bboxes.\r\n\r\n Args:\r\n mask_pred (Tensor or ndarray): shape (n, #class+1, h, w).\r\n For single-scale testing, mask_pred is the direct output of\r\n model, whose type is Tensor, while for multi-scale testing,\r\n it will be converted to numpy array outside of this 
method.\r\n det_bboxes (Tensor): shape (n, 4/5)\r\n det_labels (Tensor): shape (n, )\r\n img_shape (Tensor): shape (3, )\r\n rcnn_test_cfg (dict): rcnn testing config\r\n ori_shape: original image size\r\n\r\n Returns:\r\n list[list]: encoded masks\r\n \"\"\"\r\n if isinstance(mask_pred, torch.Tensor):\r\n mask_pred = mask_pred.sigmoid().cpu().numpy()\r\n assert isinstance(mask_pred, np.ndarray)\r\n # when enabling mixed precision training, mask_pred may be float16\r\n # numpy array\r\n mask_pred = mask_pred.astype(np.float32)\r\n\r\n format_mask_result = rcnn_test_cfg.get('format_mask_result', True)\r\n if format_mask_result:\r\n cls_segms = [[] for _ in range(self.num_classes - 1)]\r\n else:\r\n cls_segms = []\r\n bboxes = det_bboxes.cpu().numpy()[:, :4]\r\n labels = det_labels.cpu().numpy() + 1\r\n\r\n if rescale:\r\n img_h, img_w = ori_shape[:2]\r\n else:\r\n img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)\r\n img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)\r\n scale_factor = 1.0\r\n\r\n if with_point:\r\n if format_mask_result:\r\n contour_points = [[] for _ in range(self.num_classes - 1)]\r\n else:\r\n contour_points = []\r\n\r\n for i in range(bboxes.shape[0]):\r\n if not isinstance(scale_factor, (float, np.ndarray)):\r\n scale_factor = scale_factor.cpu().numpy()\r\n bbox = (bboxes[i, :] / scale_factor).astype(np.int32)\r\n label = labels[i]\r\n w = max(bbox[2] - bbox[0] + 1, 1)\r\n h = max(bbox[3] - bbox[1] + 1, 1)\r\n\r\n if not self.class_agnostic:\r\n mask_pred_ = mask_pred[i, label, :, :]\r\n else:\r\n mask_pred_ = mask_pred[i, 0, :, :]\r\n\r\n bbox_mask = mmcv.imresize(mask_pred_, (w, h))\r\n bbox_mask = (bbox_mask > rcnn_test_cfg.mask_thr_binary).astype(\r\n np.uint8)\r\n # transform to the whole image mask\r\n im_mask_np = np.zeros((img_h, img_w), dtype=np.uint8)\r\n im_mask_np[bbox[1]:min(bbox[1] + h, img_h), bbox[0]:min(bbox[0] + w, img_w)] = \\\r\n bbox_mask[:min(h, img_h - bbox[1]), :min(w, img_w - bbox[0])]\r\n\r\n # When for mask roi align/pooling, set the crop_mask=True; otherwise, it must be false.\r\n if rcnn_test_cfg.get('crop_mask', False):\r\n mask_result = bbox_mask\r\n else:\r\n mask_result = im_mask_np\r\n\r\n if rcnn_test_cfg.get('to_tensor', False):\r\n im_mask = torch.from_numpy(mask_result).to(det_labels)\r\n else:\r\n im_mask = mask_result.copy()\r\n\r\n if rcnn_test_cfg.get('rle_mask_encode', True):\r\n if isinstance(im_mask, torch.Tensor):\r\n mask_for_encode = im_mask.cpu().numpy().astype(np.uint8)\r\n else:\r\n mask_for_encode = im_mask\r\n rle = mask_util.encode(\r\n np.array(mask_for_encode[:, :, np.newaxis], order='F'))[0]\r\n if format_mask_result:\r\n cls_segms[label - 1].append(rle)\r\n else:\r\n cls_segms.append(rle)\r\n else:\r\n if format_mask_result:\r\n cls_segms[label - 1].append(im_mask)\r\n else:\r\n cls_segms.append(im_mask)\r\n\r\n # point: you must use the image mask rather than the box mask to get the points\r\n if with_point:\r\n points = get_point_from_mask([im_mask_np], [bbox],\r\n rcnn_test_cfg.get('mask_size', 56),\r\n rcnn_test_cfg.get('sample_num', 729),\r\n rcnn_test_cfg.get('dist_sample_thr', 1))[0]\r\n if rcnn_test_cfg.get('to_tensor', False):\r\n points = torch.from_numpy(points).to(det_bboxes)\r\n if format_mask_result:\r\n contour_points[label - 1].append(points)\r\n else:\r\n contour_points.append(points)\r\n\r\n if with_point:\r\n return cls_segms, contour_points\r\n else:\r\n return cls_segms\r\n"
] | [
[
"torch.stack",
"torch.cat"
],
[
"numpy.hstack",
"torch.randn",
"torch.from_numpy",
"numpy.random.rand",
"numpy.random.randint"
],
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.mm",
"torch.ones",
"torch.empty",
"torch.cat",
"torch.zeros",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.LSTMCell",
"torch.nn.Linear",
"torch.multinomial",
"torch.sort",
"torch.stack",
"torch.nn.ReLU"
],
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.zeros_like",
"torch.from_numpy",
"numpy.round",
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_",
"torch.nn.LeakyReLU",
"torch.nn.modules.utils._pair",
"torch.nn.ReLU",
"numpy.array",
"numpy.zeros",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
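Note: tools/gradcheck.py in the row above validates a custom CUDA op (ShapeAwareRoIAlign) by comparing analytical against numerical gradients. A minimal sketch of the same check on a plain differentiable function, so no custom op or GPU is required (hypothetical example; gradcheck expects float64 inputs):

import torch
from torch.autograd import gradcheck

# gradcheck perturbs each input element by eps and compares the resulting
# finite-difference gradient with autograd's analytical gradient.
x = torch.randn(4, 3, dtype=torch.float64, requires_grad=True)
ok = gradcheck(lambda t: (t * t).sum(), (x,), eps=1e-6, atol=1e-4)
print(ok)  # True if the two gradients agree within tolerance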
jvel07/ast | [
"600e7cf952ec59ac9cc1bb3170d3da7578e1f384"
] | [
"src/models/ast_models.py"
] | [
"# -*- coding: utf-8 -*-\n# @Time : 6/10/21 5:04 PM\n# @Author : Yuan Gong\n# @Affiliation : Massachusetts Institute of Technology\n# @Email : [email protected]\n# @File : ast_models.py\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\n\n\nimport torch\nimport torch.nn as nn\nfrom torch.cuda.amp import autocast\nimport wget\n\nos.environ['TORCH_HOME'] = '../../pretrained_models'\nimport timm\nfrom timm.models.layers import to_2tuple, trunc_normal_\n\n\n# override the timm package to relax the input shape constraint.\nclass PatchEmbed(nn.Module):\n def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):\n super().__init__()\n\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])\n self.img_size = img_size\n self.patch_size = patch_size\n self.num_patches = num_patches\n\n self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\n\n def forward(self, x):\n x = self.proj(x).flatten(2).transpose(1, 2)\n return x\n\n\nclass ASTModel(nn.Module):\n \"\"\"\n The AST model.\n :param label_dim: the label dimension, i.e., the number of total classes, it is 527 for AudioSet, 50 for ESC-50, and 35 for speechcommands v2-35\n :param fstride: the stride of patch spliting on the frequency dimension, for 16*16 patchs, fstride=16 means no overlap, fstride=10 means overlap of 6\n :param tstride: the stride of patch spliting on the time dimension, for 16*16 patchs, tstride=16 means no overlap, tstride=10 means overlap of 6\n :param input_fdim: the number of frequency bins of the input spectrogram\n :param input_tdim: the number of time frames of the input spectrogram\n :param imagenet_pretrain: if use ImageNet pretrained model\n :param audioset_pretrain: if use full AudioSet and ImageNet pretrained model\n :param model_size: the model size of AST, should be in [tiny224, small224, base224, base384], base224 and base 384 are same model, but are trained differently during ImageNet pretraining.\n \"\"\"\n\n def __init__(self, label_dim=3, fstride=10, tstride=10, input_fdim=128, input_tdim=1024, imagenet_pretrain=True,\n audioset_pretrain=True, model_size='base384', verbose=True):\n\n super(ASTModel, self).__init__()\n assert timm.__version__ == '0.4.5', 'Please use timm == 0.4.5, the code might not be compatible with newer versions.'\n\n if verbose == True:\n print('---------------AST Model Summary---------------')\n print('ImageNet pretraining: {:s}, AudioSet pretraining: {:s}'.format(str(imagenet_pretrain),\n str(audioset_pretrain)))\n # override timm input shape restriction\n timm.models.vision_transformer.PatchEmbed = PatchEmbed\n\n # if AudioSet pretraining is not used (but ImageNet pretraining may still apply)\n if audioset_pretrain == False:\n if model_size == 'tiny224':\n self.v = timm.create_model('vit_deit_tiny_distilled_patch16_224', pretrained=imagenet_pretrain)\n elif model_size == 'small224':\n self.v = timm.create_model('vit_deit_small_distilled_patch16_224', pretrained=imagenet_pretrain)\n elif model_size == 'base224':\n self.v = timm.create_model('vit_deit_base_distilled_patch16_224', pretrained=imagenet_pretrain)\n elif model_size == 'base384':\n self.v = timm.create_model('vit_deit_base_distilled_patch16_384', pretrained=imagenet_pretrain)\n else:\n raise Exception('Model size must be one of tiny224, small224, base224, base384.')\n self.original_num_patches = 
self.v.patch_embed.num_patches\n self.oringal_hw = int(self.original_num_patches ** 0.5)\n self.original_embedding_dim = self.v.pos_embed.shape[2]\n self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim),\n nn.Linear(self.original_embedding_dim, label_dim))\n\n # automatcially get the intermediate shape\n f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)\n num_patches = f_dim * t_dim\n self.v.patch_embed.num_patches = num_patches\n if verbose == True:\n print('frequncey stride={:d}, time stride={:d}'.format(fstride, tstride))\n print('number of patches={:d}'.format(num_patches))\n\n # the linear projection layer\n new_proj = torch.nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))\n if imagenet_pretrain == True:\n new_proj.weight = torch.nn.Parameter(torch.sum(self.v.patch_embed.proj.weight, dim=1).unsqueeze(1))\n new_proj.bias = self.v.patch_embed.proj.bias\n self.v.patch_embed.proj = new_proj\n\n # the positional embedding\n if imagenet_pretrain == True:\n # get the positional embedding from deit model, skip the first two tokens (cls token and distillation token), reshape it to original 2D shape (24*24).\n new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, self.original_num_patches,\n self.original_embedding_dim).transpose(1,\n 2).reshape(\n 1, self.original_embedding_dim, self.oringal_hw, self.oringal_hw)\n # cut (from middle) or interpolate the second dimension of the positional embedding\n if t_dim <= self.oringal_hw:\n new_pos_embed = new_pos_embed[:, :, :,\n int(self.oringal_hw / 2) - int(t_dim / 2): int(self.oringal_hw / 2) - int(\n t_dim / 2) + t_dim]\n else:\n new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(self.oringal_hw, t_dim),\n mode='bilinear')\n # cut (from middle) or interpolate the first dimension of the positional embedding\n if f_dim <= self.oringal_hw:\n new_pos_embed = new_pos_embed[:, :,\n int(self.oringal_hw / 2) - int(f_dim / 2): int(self.oringal_hw / 2) - int(\n f_dim / 2) + f_dim, :]\n else:\n new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(f_dim, t_dim), mode='bilinear')\n # flatten the positional embedding\n new_pos_embed = new_pos_embed.reshape(1, self.original_embedding_dim, num_patches).transpose(1, 2)\n # concatenate the above positional embedding with the cls token and distillation token of the deit model.\n self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))\n else:\n # if not use imagenet pretrained model, just randomly initialize a learnable positional embedding\n # TODO can use sinusoidal positional embedding instead\n new_pos_embed = nn.Parameter(\n torch.zeros(1, self.v.patch_embed.num_patches + 2, self.original_embedding_dim))\n self.v.pos_embed = new_pos_embed\n trunc_normal_(self.v.pos_embed, std=.02)\n\n # now load a model that is pretrained on both ImageNet and AudioSet\n elif audioset_pretrain == True:\n if audioset_pretrain == True and imagenet_pretrain == False:\n raise ValueError(\n 'currently model pretrained on only audioset is not supported, please set imagenet_pretrain = True to use audioset pretrained model.')\n if model_size != 'base384':\n raise ValueError('currently only has base384 AudioSet pretrained model.')\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n if os.path.exists('../../pretrained_models/audioset_10_10_0.4593.pth') == False:\n # this model performs 0.4593 mAP on the audioset eval set\n audioset_mdl_url = 
'https://www.dropbox.com/s/cv4knew8mvbrnvq/audioset_0.4593.pth?dl=1'\n wget.download(audioset_mdl_url, out='../../pretrained_models/audioset_10_10_0.4593.pth')\n sd = torch.load('../../pretrained_models/audioset_10_10_0.4593.pth', map_location=device)\n # sd = torch.load('../../pretrained_models/ast_audioset.pth', map_location=device)\n audio_model = ASTModel(label_dim=527, fstride=10, tstride=10, input_fdim=128, input_tdim=1024,\n imagenet_pretrain=False, audioset_pretrain=False, model_size='base384',\n verbose=False)\n audio_model = torch.nn.DataParallel(audio_model)\n print(\"***************USING=>\", torch.cuda.current_device())\n audio_model.load_state_dict(sd, strict=False)\n self.v = audio_model.module.v\n self.original_embedding_dim = self.v.pos_embed.shape[2]\n self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim),\n nn.Linear(self.original_embedding_dim, label_dim))\n\n f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)\n num_patches = f_dim * t_dim\n self.v.patch_embed.num_patches = num_patches\n if verbose == True:\n print('frequncey stride={:d}, time stride={:d}'.format(fstride, tstride))\n print('number of patches={:d}'.format(num_patches))\n\n new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, 1212, 768).transpose(1, 2).reshape(1, 768,\n 12, 101)\n # if the input sequence length is larger than the original audioset (10s), then cut the positional embedding\n if t_dim < 101:\n new_pos_embed = new_pos_embed[:, :, :, 50 - int(t_dim / 2): 50 - int(t_dim / 2) + t_dim]\n # otherwise interpolate\n else:\n new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(12, t_dim), mode='bilinear')\n print(\"NEW POST EMBED:\", new_pos_embed.shape)\n new_pos_embed = new_pos_embed.reshape(1, 768, num_patches).transpose(1, 2)\n print(\"NEW POST EMBED:\", new_pos_embed.shape)\n self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))\n\n def get_shape(self, fstride, tstride, input_fdim=128, input_tdim=1024):\n test_input = torch.randn(1, 1, input_fdim, input_tdim)\n test_proj = nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))\n test_out = test_proj(test_input)\n f_dim = test_out.shape[2]\n t_dim = test_out.shape[3]\n return f_dim, t_dim\n\n @autocast()\n def forward(self, x):\n \"\"\"\n :param x: the input spectrogram, expected shape: (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)\n :return: prediction\n \"\"\"\n # expect input x = (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)\n x = x.unsqueeze(1)\n x = x.transpose(2, 3)\n\n B = x.shape[0]\n x = self.v.patch_embed(x)\n cls_tokens = self.v.cls_token.expand(B, -1, -1)\n dist_token = self.v.dist_token.expand(B, -1, -1)\n x = torch.cat((cls_tokens, dist_token, x), dim=1)\n x = x + self.v.pos_embed\n x = self.v.pos_drop(x)\n for blk in self.v.blocks:\n x = blk(x)\n x = self.v.norm(x)\n x = (x[:, 0] + x[:, 1]) / 2\n\n # x = self.mlp_head(x)\n return x\n\n\n# if __name__ == '__main__':\n# input_tdim = 100\n# ast_mdl = ASTModel(input_tdim=input_tdim)\n# # input a batch of 10 spectrogram, each with 100 time frames and 128 frequency bins\n# test_input = torch.rand([10, input_tdim, 128])\n# test_output = ast_mdl(test_input)\n# # output should be in shape [10, 527], i.e., 10 samples, each with prediction of 527 classes.\n# print(test_output.shape)\n#\n# input_tdim = 512\n# ast_mdl = ASTModel(input_tdim=input_tdim, label_dim=50, audioset_pretrain=True)\n# # input a 
batch of 10 spectrogram, each with 512 time frames and 128 frequency bins\n# test_input = torch.rand([10, input_tdim, 128])\n# test_output = ast_mdl(test_input)\n# # output should be in shape [10, 527], i.e., 10 samples, each with prediction of 527 classes.\n# print(test_output.shape)\n"
] | [
[
"torch.cat",
"torch.load",
"torch.randn",
"torch.zeros",
"torch.nn.Conv2d",
"torch.cuda.current_device",
"torch.sum",
"torch.nn.LayerNorm",
"torch.cuda.amp.autocast",
"torch.nn.Linear",
"torch.nn.functional.interpolate",
"torch.cuda.is_available",
"torch.nn.DataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
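Note: the core trick in ASTModel above is adapting a ViT positional embedding to a different spectrogram size by treating it as a 2D grid and bilinearly interpolating. A minimal sketch of that step in isolation (shapes are illustrative, taken from the 12 x 101 AudioSet grid in the row above):

import torch
import torch.nn.functional as F

pos_embed = torch.randn(1, 12 * 101, 768)  # flattened (freq=12, time=101) patch grid
# unflatten to a 2D grid with channels first: (1, 768, 12, 101)
grid = pos_embed.reshape(1, 12, 101, 768).permute(0, 3, 1, 2)
# interpolate the time axis down to 50 patches
resized = F.interpolate(grid, size=(12, 50), mode='bilinear', align_corners=False)
# flatten back to token order: (1, 12*50, 768)
new_pos_embed = resized.flatten(2).transpose(1, 2)
print(new_pos_embed.shape)  # torch.Size([1, 600, 768])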
zhaohengyin/irgail_example | [
"89f7661b5ab08bdf79686eaf8933ad7b5badced4"
] | [
"utils.py"
] | [
"from tqdm import tqdm\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\ndef build_mlp(input_dim, output_dim, hidden_units=[64, 64],\n hidden_activation=nn.Tanh(), output_activation=None):\n layers = []\n units = input_dim\n for next_units in hidden_units:\n layers.append(nn.Linear(units, next_units))\n layers.append(hidden_activation)\n units = next_units\n layers.append(nn.Linear(units, output_dim))\n if output_activation is not None:\n layers.append(output_activation)\n return nn.Sequential(*layers)\n\ndef dict_concat(x):\n return torch.cat([value for key, value in x.items()], dim=0)\n\ndef dict_config_concat(x):\n return torch.cat([torch.cat((value, key.repeat(value.size(0),1)), dim=1) for key, value in x.items()], dim=0)\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.Tanh"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
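Hypothetical usage of build_mlp from the utils.py row above (assumes utils.py is on the import path): a two-hidden-layer tanh network mapping 4-d observations to 2-d outputs.

import torch
from utils import build_mlp  # assumption: the module above is importable as utils

net = build_mlp(input_dim=4, output_dim=2, hidden_units=[64, 64])
out = net(torch.randn(8, 4))  # batch of 8 observations
print(out.shape)  # torch.Size([8, 2])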
kyuhoJeong11/GrewRL | [
"a514698df8d38df34de0bd1667d99927f0aa3885",
"a514698df8d38df34de0bd1667d99927f0aa3885",
"a514698df8d38df34de0bd1667d99927f0aa3885",
"a514698df8d38df34de0bd1667d99927f0aa3885"
] | [
"carla/PythonAPI/examples/tutorial4.py",
"robustRL-master/robustRL/samplers.py",
"robustRL-master/robustRL/gym_env.py",
"maml_rl_taewoo/rllab/misc/logger.py"
] | [
"import glob\nimport os\nimport sys\nimport random\nimport time\nimport numpy as np\nimport cv2\nimport math\nfrom collections import deque\nfrom keras.applications.xception import Xception\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.optimizers import Adam\nfrom keras.models import Model\n\n'''\nCarla 패키지가 사용하는 egg파일 탐색\n'''\ntry:\n sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n pass\nimport carla\n\nSHOW_PREVIEW = False\nIM_WIDTH = 640\nIM_HEIGHT = 480\nSECONDS_PER_EPISODE = 10\nREPLAY_MEMORY_SIZE = 5_000\nMIN_REPLAY_MEMORY_SIZE = 1_000\nMINIBATCH_SIZE = 16\nPREDICTION_BATCH_SIZE = 1\nTRAINING_BATCH_SIZE = MINIBATCH_SIZE // 4\nUPDATE_TARGET_EVERY = 5\nMODEL_NAME = \"Xception\"\n\nMEMORY_FRACTION = 0.8\nMIN_REWARD = -200\n\nEPISODES = 100\n\nDISCOUNT = 0.99\nepsilon = 1\nEPSILON_DECAY = 0.95\nMIN_EPSILON = 0.001\n\nAGGREGATE_STATS_EVERY = 10\n\n'''\n환경 class 세팅\n'''\nclass CarEnv:\n SHOW_CAM = SHOW_PREVIEW # 미리보기 여부\n STEER_AMT = 1.0\n im_width = IM_WIDTH\n im_height = IM_HEIGHT\n front_camera = None\n actor_list = []\n collision_hist = [] # collision 목록\n\n def __init__(self):\n self.client = carla.Client(\"localhost\", 2000)\n self.client.set_timeout(2.0)\n\n # client가 켜져 있다면, world 검색 가능.\n self.world = self.client.get_world()\n\n # world에는 우리가 시뮬레이션에 액터를 새로 추가할 때 사용할 수 있는 bp 목록이 있다.\n self.blueprint_library = self.world.get_blueprint_library()\n\n # 차량 모델 지정\n self.model_3 = self.blueprint_library.filter(\"model3\")[0]\n\n def reset(self):\n self.collision_hist = []\n self.actor_list = []\n\n # 랜덤한 위치에 차량 생성 후 actor list에 추가\n self.transform = random.choice(self.world.get_map().get_spawn_points())\n self.vehicle = self.world.spawn_actor(self.model_3, self.transform)\n self.actor_list.append(self.vehicle)\n\n # rgb Camera 센서의 bp 가져오기\n self.rgb_cam = self.blueprint_library.find('sensor.camera.rgb')\n\n # rgb Camera 센서로 입력 받은 이미지의 크기 조절\n self.rgb_cam.set_attribute(\"image_size_x\", f\"{self.im_width}\")\n self.rgb_cam.set_attribute(\"image_size_y\", f\"{self.im_height}\")\n self.rgb_cam.set_attribute(\"fov\", f\"110\")\n\n # sensor의 위치 조정\n transform = carla.Transform(carla.Location(x=2.5, z=0.7))\n\n # 센서의 생성 및 리스트 추가.\n self.sensor = self.world.spawn_actor(self.rgb_cam, transform, attach_to=self.vehicle)\n self.actor_list.append(self.sensor)\n\n # 센서로 입력 받은 데이터를 활용하기 위해 lambda 함수 사용\n self.sensor.listen(lambda data: self.process_img(data))\n\n self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))\n\n '''\n 차량 생성 시 차가 지면에 부딪히면 충돌이 발생. \n 또는 센서들이 초기화되고 값을 반환하는 데 시간이 걸릴 수 있음. \n 따라서 4초 정도의 대기시간을 사용.\n '''\n time.sleep(4)\n\n # collision 센서의 bp 가져오기\n colsensor = self.blueprint_library.find(\"sensor.other.collision\")\n\n # 센서의 생성 및 리스트 추가\n self.colsensor = self.world.spawn_actor(colsensor, transform, attach_to=self.vehicle)\n self.actor_list.append(self.colsensor)\n\n # 센서로 입력 받은 데이터를 활용하기 위해 lambda 함수 사용\n self.colsensor.listen(lambda event: self.collision_data(event))\n\n while self.front_camera is None:\n time.sleep(0.01)\n\n '''\n 에피소드의 실제 확인 시간 기록.\n 브레이크와 스로틀이 사용되지 않는지 확인 후\n 첫 번째 관찰 결과 반환. 
\n '''\n self.episode_start = time.time()\n self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))\n\n return self.front_camera\n\n # collision data 처리\n def collision_data(self, event):\n self.collision_hist.append(event)\n\n # image data 처리\n def process_img(self, image):\n i = np.array(image.raw_data)\n #print(i.shape)\n i2 = i.reshape((self.im_height, self.im_width, 4))\n i3 = i2[:, :, :3]\n if self.SHOW_CAM:\n cv2.imshow(\"\", i3)\n cv2.waitKey(1)\n self.front_camera = i3\n\n # action, reward, done, any_extra_info 관리\n def step(self, action):\n if action == 0:\n self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=-1*self.STEER_AMT))\n elif action == 1:\n self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer= 0))\n elif action == 2:\n self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=1*self.STEER_AMT))\n\n v = self.vehicle.get_velocity()\n kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))\n\n if len(self.collision_hist) != 0:\n done = True\n reward = -200\n elif kmh < 50:\n done = False\n reward = -1\n else:\n done = False\n reward = 1\n\n if self.episode_start + SECONDS_PER_EPISODE < time.time():\n done = True\n\n return self.front_camera, reward, done, None\n\n# 강화 학습\nclass DQNAgent:\n def __init__(self):\n self.model = self.create_model()\n self.target_model = self.create_model()\n self.target_model.set_weights(self.model.get_weights())\n\n self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)\n\n self.tensorboard = ModifiedTensorBoard(log_dir=f\"logs/{MODEL_NAME}-{int(time.time())}\")\n self.target_update_counter = 0\n self.graph = tf.get_default_graph()\n\n self.terminate = False\n self.last_logged_episode = 0\n self.training_initialized = False\n\n # 모델 생성\n def create_model(self):\n base_model = Xception(weights=None, include_top=False, input_shape=(IM_HEIGHT, IM_WIDTH,3))\n\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n\n predictions = Dense(3, activation=\"linear\")(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n model.compile(loss=\"mse\", optimizer=Adam(lr=0.001), metrics=[\"accuracy\"])\n return model\n\n\n def update_replay_memory(self, transition):\n # transition = (current_state, action, reward, new_state, done)\n self.replay_memory.append(transition)\n\n def train(self):\n if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:\n return\n\n minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)\n\n current_states = np.array([transition[0] for transition in minibatch])/255\n with self.graph.as_default():\n current_qs_list = self.model.predict(current_states, PREDICTION_BATCH_SIZE)\n\n new_current_states = np.array([transition[3] for transition in minibatch])/255\n with self.graph.as_default():\n future_qs_list = self.target_model.predict(new_current_states, PREDICTION_BATCH_SIZE)\n\n # x = input / y = output\n X = []\n y = []\n\n for index, (current_state, action, reward, new_state, done) in enumerate(minibatch):\n if not done:\n max_future_q = np.max(future_qs_list[index])\n new_q = reward + DISCOUNT * max_future_q\n else:\n new_q = reward\n\n current_qs = current_qs_list[index]\n current_qs[action] = new_q\n\n X.append(current_state)\n y.append(current_qs)\n\n '''\n step 단위가 아니라 episode 단위로 log 기록\n log_this_step이 true일 때만 TensorBoard에 log 기록\n '''\n log_this_step = False\n if self.tensorboard.step > self.last_logged_episode:\n log_this_step = True\n self.last_log_episode = self.tensorboard.step\n\n with self.graph.as_default():\n self.model.fit(np.array(X)/255, 
np.array(y), batch_size=TRAINING_BATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if log_this_step else None)\n\n\n if log_this_step:\n self.target_update_counter += 1\n\n # target_model 업데이트 여부 확인\n if self.target_update_counter > UPDATE_TARGET_EVERY:\n self.target_model.set_weights(self.model.get_weights())\n self.target_update_counter = 0\n\n def get_qs(self, state):\n return self.model.predict(np.array(state).reshape(-1 *state.shape)/255)[0]\n\n # train 진행\n def train_in_loop(self):\n X = np.random.uniform(size=(1, IM_HEIGHT, IM_WIDTH, 3)).astype(np.float32)\n y = np.random.uniform(size=(1, 3)).astype(np.float32)\n with self.graph.as_default():\n self.model.fit(X,y, verbose=False, batch_size=1)\n\n self.training_initialized = True\n\n while True:\n if self.terminate:\n return\n self.train()\n time.sleep(0.01)",
"\"\"\"\n Sampler functions to be used with the policy search algorithms\n\n Aravind Rajeswaran, 08/04/16\n\"\"\"\n\nimport numpy as np\nimport copy\nimport multiprocessing as mp\n\nfrom rllab.misc import tensor_utils\n\nfrom MDP_funcs import *\n# above MDP_funs is local copy\n\n# =======================================================================================\n# Functions for sampling paths\n\ndef sample_paths(N, \n policy, \n baseline, \n env_mode='train', \n T=1e6, \n gamma=1,\n mujoco_env=True, \n normalized_env=False,\n env=None):\n # Directly specifying env works only when sampling in series\n\n # set random seed (needed for multiprocessing)\n np.random.seed()\n\n if env == None:\n env = get_environment(env_mode)\n T = min(T, env.horizon)\n T = max(1, T) \n # sometimes, env is not initialized correctly in multiprocessing\n # this is just a sanity check and step size should essentially be zero.\n\n print(\"####### Worker started #######\")\n\n paths = []\n\n for ep in range(N):\n \n observations=[]\n actions=[]\n rewards=[]\n agent_infos = []\n env_infos = []\n qpos = []\n qvel = []\n\n o = env.reset()\n if mujoco_env == True:\n if normalized_env:\n qpos.append(env.wrapped_env.env.model.data.qpos.reshape(-1))\n qvel.append(env.wrapped_env.env.model.data.qvel.reshape(-1))\n else:\n qpos.append(env.env.model.data.qpos.reshape(-1))\n qvel.append(env.env.model.data.qvel.reshape(-1))\n done = False\n t = 0\n\n while t < T and done != True:\n a, agent_info = policy.get_action(o)\n next_o, r, done, env_info = env.step(a)\n observations.append(env.observation_space.flatten(o))\n actions.append(env.action_space.flatten(a))\n rewards.append(r)\n agent_infos.append(agent_info)\n env_infos.append(env_info)\n if mujoco_env == True:\n if normalized_env:\n qpos.append(env.wrapped_env.env.model.data.qpos.reshape(-1))\n qvel.append(env.wrapped_env.env.model.data.qvel.reshape(-1))\n else:\n qpos.append(env.env.model.data.qpos.reshape(-1))\n qvel.append(env.env.model.data.qvel.reshape(-1))\n o = next_o\n t += 1\n\n # make a path dictionary\n # Also store the path belief and env data used in the trajectory\n #try:\n # path_belief = env.env.belief\n #except Exception as e:\n # path_belief = str(e)\n # path_model = env.env\n\n qpos_flat = tensor_utils.stack_tensor_list(qpos)\n qvel_flat = tensor_utils.stack_tensor_list(qvel)\n\n path = dict(\n observations=tensor_utils.stack_tensor_list(observations),\n actions=tensor_utils.stack_tensor_list(actions),\n rewards=tensor_utils.stack_tensor_list(rewards),\n agent_infos=tensor_utils.stack_tensor_dict_list(agent_infos),\n env_infos=tensor_utils.stack_tensor_dict_list(env_infos),\n qpos=qpos_flat,\n qvel=qvel_flat,\n #path_belief=path_belief,\n #path_model=path_model,\n )\n\n # TODO: Storing the path model is too space inefficient. 
Need to find alternative\n \n # compute returns using the path\n path_baseline = baseline.predict(path)\n advantages = []\n returns = []\n return_so_far = 0\n for t in range(len(rewards) - 1, -1, -1):\n return_so_far = rewards[t] + gamma * return_so_far\n returns.append(return_so_far)\n advantage = return_so_far - path_baseline[t]\n advantages.append(advantage)\n\n # advantages and returns are stored backward in time\n advantages = np.array(advantages[::-1])\n returns = np.array(returns[::-1])\n \n # normalize advantages\n advantages = (advantages - np.mean(advantages)) / (np.std(advantages) + 1e-8)\n \n path[\"advantages\"] = advantages\n path[\"returns\"] = returns\n\n paths.append(path)\n\n #print \"Env body_mass : \", env.env.model.body_mass[1]\n print(\"====== Worker finished ======\")\n\n return paths\n\n\ndef _sample_paths_star(args_list):\n \"\"\" Constructor function to pass an args_list.\n Can call pool.map on this function \"\"\"\n return sample_paths(*args_list)\n\n\ndef sample_paths_parallel(N,\n policy,\n baseline,\n env_mode='train',\n T=1e6, gamma=1,\n num_cpu=None,\n max_process_time=120,\n max_timeouts=4,\n mujoco_env=True,\n normalized_env=False):\n \n if num_cpu == None or num_cpu == 'max':\n num_cpu = mp.cpu_count()\n elif num_cpu == 1:\n return sample_paths(N, policy, baseline, evn_mode, T, gamma, mujoco_env, normalized_env)\n else:\n num_cpu = min(mp.cpu_count(), num_cpu) \n\n paths_per_cpu = int(np.ceil(N/num_cpu))\n args_list = [paths_per_cpu, policy, baseline, env_mode, T, gamma, mujoco_env, normalized_env]\n\n results = _try_multiprocess(args_list, num_cpu, max_process_time, max_timeouts)\n\n paths = []\n # result is a paths type and results is list of paths\n for result in results:\n for path in result:\n paths.append(path) \n\n return paths\n\n\ndef _try_multiprocess(args_list, num_cpu, max_process_time, max_timeouts):\n \n # Base case\n if max_timeouts == 0:\n return None\n\n pool = mp.Pool(processes=num_cpu, maxtasksperchild=1)\n parallel_runs = [pool.apply_async(_sample_paths_star, args=(args_list,)) for _ in range(num_cpu)]\n\n try:\n results = [p.get(timeout=max_process_time) for p in parallel_runs]\n except Exception as e:\n print(str(e))\n print(\"Timeout Error raised... Trying again\")\n pool.close()\n pool.terminate()\n pool.join() \n return _try_multiprocess(args_list, num_cpu, max_process_time, max_timeouts-1)\n\n pool.close()\n pool.terminate()\n pool.join() \n return results\n\n# =======================================================================================\n# Functions for performance evaluation\n\ndef policy_evaluation(policy, \n env_mode='train',\n num_episodes=10,\n horizon=1e6,\n visual=False,\n gamma=1):\n # TODO: Add functionality to sample parallel paths and evaluate policy\n\n env = get_environment(env_mode)\n horizon = min(env.horizon, horizon)\n\n ep_returns = np.zeros(num_episodes)\n\n for ep in range(num_episodes):\n o = env.reset()\n t = 0\n done = False\n while t < horizon and done != True:\n if visual == True:\n env.render()\n a = policy.get_action(o)[0]\n o, r, done, _ = env.step(a)\n ep_returns[ep] += (gamma ** t) * r\n t += 1\n\n mean_eval = np.mean(ep_returns)\n std_eval = np.std(ep_returns)\n min_eval = np.amin(ep_returns)\n max_eval = np.amax(ep_returns)\n soft_min = np.percentile(ep_returns,15)\n soft_max = np.percentile(ep_returns,85)\n\n return (mean_eval, std_eval, min_eval, max_eval, soft_min, soft_max, num_episodes)\n",
"from __future__ import print_function\nfrom __future__ import absolute_import\n\nimport gym\nimport gym.envs\nimport gym.spaces\nfrom gym.monitoring import monitor\nimport os\nimport os.path as osp\nfrom rllab.envs.base import Env, Step\nfrom rllab.core.serializable import Serializable\nfrom rllab.spaces.box import Box\nfrom rllab.spaces.discrete import Discrete\nfrom rllab.misc import logger\nimport logging\nimport numpy as np\n\ndef convert_gym_space(space):\n if isinstance(space, gym.spaces.Box):\n return Box(low=space.low, high=space.high)\n elif isinstance(space, gym.spaces.Discrete):\n return Discrete(n=space.n)\n else:\n raise NotImplementedError\n\n\nclass CappedCubicVideoSchedule(object):\n def __call__(self, count):\n return monitor.capped_cubic_video_schedule(count)\n\n\nclass FixedIntervalVideoSchedule(object):\n\n def __init__(self, interval):\n self.interval = interval\n\n def __call__(self, count):\n return count % self.interval == 0\n\n\nclass NoVideoSchedule(object):\n def __call__(self, count):\n return False\n\n\nclass GymEnv(Env, Serializable):\n def __init__(self, env_name, record_video=True, video_schedule=None, log_dir=None):\n\n## following lines modified by me (correspondingly commented out below) to suppress the warning messages\n if log_dir is None and logger.get_snapshot_dir() is not None:\n log_dir = os.path.join(logger.get_snapshot_dir(), \"gym_log\")\n\n# *********************\n# if log_dir is None:\n# if logger.get_snapshot_dir() is None:\n# logger.log(\"Warning: skipping Gym environment monitoring since snapshot_dir not configured.\")\n# else:\n# log_dir = os.path.join(logger.get_snapshot_dir(), \"gym_log\")\n# *********************\n\n Serializable.quick_init(self, locals())\n\n env = gym.envs.make(env_name)\n self.env = env\n self.env_id = env.spec.id\n\n monitor.logger.setLevel(logging.CRITICAL)\n\n if log_dir is None:\n self.monitoring = False\n else:\n if not record_video:\n video_schedule = NoVideoSchedule()\n else:\n if video_schedule is None:\n video_schedule = CappedCubicVideoSchedule()\n self.env.monitor.start(log_dir, video_schedule)\n self.monitoring = True\n\n self._observation_space = convert_gym_space(env.observation_space)\n self._action_space = convert_gym_space(env.action_space)\n self._horizon = env.spec.timestep_limit\n self._log_dir = log_dir\n\n @property\n def observation_space(self):\n return self._observation_space\n\n @property\n def action_space(self):\n return self._action_space\n\n @property\n def horizon(self):\n return self._horizon\n\n def reset(self):\n return self.env.reset()\n\n def step(self, action):\n next_obs, reward, done, info = self.env.step(action)\n return Step(next_obs, reward, done, **info)\n\n def render(self):\n self.env.render()\n\n # I have writtin the method evaluate_policy for easy use\n def evaluate_policy(self, policy, num_episodes=5, horizon=1e6, gamma=1, visual=False,\n percentile=[], get_full_dist=False):\n horizon = min(horizon, self._horizon)\n mean_eval, std, min_eval, max_eval = 0.0, 0.0, -1e8, -1e8\n ep_returns = np.zeros(num_episodes)\n for ep in range(num_episodes):\n o = self.reset()\n t, done = 0, False\n while t < horizon and done != True:\n if visual == True:\n self.render()\n a = policy.get_action(o)[0]\n o, r, done, _ = self.step(a)\n ep_returns[ep] += (gamma ** t) * r\n t += 1\n #if visual == True and done == True:\n # s = self.env.state_vector()\n # posafter,height,ang = self.env.model.data.qpos[0:3,0]\n # print(\"Termination reason : \\n\", np.isfinite(s).all(), (np.abs(s[2:]) < 
100).all(), \\\n # (height > .7), (abs(ang) < .2) )\n \n mean_eval, std = np.mean(ep_returns), np.std(ep_returns)\n min_eval, max_eval = np.amin(ep_returns), np.amax(ep_returns)\n base_stats = [mean_eval, std, min_eval, max_eval]\n\n percentile_stats = []\n full_dist = []\n\n for p in percentile:\n percentile_stats.append(np.percentile(ep_returns, p))\n\n if get_full_dist == True:\n full_dist = ep_returns\n\n return [base_stats, percentile_stats, full_dist]\n\n\n def terminate(self):\n if self.monitoring:\n self.env.monitor.close()\n if self._log_dir is not None:\n print(\"\"\"\n ***************************\n\n Training finished! You can upload results to OpenAI Gym by running the following command:\n\n python scripts/submit_gym.py %s\n\n ***************************\n \"\"\" % self._log_dir)\n\n",
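The [base_stats, percentile_stats, full_dist] summary that evaluate_policy assembles can be reproduced standalone; the episode returns below are fabricated stand-ins for actual rollout results.

    import numpy as np

    ep_returns = np.array([10.0, 12.5, 9.0, 14.0, 11.0])  # made-up rollout returns
    percentile = [15, 85]

    base_stats = [np.mean(ep_returns), np.std(ep_returns),
                  np.amin(ep_returns), np.amax(ep_returns)]
    percentile_stats = [np.percentile(ep_returns, p) for p in percentile]
    print(base_stats, percentile_stats)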
"from enum import Enum\n\nfrom rllab.misc.tabulate import tabulate\nfrom rllab.misc.console import mkdir_p, colorize\nfrom rllab.misc.autoargs import get_all_parameters\nfrom contextlib import contextmanager\nimport numpy as np\nimport os\nimport os.path as osp\nimport sys\nimport datetime\nimport dateutil.tz\nimport csv\nimport joblib\nimport json\nimport pickle\nimport base64\n\n_prefixes = []\n_prefix_str = ''\n\n_tabular_prefixes = []\n_tabular_prefix_str = ''\n\n_tabular = []\n\n_text_outputs = []\n_tabular_outputs = []\n\n_text_fds = {}\n_tabular_fds = {}\n_tabular_header_written = set()\n\n_snapshot_dir = None\n_snapshot_mode = 'all'\n_snapshot_gap = 1\n\n_log_tabular_only = False\n_header_printed = False\n\n\ndef _add_output(file_name, arr, fds, mode='a'):\n if file_name not in arr:\n mkdir_p(os.path.dirname(file_name))\n arr.append(file_name)\n #fds[file_name] = open(file_name, mode)\n fds[file_name] = open(file_name, mode, -1, 'utf-8') #유니코드 에러 해결 코드\n\n\ndef _remove_output(file_name, arr, fds):\n if file_name in arr:\n fds[file_name].close()\n del fds[file_name]\n arr.remove(file_name)\n\n\ndef push_prefix(prefix):\n _prefixes.append(prefix)\n global _prefix_str\n _prefix_str = ''.join(_prefixes)\n\n\ndef add_text_output(file_name):\n _add_output(file_name, _text_outputs, _text_fds, mode='a')\n\n\ndef remove_text_output(file_name):\n _remove_output(file_name, _text_outputs, _text_fds)\n\n\ndef add_tabular_output(file_name):\n _add_output(file_name, _tabular_outputs, _tabular_fds, mode='w')\n\n\ndef remove_tabular_output(file_name):\n if _tabular_fds[file_name] in _tabular_header_written:\n _tabular_header_written.remove(_tabular_fds[file_name])\n _remove_output(file_name, _tabular_outputs, _tabular_fds)\n\n\ndef set_snapshot_dir(dir_name):\n global _snapshot_dir\n _snapshot_dir = dir_name\n\n\ndef get_snapshot_dir():\n return _snapshot_dir\n\n\ndef get_snapshot_mode():\n return _snapshot_mode\n\n\ndef set_snapshot_mode(mode):\n global _snapshot_mode\n _snapshot_mode = mode\n\ndef get_snapshot_gap():\n return _snapshot_gap\n\ndef set_snapshot_gap(gap):\n global _snapshot_gap\n _snapshot_gap = gap\n\ndef set_log_tabular_only(log_tabular_only):\n global _log_tabular_only\n _log_tabular_only = log_tabular_only\n\n\ndef get_log_tabular_only():\n return _log_tabular_only\n\n\ndef log(s, with_prefix=True, with_timestamp=True, color=None):\n out = s\n if with_prefix:\n out = _prefix_str + out\n if with_timestamp:\n now = datetime.datetime.now(dateutil.tz.tzlocal())\n timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')\n out = \"%s | %s\" % (timestamp, out)\n if color is not None:\n out = colorize(out, color)\n if not _log_tabular_only:\n # Also log to stdout\n for fd in list(_text_fds.values()):\n fd.write(out + '\\n')\n fd.flush()\n sys.stdout.flush()\n\n\ndef record_tabular(key, val):\n _tabular.append((_tabular_prefix_str + str(key), str(val)))\n\n\ndef push_tabular_prefix(key):\n _tabular_prefixes.append(key)\n global _tabular_prefix_str\n _tabular_prefix_str = ''.join(_tabular_prefixes)\n\n\ndef pop_tabular_prefix():\n del _tabular_prefixes[-1]\n global _tabular_prefix_str\n _tabular_prefix_str = ''.join(_tabular_prefixes)\n\n\n@contextmanager\ndef prefix(key):\n push_prefix(key)\n try:\n yield\n finally:\n pop_prefix()\n\n\n@contextmanager\ndef tabular_prefix(key):\n push_tabular_prefix(key)\n yield\n pop_tabular_prefix()\n\n\nclass TerminalTablePrinter(object):\n def __init__(self):\n self.headers = None\n self.tabulars = []\n\n def print_tabular(self, new_tabular):\n if 
self.headers is None:\n self.headers = [x[0] for x in new_tabular]\n else:\n assert len(self.headers) == len(new_tabular)\n self.tabulars.append([x[1] for x in new_tabular])\n self.refresh()\n\n def refresh(self):\n import os\n rows, columns = os.popen('stty size', 'r').read().split()\n tabulars = self.tabulars[-(int(rows) - 3):]\n sys.stdout.write(\"\\x1b[2J\\x1b[H\")\n sys.stdout.write(tabulate(tabulars, self.headers))\n sys.stdout.write(\"\\n\")\n\n\ntable_printer = TerminalTablePrinter()\n\n\ndef dump_tabular(*args, **kwargs):\n wh = kwargs.pop(\"write_header\", None)\n if len(_tabular) > 0:\n if _log_tabular_only:\n table_printer.print_tabular(_tabular)\n else:\n for line in tabulate(_tabular).split('\\n'):\n log(line, *args, **kwargs)\n tabular_dict = dict(_tabular)\n # Also write to the csv files\n # This assumes that the keys in each iteration won't change!\n for tabular_fd in list(_tabular_fds.values()):\n writer = csv.DictWriter(tabular_fd, fieldnames=list(tabular_dict.keys()))\n if wh or (wh is None and tabular_fd not in _tabular_header_written):\n writer.writeheader()\n _tabular_header_written.add(tabular_fd)\n writer.writerow(tabular_dict)\n tabular_fd.flush()\n del _tabular[:]\n\n\ndef pop_prefix():\n del _prefixes[-1]\n global _prefix_str\n _prefix_str = ''.join(_prefixes)\n\n\ndef save_itr_params(itr, params):\n if _snapshot_dir:\n if _snapshot_mode == 'all':\n file_name = osp.join(_snapshot_dir, 'itr_%d.pkl' % itr)\n joblib.dump(params, file_name, compress=3)\n elif _snapshot_mode == 'last':\n # override previous params\n file_name = osp.join(_snapshot_dir, 'params.pkl')\n joblib.dump(params, file_name, compress=3)\n elif _snapshot_mode == \"gap\":\n if itr % _snapshot_gap == 0:\n file_name = osp.join(_snapshot_dir, 'itr_%d.pkl' % itr)\n joblib.dump(params, file_name, compress=3)\n elif _snapshot_mode == 'none':\n pass\n else:\n raise NotImplementedError\n\n\ndef log_parameters(log_file, args, classes):\n log_params = {}\n for param_name, param_value in args.__dict__.items():\n if any([param_name.startswith(x) for x in list(classes.keys())]):\n continue\n log_params[param_name] = param_value\n for name, cls in classes.items():\n if isinstance(cls, type):\n params = get_all_parameters(cls, args)\n params[\"_name\"] = getattr(args, name)\n log_params[name] = params\n else:\n log_params[name] = getattr(cls, \"__kwargs\", dict())\n log_params[name][\"_name\"] = cls.__module__ + \".\" + cls.__class__.__name__\n mkdir_p(os.path.dirname(log_file))\n with open(log_file, \"w\") as f:\n json.dump(log_params, f, indent=2, sort_keys=True)\n\n\ndef stub_to_json(stub_sth):\n from rllab.misc import instrument\n if isinstance(stub_sth, instrument.StubObject):\n assert len(stub_sth.args) == 0\n data = dict()\n for k, v in stub_sth.kwargs.items():\n data[k] = stub_to_json(v)\n data[\"_name\"] = stub_sth.proxy_class.__module__ + \".\" + stub_sth.proxy_class.__name__\n return data\n elif isinstance(stub_sth, instrument.StubAttr):\n return dict(\n obj=stub_to_json(stub_sth.obj),\n attr=stub_to_json(stub_sth.attr_name)\n )\n elif isinstance(stub_sth, instrument.StubMethodCall):\n return dict(\n obj=stub_to_json(stub_sth.obj),\n method_name=stub_to_json(stub_sth.method_name),\n args=stub_to_json(stub_sth.args),\n kwargs=stub_to_json(stub_sth.kwargs),\n )\n elif isinstance(stub_sth, instrument.BinaryOp):\n return \"binary_op\"\n elif isinstance(stub_sth, instrument.StubClass):\n return stub_sth.proxy_class.__module__ + \".\" + stub_sth.proxy_class.__name__\n elif isinstance(stub_sth, dict):\n 
return {stub_to_json(k): stub_to_json(v) for k, v in stub_sth.items()}\n elif isinstance(stub_sth, (list, tuple)):\n return list(map(stub_to_json, stub_sth))\n elif type(stub_sth) == type(lambda: None):\n if stub_sth.__module__ is not None:\n return stub_sth.__module__ + \".\" + stub_sth.__name__\n return stub_sth.__name__\n elif \"theano\" in str(type(stub_sth)):\n return repr(stub_sth)\n return stub_sth\n\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, type):\n return {'$class': o.__module__ + \".\" + o.__name__}\n elif isinstance(o, Enum):\n return {'$enum': o.__module__ + \".\" + o.__class__.__name__ + '.' + o.name}\n return json.JSONEncoder.default(self, o)\n\n\ndef log_parameters_lite(log_file, args):\n log_params = {}\n for param_name, param_value in args.__dict__.items():\n log_params[param_name] = param_value\n if args.args_data is not None:\n stub_method = pickle.loads(base64.b64decode(args.args_data))\n method_args = stub_method.kwargs\n log_params[\"json_args\"] = dict()\n for k, v in list(method_args.items()):\n log_params[\"json_args\"][k] = stub_to_json(v)\n kwargs = stub_method.obj.kwargs\n for k in [\"baseline\", \"env\", \"policy\"]:\n if k in kwargs:\n log_params[\"json_args\"][k] = stub_to_json(kwargs.pop(k))\n log_params[\"json_args\"][\"algo\"] = stub_to_json(stub_method.obj)\n mkdir_p(os.path.dirname(log_file))\n #with open(log_file, \"w\") as f: #mydelete\n # print(\"call Encoder\")\n # json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)\n\n\ndef log_variant(log_file, variant_data):\n mkdir_p(os.path.dirname(log_file))\n if hasattr(variant_data, \"dump\"):\n variant_data = variant_data.dump()\n variant_json = stub_to_json(variant_data)\n with open(log_file, \"w\") as f:\n json.dump(variant_json, f, indent=2, sort_keys=True, cls=MyEncoder)\n\n\ndef record_tabular_misc_stat(key, values):\n record_tabular(key + \"Average\", np.average(values))\n record_tabular(key + \"Std\", np.std(values))\n record_tabular(key + \"Median\", np.median(values))\n record_tabular(key + \"Min\", np.amin(values))\n record_tabular(key + \"Max\", np.amax(values))\n"
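A minimal usage sketch of the tabular logger above, assuming the module is importable as rllab.misc.logger; the file paths are placeholders.

    from rllab.misc import logger  # assumed import path for the module above

    logger.add_tabular_output('./logs/progress.csv')  # CSV sink for record_tabular()
    logger.add_text_output('./logs/debug.log')        # plain-text sink for log()
    for itr in range(3):
        logger.record_tabular('Iteration', itr)
        logger.record_tabular('AverageReturn', 100.0 - itr)
        logger.dump_tabular()                         # header on first call, rows after
    logger.log('run finished', color='green')
    logger.remove_tabular_output('./logs/progress.csv')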
] | [
[
"numpy.max",
"numpy.array",
"numpy.random.uniform"
],
[
"numpy.amax",
"numpy.random.seed",
"numpy.amin",
"numpy.percentile",
"numpy.ceil",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.zeros"
],
[
"numpy.amax",
"numpy.amin",
"numpy.percentile",
"numpy.std",
"numpy.mean",
"numpy.zeros"
],
[
"numpy.amax",
"numpy.amin",
"numpy.median",
"numpy.std",
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vanderhe/fortnet-python | [
"118237f0ce750852d973b213161fc04623fd7f82",
"118237f0ce750852d973b213161fc04623fd7f82"
] | [
"test/test_fnetout.py",
"src/fortformat/fnetout.py"
] | [
"#!/usr/bin/env python3\n#------------------------------------------------------------------------------#\n# fortnet-python: Python Tools for the Fortnet Software Package #\n# Copyright (C) 2021 - 2022 T. W. van der Heide #\n# #\n# See the LICENSE file for terms of usage and distribution. #\n#------------------------------------------------------------------------------#\n\n\n'''\nRegression tests covering the Fnetout class of Fortformat.\n'''\n\n\nimport os\nimport pytest\nimport numpy as np\n\nfrom common import compare_fnetout_references\n\n\nREFPATH = os.path.join(os.getcwd(), 'test', 'references', 'Fnetout')\n\n\ndef test_predict_atomic():\n '''Test extraction capabilities for a prediction run\n with a network that was trained on atomic targets.\n '''\n\n fname = 'predict_atomic.hdf5'\n\n ref = {}\n ref['mode'] = 'predict'\n ref['ndatapoints'] = 5\n ref['nglobaltargets'] = 0\n ref['natomictargets'] = 2\n ref['tforces'] = False\n ref['forces'] = None\n ref['atomictargets'] = None\n ref['globaltargets'] = None\n ref['globalpredictions'] = None\n ref['globalpredictions_atomic'] = None\n ref['atomicpredictions'] = [\n np.array([[1.961575401201565427e-01, 9.168128808877051839e-01],\n [1.325239781646761206e-01, 7.994346410064820940e-01],\n [1.826092611054506987e-01, 8.918864627286081648e-01],\n [1.951603716977679814e-01, 9.149779051068115399e-01],\n [1.963975544054146483e-01, 9.172546297234291934e-01],\n [1.365085697599923986e-01, 8.068187835637852245e-01],\n [1.937271428648690563e-01, 9.123404738385268997e-01],\n [1.963833753374974733e-01, 9.172283491672438283e-01],\n [-2.963259061179163711e-01, 6.622931487753776381e+00],\n [-3.116645694102148090e-01, 6.341542248977436458e+00],\n [-2.954852994924470622e-01, 6.639489278084699464e+00],\n [-3.046303752343871851e-01, 6.455384967114186523e+00]],\n dtype=float),\n np.array([[1.811418904020697107e-01, 8.890399580545689240e-01],\n [1.286134726005213336e-01, 7.921870956352004001e-01],\n [1.287072680065694807e-01, 7.923610013248644224e-01],\n [1.285878019428332852e-01, 7.921394561667119971e-01],\n [-3.205833278148639831e-01, 6.199868006587744951e+00],\n [-3.205832449473826062e-01, 6.199870243635043465e+00]],\n dtype=float),\n np.array([[1.508316035937055932e-01, 8.333084902706219266e-01],\n [1.963987299989748136e-01, 9.172568038424152581e-01],\n [1.963985352644728455e-01, 9.172564425915140651e-01],\n [1.314458979434688091e-01, 7.974318952109518133e-01],\n [1.959840207934034628e-01, 9.164924149116437935e-01],\n [1.962475111339566924e-01, 9.169785285430018806e-01],\n [1.963735428400687211e-01, 9.172103673056410944e-01],\n [1.692361060177546561e-01, 8.672524620359242098e-01],\n [-2.953595347026437556e-01, 6.642087650077651340e+00],\n [-3.151594350113108844e-01, 6.282255421963240494e+00],\n [-2.991868120084945071e-01, 6.559077847747195378e+00],\n [-3.170787084631181418e-01, 6.252835565560094011e+00]],\n dtype=float),\n np.array([[1.304479687184249281e-01, 7.955871276861898878e-01],\n [1.297462265528342706e-01, 7.942881684589961910e-01],\n [1.298443617239196379e-01, 7.944708584405727470e-01],\n [1.961872820312715870e-01, 9.168651269507970270e-01],\n [-3.205789586106497779e-01, 6.199943703977714549e+00],\n [-3.205781729831197469e-01, 6.199947713843369179e+00]],\n dtype=float),\n np.array([[1.288099388080513885e-01, 7.925517780736619500e-01],\n [1.286199169387698682e-01, 7.921996037242402533e-01],\n [1.286878255987483899e-01, 7.923246429757131448e-01],\n [1.312376406171068266e-01, 7.970445915261700209e-01],\n [-3.205835576648750629e-01, 
6.199865084107108792e+00],\n [-3.205822580166140523e-01, 6.199887555086769808e+00]],\n dtype=float)]\n\n equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))\n\n assert equal\n\n\ndef test_predict_global():\n '''Test extraction capabilities for a prediction run\n with a network that was trained on global targets.\n '''\n\n fname = 'predict_global.hdf5'\n\n ref = {}\n ref['mode'] = 'predict'\n ref['ndatapoints'] = 5\n ref['nglobaltargets'] = 1\n ref['natomictargets'] = 0\n ref['tforces'] = False\n ref['forces'] = None\n ref['atomictargets'] = None\n ref['globaltargets'] = None\n\n ref['globalpredictions_atomic'] = [\n np.array([-1.526436789762218496e+02], dtype=float),\n np.array([[-4.585193773117663341e+02],\n [-4.585193773117663341e+02]], dtype=float) / 2.0,\n np.array([[-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02]], dtype=float) / 3.0,\n np.array([[-6.877477714671086915e+02],\n [-6.877477714671086915e+02],\n [-6.877477714671086915e+02],\n [-6.877477714671086915e+02]], dtype=float) / 4.0,\n np.array([[-5.349057545062817098e+02],\n [-5.349057545062817098e+02],\n [-5.349057545062817098e+02],\n [-5.349057545062817098e+02],\n [-5.349057545062817098e+02]], dtype=float) / 5.0]\n\n ref['globalpredictions'] = [\n np.array([-1.526436789762218496e+02], dtype=float),\n np.array([-4.585193773117663341e+02], dtype=float),\n np.array([-2.290754290677185736e+02], dtype=float),\n np.array([-6.877477714671086915e+02], dtype=float),\n np.array([-5.349057545062817098e+02], dtype=float)]\n\n ref['atomicpredictions'] = None\n\n equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))\n\n assert equal\n\n\ndef test_predict_global_singleforces():\n '''Test extraction capabilities for a prediction run with a network\n that was trained on global targets and calculates atomic forces.\n '''\n\n fname = 'predict_global_singleforces.hdf5'\n\n ref = {}\n ref['mode'] = 'predict'\n ref['ndatapoints'] = 2\n ref['nglobaltargets'] = 1\n ref['natomictargets'] = 0\n\n ref['atomictargets'] = None\n ref['globaltargets'] = None\n ref['atomicpredictions'] = None\n\n ref['tforces'] = True\n ref['forces'] = []\n ref['forces'].append([])\n ref['forces'].append([])\n ref['forces'][0].append(np.array([\n [-1.129280561189105470e+00, 0.000000000000000000e+00,\n 0.000000000000000000e+00],\n [1.129280561189105470e+00, 0.000000000000000000e+00,\n 0.000000000000000000e+00]], dtype=float))\n ref['forces'][1].append(np.array([\n [-8.464270111301352983e-01, 0.000000000000000000e+00,\n 0.000000000000000000e+00],\n [8.464270111301352983e-01, 0.000000000000000000e+00,\n 0.000000000000000000e+00]], dtype=float))\n\n ref['globalpredictions_atomic'] = [\n np.array([[-4.301790810131604914e-01],\n [-4.301790810131604914e-01]], dtype=float) / 2.0,\n np.array([[-5.025593389423121948e-01],\n [-5.025593389423121948e-01]], dtype=float) / 2.0]\n\n ref['globalpredictions'] = [\n np.array([-4.301790810131604914e-01], dtype=float),\n np.array([-5.025593389423121948e-01], dtype=float)]\n\n equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))\n\n assert equal\n\n\ndef test_predict_global_multiforces():\n '''Test extraction capabilities for a prediction run with a network\n that was trained on global targets and calculates atomic forces.\n '''\n\n fname = 'predict_global_multiforces.hdf5'\n\n ref = {}\n ref['mode'] = 'predict'\n ref['ndatapoints'] = 2\n ref['nglobaltargets'] = 3\n ref['natomictargets'] = 0\n\n ref['atomictargets'] = None\n 
ref['globaltargets'] = None\n ref['atomicpredictions'] = None\n\n ref['tforces'] = True\n ref['forces'] = []\n ref['forces'].append([])\n ref['forces'].append([])\n ref['forces'][0].append(np.array([\n [-1.113504383113195217e+00, 0.000000000000000000e+00,\n 0.000000000000000000e+00],\n [1.113504383113195217e+00, 0.000000000000000000e+00,\n 0.000000000000000000e+00]], dtype=float))\n ref['forces'][0].append(np.array([\n [-1.117387033151562292e+00, 0.000000000000000000e+00,\n 0.000000000000000000e+00],\n [1.117387033151562292e+00, 0.000000000000000000e+00,\n 0.000000000000000000e+00]], dtype=float))\n ref['forces'][0].append(np.array([\n [-1.110108965167277972e+00, 0.000000000000000000e+00,\n 0.000000000000000000e+00],\n [1.110108965167277972e+00, 0.000000000000000000e+00,\n 0.000000000000000000e+00]], dtype=float))\n ref['forces'][1].append(np.array([\n [-8.450938994823964379e-01, 0.000000000000000000e+00,\n 0.000000000000000000e+00],\n [8.450938994823964379e-01, 0.000000000000000000e+00,\n 0.000000000000000000e+00]], dtype=float))\n ref['forces'][1].append(np.array([\n [-8.465140042623886529e-01, 0.000000000000000000e+00,\n 0.000000000000000000e+00],\n [8.465140042623886529e-01, 0.000000000000000000e+00,\n 0.000000000000000000e+00]], dtype=float))\n ref['forces'][1].append(np.array([\n [-8.438788427604926312e-01, 0.000000000000000000e+00,\n 0.000000000000000000e+00],\n [8.438788427604926312e-01, 0.000000000000000000e+00,\n 0.000000000000000000e+00]], dtype=float))\n\n ref['globalpredictions_atomic'] = [\n np.array([[-4.304246998683396441e-01, -4.302864774322330277e-01,\n -4.305433861504512905e-01],\n [-4.304246998683396441e-01, -4.302864774322330277e-01,\n -4.305433861504512905e-01]], dtype=float) / 2.0,\n np.array([[-5.022394949529731534e-01, -5.022869347972704901e-01,\n -5.021969559503443037e-01],\n [-5.022394949529731534e-01, -5.022869347972704901e-01,\n -5.021969559503443037e-01]], dtype=float) / 2.0]\n\n ref['globalpredictions'] = [\n np.array([-4.304246998683396441e-01, -4.302864774322330277e-01,\n -4.305433861504512905e-01], dtype=float),\n np.array([-5.022394949529731534e-01, -5.022869347972704901e-01,\n -5.021969559503443037e-01], dtype=float)]\n\n equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))\n\n assert equal\n\n\ndef test_validate_atomic():\n '''Test extraction capabilities for a validation run\n with a network that was trained on atomic targets.\n '''\n\n fname = 'validate_atomic.hdf5'\n\n ref = {}\n ref['mode'] = 'validate'\n ref['ndatapoints'] = 5\n ref['nglobaltargets'] = 0\n ref['natomictargets'] = 2\n\n ref['globaltargets'] = None\n ref['globalpredictions'] = None\n ref['globalpredictions_atomic'] = None\n\n ref['tforces'] = False\n ref['forces'] = None\n\n ref['atomictargets'] = [\n np.array([\n [1.540549993515014648e-01, 8.459450006484985352e-01],\n [1.883080005645751953e-01, 8.116919994354248047e-01],\n [1.595949977636337280e-01, 8.404050022363662720e-01],\n [1.432220041751861572e-01, 8.567779958248138428e-01],\n [1.232710033655166626e-01, 8.767289966344833374e-01],\n [1.735100001096725464e-01, 8.264899998903274536e-01],\n [1.588409990072250366e-01, 8.411590009927749634e-01],\n [1.403059959411621094e-01, 8.596940040588378906e-01],\n [-2.634609937667846680e-01, 6.263460993766784668e+00],\n [-3.214380145072937012e-01, 6.321438014507293701e+00],\n [-3.043099939823150635e-01, 6.304309993982315063e+00],\n [-3.519429862499237061e-01, 6.351942986249923706e+00]],\n dtype=float),\n np.array([\n [1.272429972887039185e-01, 8.727570027112960815e-01],\n 
[1.549790054559707642e-01, 8.450209945440292358e-01],\n [1.774729937314987183e-01, 8.225270062685012817e-01],\n [1.796700060367584229e-01, 8.203299939632415771e-01],\n [-3.525030016899108887e-01, 6.352503001689910889e+00],\n [-2.868520021438598633e-01, 6.286852002143859863e+00]],\n dtype=float),\n np.array([\n [1.852180063724517822e-01, 8.147819936275482178e-01],\n [1.311800032854080200e-01, 8.688199967145919800e-01],\n [1.232030019164085388e-01, 8.767969980835914612e-01],\n [1.774370074272155762e-01, 8.225629925727844238e-01],\n [1.587480008602142334e-01, 8.412519991397857666e-01],\n [1.444180011749267578e-01, 8.555819988250732422e-01],\n [1.365029960870742798e-01, 8.634970039129257202e-01],\n [1.802569925785064697e-01, 8.197430074214935303e-01],\n [-2.689329981803894043e-01, 6.268932998180389404e+00],\n [-3.368290066719055176e-01, 6.336829006671905518e+00],\n [-3.142969906330108643e-01, 6.314296990633010864e+00],\n [-3.169249892234802246e-01, 6.316924989223480225e+00]],\n dtype=float),\n np.array([\n [1.770180016756057739e-01, 8.229819983243942261e-01],\n [1.812230050563812256e-01, 8.187769949436187744e-01],\n [1.482979953289031982e-01, 8.517020046710968018e-01],\n [9.460300207138061523e-02, 9.053969979286193848e-01],\n [-2.429430037736892700e-01, 6.242943003773689270e+00],\n [-3.581880033016204834e-01, 6.358188003301620483e+00]],\n dtype=float),\n np.array([\n [1.596090048551559448e-01, 8.403909951448440552e-01],\n [1.659840047359466553e-01, 8.340159952640533447e-01],\n [1.713179945945739746e-01, 8.286820054054260254e-01],\n [1.658540070056915283e-01, 8.341459929943084717e-01],\n [-3.264440000057220459e-01, 6.326444000005722046e+00],\n [-3.363139927387237549e-01, 6.336313992738723755e+00]],\n dtype=float)]\n ref['atomicpredictions'] = [\n np.array([\n [1.961575401201565427e-01, 9.168128808877051839e-01],\n [1.325239781646761206e-01, 7.994346410064820940e-01],\n [1.826092611054506987e-01, 8.918864627286081648e-01],\n [1.951603716977679814e-01, 9.149779051068115399e-01],\n [1.963975544054146483e-01, 9.172546297234291934e-01],\n [1.365085697599923986e-01, 8.068187835637852245e-01],\n [1.937271428648690563e-01, 9.123404738385268997e-01],\n [1.963833753374974733e-01, 9.172283491672438283e-01],\n [-2.963259061179163711e-01, 6.622931487753776381e+00],\n [-3.116645694102148090e-01, 6.341542248977436458e+00],\n [-2.954852994924470622e-01, 6.639489278084699464e+00],\n [-3.046303752343871851e-01, 6.455384967114186523e+00]],\n dtype=float),\n np.array([\n [1.811418904020697107e-01, 8.890399580545689240e-01],\n [1.286134726005213336e-01, 7.921870956352004001e-01],\n [1.287072680065694807e-01, 7.923610013248644224e-01],\n [1.285878019428332852e-01, 7.921394561667119971e-01],\n [-3.205833278148639831e-01, 6.199868006587744951e+00],\n [-3.205832449473826062e-01, 6.199870243635043465e+00]],\n dtype=float),\n np.array([\n [1.508316035937055932e-01, 8.333084902706219266e-01],\n [1.963987299989748136e-01, 9.172568038424152581e-01],\n [1.963985352644728455e-01, 9.172564425915140651e-01],\n [1.314458979434688091e-01, 7.974318952109518133e-01],\n [1.959840207934034628e-01, 9.164924149116437935e-01],\n [1.962475111339566924e-01, 9.169785285430018806e-01],\n [1.963735428400687211e-01, 9.172103673056410944e-01],\n [1.692361060177546561e-01, 8.672524620359242098e-01],\n [-2.953595347026437556e-01, 6.642087650077651340e+00],\n [-3.151594350113108844e-01, 6.282255421963240494e+00],\n [-2.991868120084945071e-01, 6.559077847747195378e+00],\n [-3.170787084631181418e-01, 6.252835565560094011e+00]],\n dtype=float),\n 
np.array([\n [1.304479687184249281e-01, 7.955871276861898878e-01],\n [1.297462265528342706e-01, 7.942881684589961910e-01],\n [1.298443617239196379e-01, 7.944708584405727470e-01],\n [1.961872820312715870e-01, 9.168651269507970270e-01],\n [-3.205789586106497779e-01, 6.199943703977714549e+00],\n [-3.205781729831197469e-01, 6.199947713843369179e+00]],\n dtype=float),\n np.array([\n [1.288099388080513885e-01, 7.925517780736619500e-01],\n [1.286199169387698682e-01, 7.921996037242402533e-01],\n [1.286878255987483899e-01, 7.923246429757131448e-01],\n [1.312376406171068266e-01, 7.970445915261700209e-01],\n [-3.205835576648750629e-01, 6.199865084107108792e+00],\n [-3.205822580166140523e-01, 6.199887555086769808e+00]],\n dtype=float)]\n\n equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))\n\n assert equal\n\n\ndef test_validate_global():\n '''Test extraction capabilities for a validation run\n with a network that was trained on global targets.\n '''\n\n fname = 'validate_global.hdf5'\n\n ref = {}\n ref['mode'] = 'validate'\n ref['ndatapoints'] = 5\n ref['nglobaltargets'] = 1\n ref['natomictargets'] = 0\n\n ref['tforces'] = False\n ref['forces'] = None\n\n ref['atomictargets'] = None\n ref['atomicpredictions'] = None\n\n ref['globaltargets'] = [\n np.array([-1.527736989418316114e+02], dtype=float),\n np.array([-4.584216715420000128e+02], dtype=float),\n np.array([-2.291870019319999869e+02], dtype=float),\n np.array([-6.876760346160000381e+02], dtype=float),\n np.array([-5.348338707069999600e+02], dtype=float)]\n\n ref['globalpredictions'] = [\n np.array([-1.526436789762218496e+02], dtype=float),\n np.array([-4.585193773117663341e+02], dtype=float),\n np.array([-2.290754290677185736e+02], dtype=float),\n np.array([-6.877477714671086915e+02], dtype=float),\n np.array([-5.349057545062817098e+02], dtype=float)]\n\n ref['globalpredictions_atomic'] = [\n np.array([[-1.526436789762218496e+02]], dtype=float),\n np.array([[-4.585193773117663341e+02],\n [-4.585193773117663341e+02]], dtype=float) / 2.0,\n np.array([[-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02]], dtype=float) / 3.0,\n np.array([[-6.877477714671086915e+02],\n [-6.877477714671086915e+02],\n [-6.877477714671086915e+02],\n [-6.877477714671086915e+02]], dtype=float) / 4.0,\n np.array([[-5.349057545062817098e+02],\n [-5.349057545062817098e+02],\n [-5.349057545062817098e+02],\n [-5.349057545062817098e+02],\n [-5.349057545062817098e+02]], dtype=float) / 5.0]\n\n equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))\n\n assert equal\n\n\ndef test_validate_atomic_global():\n '''Test extraction capabilities for a validation run with a\n network that was trained on both, atomic and global targets.\n '''\n\n fname = 'validate_atomic_global.hdf5'\n\n ref = {}\n ref['mode'] = 'validate'\n ref['ndatapoints'] = 5\n ref['nglobaltargets'] = 1\n ref['natomictargets'] = 2\n\n ref['targets'] = True\n ref['tforces'] = False\n ref['forces'] = None\n\n ref['atomictargets'] = [\n np.array([\n [1.540549993515014648e-01, 8.459450006484985352e-01],\n [1.883080005645751953e-01, 8.116919994354248047e-01],\n [1.595949977636337280e-01, 8.404050022363662720e-01],\n [1.432220041751861572e-01, 8.567779958248138428e-01],\n [1.232710033655166626e-01, 8.767289966344833374e-01],\n [1.735100001096725464e-01, 8.264899998903274536e-01],\n [1.588409990072250366e-01, 8.411590009927749634e-01],\n [1.403059959411621094e-01, 8.596940040588378906e-01],\n [-2.634609937667846680e-01, 
6.263460993766784668e+00],\n [-3.214380145072937012e-01, 6.321438014507293701e+00],\n [-3.043099939823150635e-01, 6.304309993982315063e+00],\n [-3.519429862499237061e-01, 6.351942986249923706e+00]],\n dtype=float),\n np.array([\n [1.272429972887039185e-01, 8.727570027112960815e-01],\n [1.549790054559707642e-01, 8.450209945440292358e-01],\n [1.774729937314987183e-01, 8.225270062685012817e-01],\n [1.796700060367584229e-01, 8.203299939632415771e-01],\n [-3.525030016899108887e-01, 6.352503001689910889e+00],\n [-2.868520021438598633e-01, 6.286852002143859863e+00]],\n dtype=float),\n np.array([\n [1.852180063724517822e-01, 8.147819936275482178e-01],\n [1.311800032854080200e-01, 8.688199967145919800e-01],\n [1.232030019164085388e-01, 8.767969980835914612e-01],\n [1.774370074272155762e-01, 8.225629925727844238e-01],\n [1.587480008602142334e-01, 8.412519991397857666e-01],\n [1.444180011749267578e-01, 8.555819988250732422e-01],\n [1.365029960870742798e-01, 8.634970039129257202e-01],\n [1.802569925785064697e-01, 8.197430074214935303e-01],\n [-2.689329981803894043e-01, 6.268932998180389404e+00],\n [-3.368290066719055176e-01, 6.336829006671905518e+00],\n [-3.142969906330108643e-01, 6.314296990633010864e+00],\n [-3.169249892234802246e-01, 6.316924989223480225e+00]],\n dtype=float),\n np.array([\n [1.770180016756057739e-01, 8.229819983243942261e-01],\n [1.812230050563812256e-01, 8.187769949436187744e-01],\n [1.482979953289031982e-01, 8.517020046710968018e-01],\n [9.460300207138061523e-02, 9.053969979286193848e-01],\n [-2.429430037736892700e-01, 6.242943003773689270e+00],\n [-3.581880033016204834e-01, 6.358188003301620483e+00]],\n dtype=float),\n np.array([\n [1.596090048551559448e-01, 8.403909951448440552e-01],\n [1.659840047359466553e-01, 8.340159952640533447e-01],\n [1.713179945945739746e-01, 8.286820054054260254e-01],\n [1.658540070056915283e-01, 8.341459929943084717e-01],\n [-3.264440000057220459e-01, 6.326444000005722046e+00],\n [-3.363139927387237549e-01, 6.336313992738723755e+00]],\n dtype=float)]\n ref['atomicpredictions'] = [\n np.array([\n [1.961575401201565427e-01, 9.168128808877051839e-01],\n [1.325239781646761206e-01, 7.994346410064820940e-01],\n [1.826092611054506987e-01, 8.918864627286081648e-01],\n [1.951603716977679814e-01, 9.149779051068115399e-01],\n [1.963975544054146483e-01, 9.172546297234291934e-01],\n [1.365085697599923986e-01, 8.068187835637852245e-01],\n [1.937271428648690563e-01, 9.123404738385268997e-01],\n [1.963833753374974733e-01, 9.172283491672438283e-01],\n [-2.963259061179163711e-01, 6.622931487753776381e+00],\n [-3.116645694102148090e-01, 6.341542248977436458e+00],\n [-2.954852994924470622e-01, 6.639489278084699464e+00],\n [-3.046303752343871851e-01, 6.455384967114186523e+00]],\n dtype=float),\n np.array([\n [1.811418904020697107e-01, 8.890399580545689240e-01],\n [1.286134726005213336e-01, 7.921870956352004001e-01],\n [1.287072680065694807e-01, 7.923610013248644224e-01],\n [1.285878019428332852e-01, 7.921394561667119971e-01],\n [-3.205833278148639831e-01, 6.199868006587744951e+00],\n [-3.205832449473826062e-01, 6.199870243635043465e+00]],\n dtype=float),\n np.array([\n [1.508316035937055932e-01, 8.333084902706219266e-01],\n [1.963987299989748136e-01, 9.172568038424152581e-01],\n [1.963985352644728455e-01, 9.172564425915140651e-01],\n [1.314458979434688091e-01, 7.974318952109518133e-01],\n [1.959840207934034628e-01, 9.164924149116437935e-01],\n [1.962475111339566924e-01, 9.169785285430018806e-01],\n [1.963735428400687211e-01, 9.172103673056410944e-01],\n 
[1.692361060177546561e-01, 8.672524620359242098e-01],\n [-2.953595347026437556e-01, 6.642087650077651340e+00],\n [-3.151594350113108844e-01, 6.282255421963240494e+00],\n [-2.991868120084945071e-01, 6.559077847747195378e+00],\n [-3.170787084631181418e-01, 6.252835565560094011e+00]],\n dtype=float),\n np.array([\n [1.304479687184249281e-01, 7.955871276861898878e-01],\n [1.297462265528342706e-01, 7.942881684589961910e-01],\n [1.298443617239196379e-01, 7.944708584405727470e-01],\n [1.961872820312715870e-01, 9.168651269507970270e-01],\n [-3.205789586106497779e-01, 6.199943703977714549e+00],\n [-3.205781729831197469e-01, 6.199947713843369179e+00]],\n dtype=float),\n np.array([\n [1.288099388080513885e-01, 7.925517780736619500e-01],\n [1.286199169387698682e-01, 7.921996037242402533e-01],\n [1.286878255987483899e-01, 7.923246429757131448e-01],\n [1.312376406171068266e-01, 7.970445915261700209e-01],\n [-3.205835576648750629e-01, 6.199865084107108792e+00],\n [-3.205822580166140523e-01, 6.199887555086769808e+00]],\n dtype=float)]\n\n ref['globaltargets'] = [\n np.array([-1.527736989418316114e+02], dtype=float),\n np.array([-4.584216715420000128e+02], dtype=float),\n np.array([-2.291870019319999869e+02], dtype=float),\n np.array([-6.876760346160000381e+02], dtype=float),\n np.array([-5.348338707069999600e+02], dtype=float)]\n\n ref['globalpredictions'] = [\n np.array([-1.526436789762218496e+02], dtype=float) * 12.0,\n np.array([-4.585193773117663341e+02], dtype=float) * 6.0,\n np.array([-2.290754290677185736e+02], dtype=float) * 12.0,\n np.array([-6.877477714671086915e+02], dtype=float) * 6.0,\n np.array([-5.349057545062817098e+02], dtype=float) * 6.0]\n\n ref['globalpredictions_atomic'] = [\n np.array([[-1.526436789762218496e+02],\n [-1.526436789762218496e+02],\n [-1.526436789762218496e+02],\n [-1.526436789762218496e+02],\n [-1.526436789762218496e+02],\n [-1.526436789762218496e+02],\n [-1.526436789762218496e+02],\n [-1.526436789762218496e+02],\n [-1.526436789762218496e+02],\n [-1.526436789762218496e+02],\n [-1.526436789762218496e+02],\n [-1.526436789762218496e+02]], dtype=float),\n np.array([[-4.585193773117663341e+02],\n [-4.585193773117663341e+02],\n [-4.585193773117663341e+02],\n [-4.585193773117663341e+02],\n [-4.585193773117663341e+02],\n [-4.585193773117663341e+02]], dtype=float),\n np.array([[-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02],\n [-2.290754290677185736e+02]], dtype=float),\n np.array([[-6.877477714671086915e+02],\n [-6.877477714671086915e+02],\n [-6.877477714671086915e+02],\n [-6.877477714671086915e+02],\n [-6.877477714671086915e+02],\n [-6.877477714671086915e+02]], dtype=float),\n np.array([[-5.349057545062817098e+02],\n [-5.349057545062817098e+02],\n [-5.349057545062817098e+02],\n [-5.349057545062817098e+02],\n [-5.349057545062817098e+02],\n [-5.349057545062817098e+02]], dtype=float)]\n\n equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))\n\n assert equal\n\n\nif __name__ == '__main__':\n pytest.main()\n",
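compare_fnetout_references is imported from the test suite's local common module, which is not shown here; a rough stand-in, assuming the Fnetout class from fortformat.fnetout, could look like this sketch.

    import numpy as np
    from fortformat.fnetout import Fnetout  # assumed import path

    def _lists_close(ref, out, atol=1e-10):
        # Both None counts as equal; otherwise compare arrays entry by entry.
        if ref is None or out is None:
            return ref is None and out is None
        return all(np.allclose(r, o, atol=atol) for r, o in zip(ref, out))

    def compare_fnetout_references_sketch(ref, fname):
        # Hypothetical reduced version of the real comparison helper.
        fnetout = Fnetout(fname)
        return (ref['mode'] == fnetout.mode
                and ref['ndatapoints'] == fnetout.ndatapoints
                and _lists_close(ref['atomicpredictions'], fnetout.atomicpredictions)
                and _lists_close(ref['globalpredictions'], fnetout.globalpredictions))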
"#------------------------------------------------------------------------------#\n# fortnet-python: Python Tools for the Fortnet Software Package #\n# Copyright (C) 2021 - 2022 T. W. van der Heide #\n# #\n# See the LICENSE file for terms of usage and distribution. #\n#------------------------------------------------------------------------------#\n\n\n'''\nBasic Fortnet Output Format Class\n\nThis basic Python class implements the Fortnet output file format. The Fnetout\nclass extracts certain properties of the HDF5 output for later analysis.\n'''\n\n\nimport h5py\nimport numpy as np\n\n\nclass Fnetout:\n '''Basic Fortnet Output Format Class.'''\n\n\n def __init__(self, fname):\n '''Initializes a Fnetout object.\n\n Args:\n\n fname (str): filename to extract data from\n\n '''\n\n self._fname = fname\n\n with h5py.File(self._fname, 'r') as fnetoutfile:\n fnetout = fnetoutfile['fnetout']\n self._mode = fnetout.attrs.get('mode').decode('UTF-8').strip()\n if not self._mode in ('validate', 'predict'):\n raise FnetoutError('Invalid running mode specification.')\n\n output = fnetoutfile['fnetout']['output']\n\n # read number of datapoints\n self._ndatapoints = output.attrs.get('ndatapoints')\n if len(self._ndatapoints) == 1:\n # number of datapoints stored in array of size 1\n self._ndatapoints = self._ndatapoints[0]\n else:\n msg = \"Error while reading fnetout file '\" + self._fname + \\\n \"'. Unrecognized number of datapoints obtained.\"\n raise FnetoutError(msg)\n\n # read number of system-wide targets\n self._nglobaltargets = output.attrs.get('nglobaltargets')\n if len(self._nglobaltargets) == 1:\n # number of system-wide targets stored in array of size 1\n self._nglobaltargets = self._nglobaltargets[0]\n else:\n msg = \"Error while reading fnetout file '\" + self._fname + \\\n \"'. Unrecognized number of global targets obtained.\"\n raise FnetoutError(msg)\n\n # read number of atomic targets\n self._natomictargets = output.attrs.get('natomictargets')\n if len(self._natomictargets) == 1:\n # number of atomic targets stored in array of size 1\n self._natomictargets = self._natomictargets[0]\n else:\n msg = \"Error while reading fnetout file '\" + self._fname + \\\n \"'. Unrecognized number of atomic targets obtained.\"\n raise FnetoutError(msg)\n\n # read force specification\n self._tforces = output.attrs.get('tforces')\n # account for legacy files where no force entry is present\n if self._tforces is None:\n self._tforces = [0]\n if len(self._tforces) == 1:\n # booleans stored in integer arrays of size 1\n self._tforces = bool(self._tforces[0])\n else:\n msg = \"Error while reading fnetout file '\" + self._fname + \\\n \"'. 
Unrecognized force specification obtained.\"\n raise FnetoutError(msg)\n\n\n @property\n def mode(self):\n '''Defines property, providing the mode of the Fortnet run.\n\n Returns:\n\n mode (str): mode of the run that produced the Fnetout file\n\n '''\n\n return self._mode\n\n\n @property\n def ndatapoints(self):\n '''Defines property, providing the number of datapoints.\n\n Returns:\n\n ndatapoints (int): total number of datapoints of the training\n\n '''\n\n return self._ndatapoints\n\n\n @property\n def nglobaltargets(self):\n '''Defines property, providing the number of system-wide targets.\n\n Returns:\n\n nglobaltargets (int): number of global targets per datapoint\n\n '''\n\n return self._nglobaltargets\n\n\n @property\n def natomictargets(self):\n '''Defines property, providing the number of atomic targets.\n\n Returns:\n\n natomictargets (int): number of atomic targets per datapoint\n\n '''\n\n return self._natomictargets\n\n\n @property\n def tforces(self):\n '''Defines property, providing hint whether atomic forces are present.\n\n Returns:\n\n tforces (bool): true, if atomic forces are supplied\n\n '''\n\n return self._tforces\n\n\n @property\n def globalpredictions(self):\n '''Defines property, providing the system-wide predictions of Fortnet.\n\n Returns:\n\n predictions (2darray): predictions of the network\n\n '''\n\n if not self._nglobaltargets > 0:\n return None\n\n predictions = np.empty((self._ndatapoints, self._nglobaltargets),\n dtype=float)\n\n with h5py.File(self._fname, 'r') as fnetoutfile:\n output = fnetoutfile['fnetout']['output']\n for idata in range(self._ndatapoints):\n dataname = 'datapoint' + str(idata + 1)\n predictions[idata, :] = np.array(\n output[dataname]['globalpredictions'],\n dtype=float)\n\n return predictions\n\n\n @property\n def globalpredictions_atomic(self):\n '''Defines property, providing the (atom-resolved) system-wide\n predictions of Fortnet.\n\n Returns:\n\n predictions (list): predictions of the network\n\n '''\n\n if not self._nglobaltargets > 0:\n return None\n\n predictions = []\n\n with h5py.File(self._fname, 'r') as fnetoutfile:\n output = fnetoutfile['fnetout']['output']\n for idata in range(self._ndatapoints):\n dataname = 'datapoint' + str(idata + 1)\n predictions.append(\n np.array(output[dataname]['rawpredictions'],\n dtype=float)[:, 0:self._nglobaltargets])\n\n return predictions\n\n\n @property\n def atomicpredictions(self):\n '''Defines property, providing the atomic predictions of Fortnet.\n\n Returns:\n\n predictions (list): predictions of the network\n\n '''\n\n if not self._natomictargets > 0:\n return None\n\n predictions = []\n\n with h5py.File(self._fname, 'r') as fnetoutfile:\n output = fnetoutfile['fnetout']['output']\n for idata in range(self._ndatapoints):\n dataname = 'datapoint' + str(idata + 1)\n predictions.append(\n np.array(output[dataname]\n ['rawpredictions'], dtype=float)\n [:, self._nglobaltargets:])\n\n return predictions\n\n\n @property\n def globaltargets(self):\n '''Defines property, providing the system-wide targets during training.\n\n Returns:\n\n targets (2darray): system-wide targets during training\n\n '''\n\n if self._mode == 'predict' or self._nglobaltargets == 0:\n return None\n\n targets = np.empty((self._ndatapoints, self._nglobaltargets),\n dtype=float)\n\n with h5py.File(self._fname, 'r') as fnetoutfile:\n output = fnetoutfile['fnetout']['output']\n for idata in range(self._ndatapoints):\n dataname = 'datapoint' + str(idata + 1)\n targets[idata, :] = np.array(\n 
output[dataname]['globaltargets'],\n dtype=float)\n\n return targets\n\n\n @property\n def atomictargets(self):\n '''Defines property, providing the atomic targets during training.\n\n Returns:\n\n targets (list): atomic targets during training\n\n '''\n\n if self._mode == 'predict' or self._natomictargets == 0:\n return None\n\n targets = []\n\n with h5py.File(self._fname, 'r') as fnetoutfile:\n output = fnetoutfile['fnetout']['output']\n for idata in range(self._ndatapoints):\n dataname = 'datapoint' + str(idata + 1)\n targets.append(np.array(output[dataname]\n ['atomictargets'], dtype=float))\n\n return targets\n\n\n @property\n def forces(self):\n '''Defines property, providing the atomic forces, if supplied.\n\n Returns:\n\n forces (list): atomic forces on atoms\n\n '''\n\n if not self._tforces:\n return None\n\n tmp1 = []\n\n if self._natomictargets > 0:\n msg = \"Error while extracting forces from fnetout file '\" \\\n + self._fname + \\\n \"'. Forces supplied for global property targets only.\"\n raise FnetoutError(msg)\n\n with h5py.File(self._fname, 'r') as fnetoutfile:\n output = fnetoutfile['fnetout']['output']\n for idata in range(self._ndatapoints):\n dataname = 'datapoint' + str(idata + 1)\n tmp1.append(np.array(output[dataname]['forces'], dtype=float))\n\n # convert to shape np.shape(forces[iData][iTarget]) = (iAtom, 3)\n forces = []\n for tmp2 in tmp1:\n entry = []\n if not np.shape(tmp2)[1]%3 == 0:\n msg = \"Error while extracting forces from fnetout file '\" \\\n + self._fname + \\\n \"'. Expected three force components and global target.\"\n raise FnetoutError(msg)\n for jj in range(int(np.shape(tmp2)[1] / 3)):\n entry.append(tmp2[:, 3 * jj:3 * (jj + 1)])\n forces.append(entry)\n\n return forces\n\n\nclass FnetoutError(Exception):\n '''Exception thrown by the Fnetout class.'''\n"
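A short usage sketch of the class above; 'fnetout.hdf5' is a placeholder for a real Fortnet output file.

    fout = Fnetout('fnetout.hdf5')       # placeholder filename
    print(fout.mode, fout.ndatapoints)   # e.g. 'predict', 5
    if fout.natomictargets > 0:
        preds = fout.atomicpredictions   # list of (nAtoms, natomictargets) arrays
    if fout.tforces:
        forces = fout.forces             # forces[iData][iTarget] -> (nAtoms, 3) array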
] | [
[
"numpy.array"
],
[
"numpy.array",
"numpy.shape",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
iamfaith/DeepLearning | [
"467c73e2d0435f0a05255e5b5e00454260d01f27",
"467c73e2d0435f0a05255e5b5e00454260d01f27"
] | [
"books/PRML/PRML-master-Python/prml/nn/optimizer/ada_delta.py",
"books/PRML/PRML-master-Python/prml/nn/linalg/inv.py"
] | [
"import numpy as np\nfrom prml.nn.optimizer.optimizer import Optimizer\n\n\nclass AdaDelta(Optimizer):\n \"\"\"\n AdaDelta optimizer\n \"\"\"\n\n def __init__(self, parameter, rho=0.95, epsilon=1e-8):\n super().__init__(parameter, None)\n self.rho = rho\n self.epsilon = epsilon\n self.mean_squared_deriv = []\n self.mean_squared_update = []\n for p in self.parameter:\n self.mean_squared_deriv.append(np.zeros(p.shape))\n self.mean_squared_update.append(np.zeros(p.shape))\n\n def update(self):\n self.increment_iteration()\n for p, msd, msu in zip(self.parameter, self.mean_squared_deriv, self.mean_squared_update):\n if p.grad is None:\n continue\n grad = p.grad\n msd *= self.rho\n msd += (1 - self.rho) * grad ** 2\n delta = np.sqrt((msu + self.epsilon) / (msd + self.epsilon)) * grad\n msu *= self.rho\n msu *= (1 - self.rho) * delta ** 2\n p.value += delta\n",
"import numpy as np\nfrom prml.nn.tensor.constant import Constant\nfrom prml.nn.tensor.tensor import Tensor\nfrom prml.nn.function import Function\n\n\nclass Inverse(Function):\n\n def forward(self, x):\n x = self._convert2tensor(x)\n self.x = x\n self._equal_ndim(x, 2)\n self.output = np.linalg.inv(x.value)\n if isinstance(self.x, Constant):\n return Constant(self.output)\n return Tensor(self.output, function=self)\n\n def backward(self, delta):\n dx = -self.output.T @ delta @ self.output.T\n self.x.backward(dx)\n\n\ndef inv(x):\n \"\"\"\n inverse of a matrix\n Parameters\n ----------\n x : (d, d) tensor_like\n a matrix to be inverted\n Returns\n -------\n output : (d, d) tensor_like\n inverse of the input\n \"\"\"\n return Inverse().forward(x)\n"
] | [
[
"numpy.zeros",
"numpy.sqrt"
],
[
"numpy.linalg.inv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
laenan8466/DeerLab | [
"94f1942da1b506e0661a8e7e4901bb5ba6d69143",
"94f1942da1b506e0661a8e7e4901bb5ba6d69143",
"94f1942da1b506e0661a8e7e4901bb5ba6d69143"
] | [
"deerlab/utils/utils.py",
"test/test_mixmodels.py",
"test/test_fftspec.py"
] | [
"import warnings\r\nimport numpy as np\r\nimport cmath as math\r\nimport scipy as scp\r\nimport scipy.optimize as opt\r\n\r\nfrom types import FunctionType \r\n\r\n\r\ndef parse_multidatasets(V,K,weights,precondition=False):\r\n#===============================================================================\r\n \r\n # Identify if the signals have already been processed by this function\r\n if type(V) is not list:\r\n if V.size == np.atleast_1d(weights).size:\r\n # If so, just return without doing anything\r\n if precondition:\r\n return V,K,weights,[np.arange(0,len(V))],[1]\r\n else:\r\n return V,K,weights,[np.arange(0,len(V))]\r\n\r\n # If multiple signals are specified as a list...\r\n if type(V) is list and all([type(Vs) is np.ndarray for Vs in V]):\r\n nSignals = len(V)\r\n prescales = np.zeros(nSignals)\r\n Vlist = []\r\n # Pre-scale the signals, important for fitregmodel when using global fits with arbitrary scales\r\n for i in range(nSignals):\r\n if precondition:\r\n prescales[i] = max(V[i])\r\n Vlist.append(V[i]/prescales[i])\r\n else:\r\n Vlist.append(V[i])\r\n V = np.concatenate(Vlist, axis=0) # ...concatenate them along the list \r\n elif type(V) is np.ndarray:\r\n nSignals = 1\r\n prescales = [1]\r\n Vlist = [V]\r\n else:\r\n raise TypeError('The input signal(s) must be numpy array or a list of numpy arrays.')\r\n \r\n def prepareKernel(K,nSignals):\r\n # If multiple kernels are specified as a list...\r\n if type(K) is tuple:\r\n K = [Ks for Ks in K]\r\n if type(K) is list and all([type(Ks) is np.ndarray for Ks in K]):\r\n nKernels = len(K)\r\n K = np.concatenate(K, axis=0) # ...concatenate them along the list \r\n elif type(K) is np.ndarray:\r\n nKernels = 1\r\n else:\r\n raise TypeError('The input kernel(s) must be numpy array or a list of numpy arrays.')\r\n # Check that the same number of signals and kernel have been passed\r\n if nSignals!=nKernels:\r\n raise KeyError('The same number of kernels and signals must be specified as lists.')\r\n return K\r\n\r\n if type(K) is FunctionType:\r\n Kmulti = lambda p: prepareKernel(K(p),nSignals)\r\n else:\r\n Kmulti = prepareKernel(K,nSignals)\r\n\r\n # If multiple weights are specified as a list...\r\n if type(weights) is list or not hasattr(weights, \"__len__\"):\r\n weights = np.atleast_1d(weights)\r\n if len(weights)==1:\r\n weights = np.repeat(weights,nSignals)\r\n weights = weights/sum(weights)\r\n if len(weights)!=nSignals:\r\n raise KeyError('If multiple signals are passed, the same number of weights are required.')\r\n weights_ = []\r\n for i in range(len(weights)):\r\n weights_ = np.concatenate((weights_,weights[i]*np.ones(len(Vlist[i]))))\r\n weights = weights_\r\n else:\r\n raise TypeError('The input weights(s) must be numpy array or a list of numpy arrays.')\r\n\r\n # Get the indices to extract the subsets again\r\n Ns = [len(V) for V in Vlist]\r\n subset = [None]*nSignals\r\n for i in range(nSignals):\r\n if i==0:\r\n prev = 0\r\n else:\r\n prev = subset[i-1][-1]+1\r\n subset[i] = np.arange(prev,prev+Ns[i])\r\n\r\n if precondition:\r\n return V,Kmulti,weights,subset,prescales\r\n else:\r\n return V,Kmulti,weights,subset\r\n#===============================================================================\r\n\r\n\r\ndef hccm(J,*args):\r\n \"\"\"\r\n Heteroscedasticity Consistent Covariance Matrix (HCCM)\r\n ======================================================\r\n\r\n Computes the heteroscedasticity consistent covariance matrix (HCCM) of\r\n a given LSQ problem given by the Jacobian matrix (J) and the covariance\r\n 
matrix of the data (V). If the residual (res) is specified, the\r\n covariance matrix is estimated using one of the methods specified in\r\n (mode). The HCCM is valid for both heteroscedastic and\r\n homoscedastic residual vectors. \r\n\r\n Usage:\r\n ------\r\n C = hccm(J,V)\r\n C = hccm(J,res,mode)\r\n\r\n Arguments:\r\n ----------\r\n J (NxM-element array)\r\n Jacobian matrix of the residual vector\r\n res (N-element array)\r\n Vector of residuals\r\n mode (string)\r\n HCCM estimator, options are:\r\n 'HC0' - White, H. (1980)\r\n 'HC1' - MacKinnon and White, (1985)\r\n 'HC2' - MacKinnon and White, (1985)\r\n 'HC3' - Davidson and MacKinnon, (1993)\r\n 'HC4' - Cribari-Neto, (2004)\r\n 'HC5' - Cribari-Neto, (2007)\r\n\r\n Returns:\r\n --------\r\n C (MxM-element array) \r\n Heteroscedasticity consistent covariance matrix \r\n\r\n References:\r\n ------------ \r\n [1] \r\n White, H. (1980). A heteroskedasticity-consistent covariance matrix\r\n estimator and a direct test for heteroskedasticity. Econometrica, 48(4), 817-838\r\n DOI: 10.2307/1912934\r\n\r\n [2] \r\n MacKinnon and White, (1985). Some heteroskedasticity-consistent covariance\r\n matrix estimators with improved finite sample properties. Journal of Econometrics, 29 (1985), \r\n pp. 305-325. DOI: 10.1016/0304-4076(85)90158-7\r\n\r\n [3] \r\n Davidson and MacKinnon, (1993). Estimation and Inference in Econometrics\r\n Oxford University Press, New York. \r\n\r\n [4] \r\n Cribari-Neto, F. (2004). Asymptotic inference under heteroskedasticity of\r\n unknown form. Computational Statistics & Data Analysis, 45(1), 215-233\r\n DOI: 10.1016/s0167-9473(02)00366-3\r\n\r\n [5] \r\n Cribari-Neto, F., Souza, T. C., & Vasconcellos, K. L. P. (2007). Inference\r\n under heteroskedasticity and leveraged data. Communications in Statistics –\r\n Theory and Methods, 36(10), 1877-1888. 
DOI: 10.1080/03610920601126589\r\n \"\"\"\r\n\r\n # Unpack inputs\r\n if len(args)==2:\r\n res,mode = args\r\n V = []\r\n elif len(args)==1:\r\n V = args[0]\r\n\r\n # Hat matrix\r\n H = [email protected](J.T@J)@J.T\r\n # Get leverage\r\n h = np.diag(H)\r\n # Number of parameters (k) & Number of variables (n)\r\n n,k = np.shape(J)\r\n\r\n if isempty(V):\r\n # Select estimation method using established nomenclature\r\n if mode.upper() == 'HC0': # White,(1980),[1]\r\n # Estimate the data covariance matrix\r\n V = np.diag(res**2)\r\n \r\n elif mode.upper() == 'HC1': # MacKinnon and White,(1985),[2]\r\n # Estimate the data covariance matrix\r\n V = n/(n-k)*np.diag(res**2)\r\n \r\n elif mode.upper() == 'HC2': # MacKinnon and White,(1985),[2]\r\n # Estimate the data covariance matrix\r\n V = np.diag(res**2/(1-h))\r\n \r\n elif mode.upper() == 'HC3': # Davidson and MacKinnon,(1993),[3]\r\n # Estimate the data covariance matrix\r\n V = np.diag(res/(1-h))**2\r\n \r\n elif mode.upper() == 'HC4': # Cribari-Neto,(2004),[4]\r\n # Compute discount factor\r\n delta = np.minimum(4,n*h/k)\r\n # Estimate the data covariance matrix\r\n V = np.diag(res**2./((1 - h)**delta))\r\n \r\n elif mode.upper() == 'HC5': # Cribari-Neto,(2007),[5]\r\n # Compute inflation factor\r\n k = 0.7\r\n alpha = np.minimum(np.maximum(4,k*max(h)/np.mean(h)),h/np.mean(h))\r\n # Estimate the data covariance matrix\r\n V = np.diag(res**2./(np.sqrt((1 - h)**alpha)))\r\n \r\n else:\r\n raise KeyError('HCCM estimation mode not found.')\r\n\r\n\r\n # Heteroscedasticity Consistent Covariance Matrix (HCCM) estimator\r\n C = np.linalg.pinv(J.T@J)@J.T@V@[email protected](J.T@J)\r\n\r\n return C\r\n#===============================================================================\r\n\r\n\r\n# =================================================================\r\ndef metadata(**kwargs):\r\n \"\"\"\r\n Decorator: Set model metadata as function attributes \r\n \"\"\"\r\n attributes = list(kwargs.keys())\r\n metadata = list(kwargs.values())\r\n\r\n def _setmetadata(func):\r\n for attribute,data in zip(attributes,metadata):\r\n setattr(func,attribute,data)\r\n return func\r\n return _setmetadata\r\n# =================================================================\r\n\r\ndef gsvd(A,B):\r\n#===============================================================================\r\n m,p = A.shape\r\n n = B.shape[0]\r\n\r\n # Economy-sized.\r\n useQA = m > p\r\n useQB = n > p\r\n if useQA:\r\n QA,A = scp.linalg.qr(A)\r\n A = A[0:p,0:p]\r\n QA = QA[:,0:p]\r\n m = p\r\n\r\n if useQB:\r\n QB,B = scp.linalg.qr(B)\r\n B = B[0:p,0:p]\r\n QB = QB[:,0:p]\r\n n = p\r\n\r\n Q,_ = np.linalg.qr(np.vstack((A,B)), mode='reduced')\r\n Q1 = Q[0:m,0:p]\r\n Q2 = Q[m:m+n,0:p]\r\n C,S = csd(Q1,Q2)\r\n\r\n # Vector of generalized singular values.\r\n q = min(m+n,p)\r\n # Supress divide by 0 warning \r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore')\r\n U = np.vstack((np.zeros((q-m,1),'double'), np.diag(C,max(0,q-m)).reshape(len(np.diag(C,max(0,q-m))),1)))/np.vstack((np.diag(S,0).reshape(len(np.diag(S,0)),1), np.zeros((q-n,1),'double') ))\r\n\r\n\r\n return U\r\n#===============================================================================\r\n\r\n\r\ndef csd(Q1,Q2):\r\n#===============================================================================\r\n \"\"\"\r\n Cosine-Sine Decomposition\r\n -------------------------\r\n \r\n Given Q1 and Q2 such that Q1'* Q1 + Q2'* Q2 = I, the\r\n C-S Decomposition is a joint factorization of the form\r\n Q1 = U1*C*V' and 
Q2=U2*S*V'\r\n where U1,U2,V are orthogonal matrices and C and S are diagonal\r\n matrices (not necessarily square) satisfying\r\n C'* C + S'* S = I\r\n The diagonal entries of C and S are nonnegative and the\r\n diagonal elements of C are in nondecreasing order.\r\n The matrix Q1 cannot have more columns than rows.\r\n\r\n Based on the Octave code by Artiste (submitted by S.J.Leon): \r\n http://www.ar-tiste.com/m-fun/m-fun-index.html\r\n\r\n \"\"\"\r\n m,n = Q1.shape\r\n p,_ = Q2.shape\r\n if m < p:\r\n s,c = csd(Q2,Q1)\r\n j = np.flip(np.arange(n)) \r\n c = c[:,j] \r\n s = s[:,j] \r\n m = np.minimum(m,p) \r\n i = np.flip(np.arange(m)) \r\n c[np.arange(m),:] = c[i,:] \r\n n = np.minimum(n,p) \r\n i = np.flip(np.arange(n)) \r\n s[np.arange(n),:] = s[i,:] \r\n return c,s\r\n\r\n _,sdiag,v = np.linalg.svd(Q1)\r\n c = np.zeros((m, n))\r\n np.fill_diagonal(c, sdiag)\r\n v = v.T.conj()\r\n z = np.eye(n,n)\r\n z = scp.linalg.hankel(z[:,n-1])\r\n c[0:n,:] = z@c[0:n,:]@z\r\n v = v@z\r\n Q2 = Q2@v\r\n k=0\r\n for j in range(1,n):\r\n if c[j,j] <= 1/np.sqrt(2): k=j\r\n b = Q2[:,0:k]\r\n u2,r = np.linalg.qr(b,mode='complete')\r\n s = u2.T@Q2\r\n t = np.minimum(p,n)\r\n tt = np.minimum(m,p)\r\n if k<t:\r\n r2 = s[np.ix_(range(k,p),range(k,t))]\r\n _,sdiag,vt = np.linalg.svd(r2)\r\n ss= np.zeros(r2.shape)\r\n np.fill_diagonal(ss, sdiag)\r\n vt = vt.T.conj()\r\n s[k:p,k:t] = ss\r\n c[:,k:t] = c[:,k:t]@vt\r\n w = c[k:tt,k:t]\r\n z,r = np.linalg.qr(w,mode='complete')\r\n c[k:tt,k:t] = r\r\n for j in range(n):\r\n if c[j,j]<0:\r\n c[j,j] = -c[j,j]\r\n for j in range(t):\r\n if s[j,j]<0:\r\n s[j,j] = -s[j,j]\r\n\r\n return c,s\r\n#===============================================================================\r\n\r\n#===============================================================================\r\ndef diagf(X):\r\n \"\"\"\r\n Diagonal force\r\n\r\n X = diagf(X) zeros all the elements off the main diagonal of X.\r\n \"\"\"\r\n X = np.triu(np.tril(X))\r\n return X\r\n#===============================================================================\r\n\r\n#===============================================================================\r\ndef diagp(Y,X,k):\r\n \"\"\"\r\n DIAGP Diagonal positive.\r\n Y,X = diagp(Y,X,k) scales the columns of Y and the rows of X by\r\n unimodular factors to make the k-th diagonal of X real and positive.\r\n \"\"\"\r\n D = np.diag(X,k)\r\n j = np.where((D.real < 0) | (D.imag != 0))\r\n D = np.diag(np.conj(D[j])/abs(D[j]))\r\n Y[:,j] = Y[:,j]@D.T\r\n X[j,:] = D@X[j,:]\r\n X = X+0 # use \"+0\" to set possible -0 elements to 0\r\n return Y,X\r\n#===============================================================================\r\n\r\n#===============================================================================\r\ndef Jacobian(fcn, x0, lb, ub):\r\n \"\"\" \r\n Finite difference Jacobian estimation \r\n Estimates the Jacobian matrix of a vector-valued function ``fcn`` at the \r\n point ``x0`` taking into consideration box-constraints defined by the lower\r\n and upper bounds ``lb`` and ``ub``.\r\n\r\n This is a wrapper around the ``scipy.optimize._numdiff.approx_derivative`` function.\r\n\r\n \"\"\"\r\n J = opt._numdiff.approx_derivative(fcn,x0,method='2-point',bounds=(lb,ub))\r\n J = np.atleast_2d(J)\r\n return J\r\n#===============================================================================\r\n\r\n#===============================================================================\r\ndef movmean(x, N):\r\n \"\"\"\r\n Moving mean\r\n ===========\r\n\r\n Returns an array of local 
N-point mean values, where each mean is calculated over a sliding window of length N across neighboring elements of x.\r\n\r\n    Usage:\r\n    ------\r\n    xfilt = movmean(x,N)\r\n\r\n    Arguments:\r\n    ----------\r\n    x (array)\r\n        Array to be filtered\r\n    N (scalar)\r\n        Window size\r\n\r\n    Returns:\r\n    --------\r\n    xfilt (array)\r\n        Filtered array\r\n\r\n    \"\"\"\r\n    xfilt = np.convolve(x, np.ones(N)/N, mode='same')\r\n    return xfilt\r\n#===============================================================================\r\n\r\n#===============================================================================\r\ndef ovl(A,B):\r\n    \"\"\"\r\n    Overlap metric\r\n    ==============\r\n\r\n    Returns the overlap between two vectors A and B.\r\n\r\n    Usage:\r\n    ------\r\n    metric = ovl(A,B)\r\n\r\n    Arguments:\r\n    ----------\r\n    A (N-element array)\r\n        First vector\r\n    B (N-element array)\r\n        Second vector\r\n\r\n    Returns:\r\n    --------\r\n    metric (scalar)\r\n        Overlap metric\r\n\r\n    \"\"\"\r\n    A /= np.sum(A)\r\n    B /= np.sum(B)\r\n    metric = np.sum(np.minimum(A,B))\r\n    return metric\r\n#===============================================================================\r\n\r\n\r\ndef isempty(A):\r\n#===============================================================================\r\n    A = np.atleast_1d(A)\r\n    boolean = np.size(A)==0\r\n    return boolean\r\n#===============================================================================\r\n\r\n\r\ndef multistarts(n,x0,lb,ub):\r\n#===============================================================================\r\n\r\n    if n<1:\r\n        raise ValueError('The number of requested starting points must be n>0.')\r\n\r\n    if len(x0) != len(lb) or len(x0) != len(ub):\r\n        raise ValueError('The lower/upper bound size(s) are not compatible with the initial guess vector x0.')\r\n\r\n    # Generate n-1 new starting points within the bounds\r\n    if n>1:\r\n        x0 = np.linspace(lb,ub,n-1)\r\n    else:\r\n        x0 = [x0]\r\n    return x0\r\n#===============================================================================\r\n",
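The HCCM estimator above wraps a mode-dependent diagonal matrix V in the sandwich np.linalg.pinv(J.T@J)@J.T@V@J@np.linalg.pinv(J.T@J); using pinv rather than inv keeps the estimate defined even when J.T@J is rank-deficient. A minimal sketch of the HC1 branch on a toy heteroscedastic linear fit (data, model, and seed are invented for illustration and are not part of deerlab):

# Sketch: HC1 sandwich covariance for a linear model y = J@beta + noise,
# where the noise variance grows with x (heteroscedastic by construction).
import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0, 1, 50)
J = np.stack([np.ones_like(x), x], axis=1)      # Jacobian of the linear model
y = J @ np.array([1.0, 2.0]) + rng.normal(0, 0.1 + 0.5*x)

beta = np.linalg.lstsq(J, y, rcond=None)[0]
res = y - J @ beta
n, k = J.shape

V = n/(n - k) * np.diag(res**2)                 # HC1: rescaled squared residuals
C = np.linalg.pinv(J.T @ J) @ J.T @ V @ J @ np.linalg.pinv(J.T @ J)
print(np.sqrt(np.diag(C)))                      # heteroscedasticity-robust std. errors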
"import numpy as np\r\nfrom deerlab import mixmodels\r\nfrom deerlab.dd_models import dd_gauss, dd_rice\r\n\r\ndef test_gaussgauss():\r\n# ======================================================================\r\n \"Check the construction of a mixed model of Gaussian-Gaussian\"\r\n\r\n r = np.linspace(2,6,100)\r\n parIn1 = [3, 0.5]\r\n P1 = dd_gauss(r,parIn1)\r\n parIn2 = [4, 0.5]\r\n P2 = dd_gauss(r,parIn2)\r\n P = 0.7*P2 + 0.3*P1\r\n\r\n mixedModel = mixmodels(dd_gauss,dd_gauss)\r\n parInMix = [3, 0.5, 0.3, 4, 0.5, 0.7]\r\n Pmix = mixedModel(r,parInMix)\r\n\r\n assert max(abs(Pmix - P)) < 1e-8\r\n# ======================================================================\r\n\r\n\r\ndef test_gaussrice():\r\n# ======================================================================\r\n \"Check the construction of a mixed model of Gaussian-Gaussian\"\r\n\r\n r = np.linspace(2,6,100)\r\n parIn1 = [3, 0.5]\r\n P1 = dd_gauss(r,parIn1)\r\n parIn2 = [4, 0.5]\r\n P2 = dd_rice(r,parIn2)\r\n P = 0.7*P2 + 0.3*P1\r\n\r\n mixedModel = mixmodels(dd_gauss,dd_rice)\r\n parInMix = [3, 0.5, 0.3, 4, 0.5, 0.7]\r\n Pmix = mixedModel(r,parInMix)\r\n\r\n assert max(abs(Pmix - P)) < 1e-8\r\n# ======================================================================\r\n",
"import numpy as np\r\nfrom deerlab import fftspec\r\n\r\n\r\ndef test_basic():\r\n# ======================================================================\r\n \"Check that the magnitude spectrum can be computed from the real/imag spectra\"\r\n\r\n t = np.linspace(0,1/np.mean(2*10)*100,100)\r\n S = np.exp(-t)*np.cos(2*np.pi*5*t)\r\n\r\n specRef = abs(np.fft.fftshift(np.fft.fft(S,2*len(S))))\r\n _,spec = fftspec(S,t,apodization=False)\r\n\r\n\r\n assert max(abs(specRef - spec)) < 1e-10\r\n# ======================================================================\r\n\r\ndef test_modes():\r\n# ======================================================================\r\n \"Check that the magnitude spectrum can be computed from the real/imag spectra\"\r\n\r\n t = np.linspace(0,1/np.mean(2*10)*100,100)\r\n S = np.exp(-t)*np.cos(2*np.pi*5*t)\r\n\r\n _,specAbs = fftspec(S,t,mode='abs')\r\n _,specRe = fftspec(S,t,mode='real')\r\n _,specIm = fftspec(S,t,mode='imag')\r\n\r\n assert max(np.abs(np.sqrt(specRe**2 + specIm**2) - specAbs)) < 1e-10\r\n# ======================================================================\r\n"
] | [
[
"numpy.diag",
"numpy.minimum",
"numpy.sqrt",
"numpy.linspace",
"numpy.concatenate",
"numpy.mean",
"numpy.fill_diagonal",
"numpy.linalg.qr",
"numpy.where",
"numpy.tril",
"numpy.linalg.svd",
"numpy.arange",
"numpy.eye",
"numpy.atleast_1d",
"numpy.size",
"scipy.linalg.hankel",
"numpy.repeat",
"numpy.zeros",
"scipy.optimize._numdiff.approx_derivative",
"numpy.atleast_2d",
"numpy.sum",
"scipy.linalg.qr",
"numpy.conj",
"numpy.ones",
"numpy.linalg.pinv",
"numpy.shape",
"numpy.vstack"
],
[
"numpy.linspace"
],
[
"numpy.sqrt",
"numpy.exp",
"numpy.mean",
"numpy.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
linxi1158/iMIX | [
"99898de97ef8b45462ca1d6bf2542e423a73d769",
"99898de97ef8b45462ca1d6bf2542e423a73d769",
"99898de97ef8b45462ca1d6bf2542e423a73d769",
"af87a17275f02c94932bb2e29f132a84db812002",
"af87a17275f02c94932bb2e29f132a84db812002",
"99898de97ef8b45462ca1d6bf2542e423a73d769",
"99898de97ef8b45462ca1d6bf2542e423a73d769"
] | [
"imix/data/infocomp/ocrvqa_infocpler.py",
"imix/models/backbones/cagraph_backbone.py",
"imix/data/vqadata/vocabprocessor.py",
"imix/solver/lr_scheduler.py",
"imix/models/vqa_models/vilbert/vilbert.py",
"imix/engine/hooks/periods/tensorboard_logger.py",
"imix/models/losses/triple_logit_binary_cross_entropy.py"
] | [
"import torch\n\nfrom ..utils.stream import ItemFeature\nfrom .base_infocpler import BaseInfoCpler\n\n\nclass OCRVQAInfoCpler(BaseInfoCpler):\n\n def __init__(self, cfg):\n super().__init__(cfg)\n\n def complete_info(self, item_feature: ItemFeature):\n tokens = self.tokenizer.tokenize(item_feature.question.strip())\n tokens = self.tokenizer.get_limited_tokens(tokens, self.max_seq_length - 2)\n tokens, input_lm_label_ids = self.tokenizer.random_mask_tokens(tokens, self.word_mask_ratio)\n tokens = [self._CLS_TOKEN] + tokens + [self._SEP_TOEKN]\n\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n input_mask = [1] * len(tokens)\n input_segment = [0] * len(tokens)\n input_lm_label_ids = [-1] * len(tokens)\n # while len(input_ids) < self.max_seq_length:\n # input_ids.append(int(self.pad_idx))\n # input_mask.append(0)\n # input_segment.append(0)\n # input_lm_label_ids.append(-1)\n to_extd_length = self.max_seq_length - len(input_ids)\n self.info_extend(to_extd_length, (input_ids, int(self.pad_idx)), (input_mask, 0), (input_segment, 0),\n (input_lm_label_ids, -1))\n # ocr vectors\n ocr_tokens = self.tokenizer.get_limited_tokens(item_feature.ocr_tokens, self.max_ocr_length)\n item_feature.ocr_vectors_glove = self.get_tokens_glove_vectors(ocr_tokens)\n item_feature.ocr_vectors_order = self.get_tokens_order_vectors(ocr_tokens)\n item_feature.ocr_vectors_phoc = self.get_tokens_phoc_vectors(ocr_tokens)\n item_feature.ocr_vectors_fasttext = self.get_tokens_fasttext_vectors(ocr_tokens)\n\n # ocr features and bboxes\n features_ocr = torch.zeros(\n (self.max_ocr_length,\n item_feature.features_ocr.shape[1] if item_feature.features_ocr is not None else 2048),\n dtype=torch.float)\n bbox_ocr_normalized = torch.zeros(\n (self.max_ocr_length,\n item_feature.ocr_normalized_boxes.shape[1] if item_feature.ocr_normalized_boxes is not None else 4),\n dtype=torch.float)\n if item_feature.features_ocr is not None:\n limit = min(self.max_ocr_length, len(item_feature.features_ocr))\n features_ocr[:limit] = torch.tensor(item_feature.features_ocr[:limit])\n bbox_ocr_normalized[:limit] = torch.tensor(item_feature.ocr_normalized_boxes[:limit])\n item_feature.features_ocr = features_ocr\n item_feature.ocr_normalized_boxes = bbox_ocr_normalized\n\n # features and bboxes\n img_h = item_feature.image_height\n img_w = item_feature.image_width\n item_feature.bbox = self._get_bbox_from_normalized(item_feature.obj_normalized_boxes, img_h, img_w)\n item_feature.bbox_normalized = item_feature.obj_normalized_boxes\n item_feature.bbox_ocr = self._get_bbox_from_normalized(item_feature.ocr_normalized_boxes, img_h, img_w)\n item_feature.bbox_ocr_normalized = item_feature.ocr_normalized_boxes\n\n item_feature.input_ids = torch.tensor(input_ids, dtype=torch.long)\n item_feature.input_mask = torch.tensor(input_mask, dtype=torch.int)\n item_feature.input_segment = torch.tensor(input_segment, dtype=torch.int)\n item_feature.input_lm_label_ids = torch.tensor(input_lm_label_ids, dtype=torch.long)\n item_feature.qa_ids = [self.qa_ans2id[ans] for ans in item_feature.answers if ans in self.qa_ans2id]\n # item_feature.qa_allids = [self.qa_ans2id[ans] for ans in item_feature.all_answers if ans in self.qa_ans2id]\n item_feature.answers_scores = self.compute_answers_scores(torch.Tensor(item_feature.qa_ids))\n return item_feature\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.nn.utils.weight_norm import weight_norm\n\nfrom ..builder import BACKBONES\n\n\[email protected]_module()\nclass CAGRAPH_BACKBONE(nn.Module):\n\n def __init__(self, rnn_type, nlayers, ninp, nhid, dropout):\n super().__init__()\n self.d = dropout\n self.ninp = ninp\n self.nhid = nhid\n self.nlayers = nlayers\n self.rnn_type = rnn_type\n self.neighbourhood_size = 8\n\n self.Wq_1 = nn.Linear(self.nhid, self.nhid) # attention\n self.Wh_1 = nn.Linear(self.nhid, self.nhid)\n self.Wa_1 = nn.Linear(self.nhid, 1)\n\n self.ref_att = FCNet([self.nhid, self.nhid])\n self.Wqt = nn.Linear(self.nhid, 1)\n\n self.ref_att2 = FCNet([self.nhid, self.nhid])\n self.Wqt2 = nn.Linear(self.nhid, 1)\n\n self.ref_att3 = FCNet([self.nhid, self.nhid])\n self.Wqt3 = nn.Linear(self.nhid, 1)\n\n self.W1 = nn.Linear(self.nhid, self.nhid)\n self.W2 = nn.Linear(self.nhid, self.nhid)\n\n self.W3 = nn.Linear(self.nhid * 2, self.nhid)\n self.W4 = nn.Linear(self.ninp, self.nhid)\n self.W5 = nn.Linear(self.nhid * 2, self.nhid)\n\n self.W6 = nn.Linear(self.nhid * 2, self.nhid)\n self.W7 = nn.Linear(self.ninp, self.nhid)\n self.W8 = nn.Linear(self.nhid * 2, self.nhid)\n\n self.W9 = nn.Linear(self.nhid * 2, self.nhid)\n self.W10 = nn.Linear(self.nhid, self.nhid)\n self.W11 = nn.Linear(self.nhid, 1)\n\n self.fc1 = nn.Linear(self.nhid * 4, self.ninp)\n\n def forward(self, ques_feat, his_feat, rcnn_feat, ques_emb, rnd):\n\n L = ques_emb.size(0)\n # history attention ##############################\n ques_emb_1 = self.Wq_1(ques_feat[-1]).view(-1, 1, self.nhid)\n his_emb_1 = self.Wh_1(his_feat).view(-1, rnd, self.nhid)\n atten_emb_1 = F.tanh(his_emb_1 + ques_emb_1.expand_as(his_emb_1))\n his_atten_weight = F.softmax(\n self.Wa_1(F.dropout(atten_emb_1, self.d, training=self.training).view(-1, self.nhid)).view(-1, rnd))\n h_emb = torch.bmm(his_atten_weight.view(-1, 1, rnd), his_feat.view(-1, rnd, self.nhid))\n\n # graph constrution ############################\n graph = torch.cat((rcnn_feat, h_emb.expand_as(rcnn_feat)), dim=2)\n\n # T == 1 #######################################\n # question command #############################\n q_norm = F.normalize(self.ref_att(ques_feat.transpose(0, 1)), p=2, dim=-1)\n at = F.softmax(self.Wqt(F.dropout(q_norm, self.d, training=self.training).view(-1, self.nhid)).view(-1, L))\n q_c = torch.bmm(at.view(-1, 1, L), ques_emb.transpose(0, 1)).squeeze(1)\n # belief_matrix #############################\n mes_b = self.W3(graph) * self.W4(q_c).unsqueeze(1)\n belief_mat = torch.bmm(self.W5(graph), mes_b.transpose(1, 2))\n # belief = F.softmax(belief_mat, dim=2)\n # message passing ###########################\n mes = self.W6(graph) * self.W7(q_c).unsqueeze(1)\n sum_mes = self._create_neighbourhood(mes, belief_mat, self.neighbourhood_size)\n context_1 = self.W8(torch.cat((h_emb.expand_as(rcnn_feat), sum_mes), dim=2))\n graph2 = torch.cat((rcnn_feat, context_1), dim=2)\n\n # T == 2 #######################################\n # question command #############################\n q_norm2 = F.normalize(self.ref_att2(ques_feat.transpose(0, 1)), p=2, dim=-1)\n at2 = F.softmax(self.Wqt2(F.dropout(q_norm2, self.d, training=self.training).view(-1, self.nhid)).view(-1, L))\n q_c2 = torch.bmm(at2.view(-1, 1, L), ques_emb.transpose(0, 1)).squeeze(1)\n # belief_matrix #############################\n mes_b2 = self.W3(graph2) * self.W4(q_c2).unsqueeze(1)\n belief_mat2 = torch.bmm(self.W5(graph2), mes_b2.transpose(1, 
2))\n # belief2 = F.softmax(belief_mat2, dim=2)\n # message passing ###########################\n mes2 = self.W6(graph2) * self.W7(q_c2).unsqueeze(1)\n sum_mes2 = self._create_neighbourhood(mes2, belief_mat2, self.neighbourhood_size)\n context_2 = self.W8(torch.cat((context_1, sum_mes2), dim=2))\n graph3 = torch.cat((rcnn_feat, context_2), dim=2)\n\n # T == 3 #######################################\n # question command #############################\n q_norm3 = F.normalize(self.ref_att3(ques_feat.transpose(0, 1)), p=2, dim=-1)\n at3 = F.softmax(self.Wqt3(F.dropout(q_norm3, self.d, training=self.training).view(-1, self.nhid)).view(-1, L))\n q_c3 = torch.bmm(at3.view(-1, 1, L), ques_emb.transpose(0, 1)).squeeze(1)\n # belief_matrix #############################\n mes_b3 = self.W3(graph3) * self.W4(q_c3).unsqueeze(1)\n belief_mat3 = torch.bmm(self.W5(graph3), mes_b3.transpose(1, 2))\n # belief3 = F.softmax(belief_mat3, dim=2)\n # message passing ###########################\n mes3 = self.W6(graph3) * self.W7(q_c3).unsqueeze(1)\n sum_mes3 = self._create_neighbourhood(mes3, belief_mat3, self.neighbourhood_size)\n context_3 = self.W8(torch.cat((context_2, sum_mes3), dim=2))\n graph4 = torch.cat((rcnn_feat, context_3), dim=2)\n\n # Graph Attention ##############################\n g2_emb = self.W9(graph4).view(-1, 36, self.nhid)\n q_emb = self.W10(ques_feat[-1]).view(-1, 1, self.nhid)\n att_gq_emb = F.tanh(g2_emb + q_emb.expand_as(g2_emb))\n graph_att = F.softmax(\n self.W11(F.dropout(att_gq_emb, self.d, training=self.training).view(-1, self.nhid)).view(-1,\n 36)).unsqueeze(1)\n graph_emb = torch.bmm(graph_att, graph4)\n\n # Multi-modal Fusion ############################\n concat_feat = torch.cat(\n (graph_emb.view(-1, 2 * self.nhid), ques_feat[-1].view(-1, self.nhid), h_emb.view(-1, self.nhid)), 1)\n final_feat = F.tanh(self.fc1(F.dropout(concat_feat, self.d, training=self.training)))\n\n return final_feat\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n if self.rnn_type == 'LSTM':\n return (Variable(weight.new(self.nlayers, bsz,\n self.nhid).zero_()), Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))\n else:\n return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())\n\n def _create_neighbourhood_mes(self, message, top_ind):\n \"\"\"## Inputs:\n\n - message (batch_size, K, feat_dim)\n - top_ind (batch_size, K, neighbourhood_size)\n ## Returns:\n - neighbourhood_message (batch_size, K, neighbourhood_size, feat_dim)\n \"\"\"\n\n batch_size = message.size(0)\n K = message.size(1)\n feat_dim = message.size(2)\n neighbourhood_size = top_ind.size(-1)\n message = message.unsqueeze(1).expand(batch_size, K, K, feat_dim)\n idx = top_ind.unsqueeze(-1).expand(batch_size, K, neighbourhood_size, feat_dim)\n return torch.gather(message, dim=2, index=idx)\n\n def _create_neighbourhood(self, message, belief_matrix, neighbourhood_size):\n \"\"\"Creates a neighbourhood system for each graph node/image object.\n\n ## Inputs:\n - message (batch_size, K, feat_dim): input message features\n - adjacency_matrix (batch_size, K, K): learned adjacency matrix\n - neighbourhood_size (int)\n - weight (bool): specify if the features should be weighted by the adjacency matrix values\n\n ## Returns:\n - sum_messages (batch_size, K, neighbourhood_size, feat_dim)\n \"\"\"\n\n # Number of graph nodes\n K = message.size(1)\n # pdb.set_trace()\n\n # extract top k neighbours for each node and normalise\n top_k, top_ind = torch.topk(belief_matrix, k=neighbourhood_size, dim=-1, sorted=False)\n 
top_k = torch.stack([F.softmax(top_k[:, k])\n for k in range(K)]).transpose(0, 1) # (batch_size, K, neighbourhood_size)\n\n # extract top k features\n neighbourhood_mes = self._create_neighbourhood_mes(message, top_ind)\n\n sum_mes = torch.sum(top_k.unsqueeze(-1) * neighbourhood_mes, dim=2)\n\n return sum_mes\n\n\nclass FCNet(nn.Module):\n \"\"\"Simple class for non-linear fully connect network.\"\"\"\n\n def __init__(self, dims, dropout=0.2):\n super(FCNet, self).__init__()\n\n layers = []\n for i in range(len(dims) - 2):\n in_dim = dims[i]\n out_dim = dims[i + 1]\n if 0 < dropout:\n layers.append(nn.Dropout(dropout))\n layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))\n layers.append(nn.Tanh())\n if 0 < dropout:\n layers.append(nn.Dropout(dropout))\n layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))\n layers.append(nn.Sigmoid())\n\n self.main = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.main(x)\n",
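_create_neighbourhood above picks each node's strongest k entries from the belief matrix with torch.topk, softmax-normalizes those scores, and gathers the matching message vectors before the weighted sum. A minimal sketch with toy sizes (the per-node softmax loop in the module reduces to the softmax over the last dimension used here):

# Sketch: top-k neighbourhood selection and message gathering.
import torch
import torch.nn.functional as F

batch_size, K, feat_dim, nsize = 2, 5, 4, 3
message = torch.randn(batch_size, K, feat_dim)
belief = torch.randn(batch_size, K, K)

top_k, top_ind = torch.topk(belief, k=nsize, dim=-1, sorted=False)
weights = F.softmax(top_k, dim=-1)                       # (batch, K, nsize)

# Expand messages to (batch, K, K, feat) and gather each node's neighbours
expanded = message.unsqueeze(1).expand(batch_size, K, K, feat_dim)
idx = top_ind.unsqueeze(-1).expand(batch_size, K, nsize, feat_dim)
neighbours = torch.gather(expanded, dim=2, index=idx)    # (batch, K, nsize, feat)

sum_mes = torch.sum(weights.unsqueeze(-1) * neighbours, dim=2)
assert sum_mes.shape == (batch_size, K, feat_dim)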
"import torch\n\nfrom ..builder import VOCAB\nfrom .baseprocessor import BaseProcessor\n\n\[email protected]_module()\nclass VocabProcessor(BaseProcessor):\n \"\"\"Use VocabProcessor when you have vocab file and you want to process\n words to indices. Expects UNK token as \"<unk>\" and pads sentences using\n \"<pad>\" token. Config parameters can have ``preprocessor`` property which\n is used to preprocess the item passed and ``max_length`` property which\n points to maximum length of the sentence/tokens which can be convert to\n indices. If the length is smaller, the sentence will be padded. Parameters\n for \"vocab\" are necessary to be passed.\n\n **Key**: vocab\n\n Example Config::\n\n task_attributes:\n vqa:\n vqa2:\n processors:\n text_processor:\n type: vocab\n params:\n max_length: 14\n vocab:\n type: intersected\n embedding_name: glove.6B.300d\n vocab_file: vocabs/vocabulary_100k.txt\n\n Args:\n config (DictConfig): node containing configuration parameters of\n the processor\n\n Attributes:\n vocab (Vocab): Vocab class object which is abstraction over the vocab\n file passed.\n \"\"\"\n\n MAX_LENGTH_DEFAULT = 50\n PAD_TOKEN = '<pad>'\n PAD_INDEX = 0\n\n def __init__(self,\n vocab=dict(\n type='IntersectedVocab',\n vocab_file='textvqa/defaults/extras/vocabs/vocabulary_100k.txt',\n embedding_name='glove.6B.300d'),\n preprocessor=dict(type='SimpleSentenceProcessor'),\n *args,\n **kwargs):\n\n # self.vocab = Vocab(*args, **config.vocab, **kwargs)\n # self.vocab = build_vocab(vocab)\n self.vocab = None\n self.max_length = self.MAX_LENGTH_DEFAULT\n # self.preprocessor = build_preprocessor(preprocessor)\n self.preprocessor = None\n\n # self._init_extras(config)\n\n # def _init_extras(self, config, *args, **kwargs):\n # self.writer = registry.get(\"writer\")\n # self.preprocessor = None\n #\n # if hasattr(config, \"max_length\"):\n # self.max_length = config.max_length\n # else:\n # warnings.warn(\n # \"No 'max_length' parameter in Processor's \"\n # \"configuration. Setting to {}.\".format(self.MAX_LENGTH_DEFAULT)\n # )\n # self.max_length = self.MAX_LENGTH_DEFAULT\n #\n # if \"preprocessor\" in config:\n # self.preprocessor = Processor(config.preprocessor, *args, **kwargs)\n #\n # if self.preprocessor is None:\n # raise ValueError(\n # f\"No text processor named {config.preprocessor} is defined.\"\n # )\n\n def __call__(self, item):\n \"\"\"Call requires item to have either \"tokens\" attribute or either\n \"text\" attribute. 
If \"text\" is present, it will tokenized using the\n preprocessor.\n\n Args:\n item (Dict): Dict containing the \"text\" or \"tokens\".\n\n Returns:\n Dict: Dict containing indices in \"text\" key, \"tokens\" in \"tokens\"\n key and \"length\" of the string in \"length\" key.\n \"\"\"\n indices = None\n if not isinstance(item, dict):\n raise TypeError('Argument passed to the processor must be '\n \"a dict with either 'text' or 'tokens' as \"\n 'keys')\n if 'tokens' in item:\n tokens = item['tokens']\n indices = self._map_strings_to_indices(item['tokens'])\n elif 'text' in item:\n if self.preprocessor is None:\n raise AssertionError('If tokens are not provided, a text ' 'processor must be defined in the config')\n\n tokens = self.preprocessor({'text': item['text']})['text']\n indices = self._map_strings_to_indices(tokens)\n else:\n raise AssertionError(\"A dict with either 'text' or 'tokens' keys \" 'must be passed to the processor')\n\n tokens, length = self._pad_tokens(tokens)\n\n return {'text': indices, 'tokens': tokens, 'length': length}\n\n def _pad_tokens(self, tokens):\n padded_tokens = [self.PAD_TOKEN] * self.max_length\n token_length = min(len(tokens), self.max_length)\n padded_tokens[:token_length] = tokens[:token_length]\n token_length = torch.tensor(token_length, dtype=torch.long)\n return padded_tokens, token_length\n\n def get_pad_index(self):\n \"\"\"Get index of padding <pad> token in vocabulary.\n\n Returns:\n int: index of the padding token.\n \"\"\"\n return self.vocab.get_pad_index()\n\n def get_vocab_size(self):\n \"\"\"Get size of the vocabulary.\n\n Returns:\n int: size of the vocabulary.\n \"\"\"\n return self.vocab.get_size()\n\n def _map_strings_to_indices(self, tokens):\n length = min(len(tokens), self.max_length)\n tokens = tokens[:length]\n\n output = torch.zeros(self.max_length, dtype=torch.long)\n output.fill_(self.vocab.get_pad_index())\n\n for idx, token in enumerate(tokens):\n output[idx] = self.vocab.stoi[token]\n\n return output\n",
"import math\nfrom bisect import bisect_right, bisect\nfrom typing import List\nfrom functools import lru_cache\n\nimport torch\nfrom .builder import LR_SCHEDULERS\nfrom torch.optim.lr_scheduler import LambdaLR, _LRScheduler\nfrom .optimization import BertAdam\nimport imix.utils.distributed_info as comm\nimport logging\nfrom transformers.optimization import (\n get_constant_schedule,\n get_constant_schedule_with_warmup,\n get_linear_schedule_with_warmup,\n get_cosine_schedule_with_warmup,\n get_cosine_with_hard_restarts_schedule_with_warmup,\n get_polynomial_decay_schedule_with_warmup,\n)\n\n\n@LR_SCHEDULERS.register_module()\nclass WarmupMultiStepLR(_LRScheduler):\n\n def __init__(\n self,\n optimizer: torch.optim.Optimizer,\n milestones: List[int],\n *,\n gamma: float = 0.1,\n warmup_factor: float = 0.001,\n warmup_iters: int = 1000,\n warmup_method: str = 'linear',\n last_epoch: int = -1,\n ):\n if not list(milestones) == sorted(milestones):\n raise ValueError('Milestones should be a list of' ' increasing integers. Got {}', milestones)\n\n self.milestones = milestones\n self.gamma = gamma\n self.warmup_factor = warmup_factor\n self.warmup_iters = warmup_iters\n self.warmup_method = warmup_method\n\n super().__init__(optimizer, last_epoch)\n\n def get_lr(self) -> List[float]:\n warmup_factor = _get_warmup_factor_at_iter(self.warmup_method, self.last_epoch, self.warmup_iters,\n self.warmup_factor)\n\n @lru_cache\n def calculate_lr(base_lr):\n return base_lr * warmup_factor * self.gamma**bisect_right(self.milestones, self.last_epoch)\n\n return [calculate_lr(base_lr) for base_lr in self.base_lrs]\n\n def _compute_values(self) -> List[float]:\n return self.get_lr()\n\n\n@LR_SCHEDULERS.register_module()\nclass ReduceOnPlateauSchedule(torch.optim.lr_scheduler.ReduceLROnPlateau):\n\n def __init__(self, optimizer: torch.optim.Optimizer, **kwargs):\n self.factor = kwargs['factor']\n self.mode = kwargs['mode']\n self.patience = kwargs['patience']\n self.verbose = kwargs['verbose']\n self.cooldown = kwargs['cooldown']\n super().__init__(\n optimizer,\n mode=self.mode,\n factor=self.factor,\n patience=self.patience,\n verbose=self.verbose,\n cooldown=self.cooldown)\n\n def get_lr(self):\n return self.get_last_lr()\n\n\n@LR_SCHEDULERS.register_module()\nclass WarmupCosineLR(_LRScheduler):\n\n def __init__(\n self,\n optimizer: torch.optim.Optimizer,\n max_iters: int,\n *,\n warmup_factor: float = 0.001,\n warmup_iters: int = 1000,\n warmup_method: str = 'linear',\n last_epoch: int = -1,\n ):\n self.max_iters = max_iters\n self.warmup_factor = warmup_factor\n self.warmup_iters = warmup_iters\n self.warmup_method = warmup_method\n super().__init__(optimizer, last_epoch)\n\n def get_lr(self) -> List[float]:\n warmup_factor = _get_warmup_factor_at_iter(self.warmup_method, self.last_epoch, self.warmup_iters,\n self.warmup_factor)\n\n @lru_cache\n def calculate_lr(base_lr):\n return base_lr * warmup_factor * 0.5 * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters))\n\n return [calculate_lr(base_lr) for base_lr in self.base_lrs]\n\n def _compute_values(self) -> List[float]:\n return self.get_lr()\n\n\n@LR_SCHEDULERS.register_module()\nclass PythiaScheduler(LambdaLR):\n\n def __init__(self, optimizer, *args, **kwargs):\n self._lambda_func = lr_lambda_update\n\n super().__init__(optimizer, self.lr_lambda, *args, **kwargs)\n\n def lr_lambda(self, step):\n return self._lambda_func(step, self._global_config)\n\n\n@LR_SCHEDULERS.register_module()\nclass MultiStepScheduler(PythiaScheduler):\n\n def 
__init__(self, optimizer, *args, **kwargs):\n self.use_warmup = kwargs['use_warmup']\n self.lr_steps = kwargs['lr_steps']\n self.lr_ratio = kwargs['lr_ratio']\n self.warmup_iterations = kwargs['warmup_iterations'] if self.use_warmup else 0\n self.warmup_factor = kwargs['warmup_factor']\n assert self.warmup_iterations < self.lr_steps[0]\n super().__init__(optimizer)\n\n def get_lr(self):\n if self.last_epoch <= self.warmup_iterations and self.use_warmup is True:\n alpha = float(self.last_epoch) / float(self.warmup_iterations)\n lr_ratio = self.warmup_factor * (1.0 - alpha) + alpha\n\n return [base_lr * lr_ratio for base_lr in self.base_lrs]\n else:\n\n @lru_cache\n def calculate_lr(base_lr):\n return base_lr * self.lr_ratio**bisect_right(self.lr_steps, self.last_epoch)\n\n return [calculate_lr(base_lr) for base_lr in self.base_lrs]\n\n\n@LR_SCHEDULERS.register_module()\nclass WarmupLinearScheduleNonZero(_LRScheduler):\n \"\"\"Linear warmup and then linear decay. Linearly increases learning rate\n from 0 to max_lr over `warmup_steps` training steps.\n\n Linearly decreases learning rate linearly to min_lr over remaining `t_total - warmup_steps` steps.\n \"\"\"\n\n def __init__(self, optimizer, t_total, warmup_iterations=0, use_warmup=False, min_lr=1e-5, last_epoch=-1):\n self.use_warmup = use_warmup\n self.warmup_iters = warmup_iterations\n self.t_total = t_total\n self.min_lr = min_lr\n super(WarmupLinearScheduleNonZero, self).__init__(optimizer, last_epoch=last_epoch)\n\n def get_lr(self):\n step = self.last_epoch\n if step < self.warmup_iters:\n lr_factor = float(step) / float(max(1, self.warmup_iters))\n else:\n lr_factor = max(0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_iters)))\n\n return [\n base_lr * lr_factor if (base_lr * lr_factor) > self.min_lr else self.min_lr for base_lr in self.base_lrs\n ]\n\n\ndef _get_warmup_factor_at_iter(method: str, iter: int, warmup_iters: int, warmup_factor: float) -> float:\n \"\"\"Return the learning rate warmup factor at a specific iteration. See\n :paper:`in1k1h` for more details.\n\n Args:\n method (str): warmup method; either \"constant\" or \"linear\".\n iter (int): iteration at which to calculate the warmup factor.\n warmup_iters (int): the number of warmup iterations.\n warmup_factor (float): the base warmup factor (the meaning changes according\n to the method used).\n\n Returns:\n float: the effective warmup factor at the given iteration.\n \"\"\"\n if iter >= warmup_iters:\n return 1.0\n support_method = ['constant', 'linear']\n\n def constant_method():\n return warmup_factor\n\n def linear_method():\n alpha = iter / warmup_iters\n return warmup_factor * (1 - alpha) + alpha\n\n if method in support_method:\n return eval(method + '_method')()\n else:\n raise ValueError('Unknown warmup method: {}'.format(method))\n\n\ndef lr_lambda_update(i_iter, cfg):\n if cfg.training.use_warmup is True and i_iter <= cfg.training.warmup_iterations:\n alpha = float(i_iter) / float(cfg.training.warmup_iterations)\n return cfg.training.warmup_factor * (1.0 - alpha) + alpha\n else:\n idx = bisect(cfg.training.lr_steps, i_iter)\n return pow(cfg.training.lr_ratio, idx)\n\n\ndef warmup_cosine(x, warmup=0.002):\n if x < warmup:\n return x / warmup\n return 0.5 * (1.0 + torch.cos(math.pi * x))\n\n\ndef warmup_constant(x, warmup=0.002):\n \"\"\"Linearly increases learning rate over `warmup`*`t_total` (as provided to\n BertAdam) training steps.\n\n Learning rate is 1. 
afterwards.\n \"\"\"\n if x < warmup:\n return x / warmup\n return 1.0\n\n\ndef warmup_linear(x, warmup=0.002):\n \"\"\"Specifies a triangular learning rate schedule where peak is reached at\n `warmup`*`t_total`-th (as provided to BertAdam) training step.\n\n After `t_total`-th training step, learning rate is zero.\n \"\"\"\n if x < warmup:\n return x / warmup\n return max((x - 1.) / (warmup - 1.), 0)\n\n\nSCHEDULES = {\n 'warmup_cosine': warmup_cosine,\n 'warmup_constant': warmup_constant,\n 'warmup_linear': warmup_linear,\n}\n\n\n@LR_SCHEDULERS.register_module()\nclass BertWarmupLinearLR(torch.optim.lr_scheduler._LRScheduler):\n \"\"\"Implements BERT version of Warmup Linear lr algorithm\n Params:\n warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1\n t_total: total number of training steps for the learning\n rate schedule, -1 means constant learning rate. Default: -1\n schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'\n \"\"\"\n\n def __init__(\n self,\n optimizer: BertAdam,\n max_iters: int,\n warmup: float = -1,\n warmup_method: str = 'warmup_linear',\n last_epoch: int = -1,\n ):\n if warmup_method not in SCHEDULES:\n raise ValueError('Invalid schedule parameter: {}'.format(warmup_method))\n if not 0.0 <= warmup < 1.0 and not warmup == -1:\n raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))\n\n self.max_iters = max_iters\n self.warmup = warmup\n self.warmup_method = warmup_method\n self.warned_for_t_total = False\n super().__init__(optimizer, last_epoch)\n\n def get_lr(self) -> List[float]:\n if self.max_iters != -1:\n if comm.is_main_process():\n logger = logging.getLogger(__name__)\n\n schedule_fct = SCHEDULES[self.warmup_method]\n progress = self.last_epoch / self.max_iters\n lr_cur = [base_lr * schedule_fct(progress, self.warmup) for base_lr in self.base_lrs]\n # warning for exceeding t_total (only active with warmup_linear\n if self.warmup_method == 'warmup_linear' and progress > 1. and not self.warned_for_t_total:\n if comm.is_main_process():\n logger.info(\n \"Training beyond specified 't_total' steps with schedule '{}'. Learning rate set to {}. \"\n \"Please set 't_total' of {} correctly.\".format(self.warmup_method, lr_cur,\n self.__class__.__name__))\n self.warned_for_t_total = True\n # end warning\n else:\n lr_cur = [base_lr for base_lr in self.base_lrs]\n\n # Different definitions of half-cosine with warmup are possible. For\n # simplicity we multiply the standard half-cosine schedule by the warmup\n # factor. An alternative is to start the period of the cosine at warmup_iters\n # instead of at 0. In the case that warmup_iters << max_iters the two are\n # very close to each other.\n return lr_cur\n\n def _compute_values(self) -> List[float]:\n # The new interface\n return self.get_lr()\n\n\n@LR_SCHEDULERS.register_module()\nclass ConstantSchedule(LambdaLR):\n\n def __new__(cls, optimizer, *args, **kwargs):\n return get_constant_schedule(optimizer, *args, **kwargs)\n\n\n@LR_SCHEDULERS.register_module()\nclass WarmupConstantSchedule(LambdaLR):\n\n def __new__(cls, optimizer, *args, **kwargs):\n return get_constant_schedule_with_warmup(optimizer, *args, **kwargs)\n\n\n@LR_SCHEDULERS.register_module()\nclass WarmupLinearSchedule(LambdaLR):\n \"\"\"Linear warmup and then linear decay. Linearly increases learning rate\n from 0 to 1 over `warmup_steps` training steps.\n\n Linearly decreases learning rate from 1. to 0. 
over remaining `t_total - warmup_steps` steps.\n \"\"\"\n\n def __new__(cls, optimizer, *args, **kwargs):\n return get_linear_schedule_with_warmup(optimizer, *args, **kwargs)\n\n\n@LR_SCHEDULERS.register_module()\nclass WarmupCosineSchedule(LambdaLR):\n\n def __new__(cls, optimizer, *args, **kwargs):\n return get_cosine_schedule_with_warmup(optimizer, *args, **kwargs)\n\n\n@LR_SCHEDULERS.register_module()\nclass WarmupCosineWithHardRestartsSchedule(LambdaLR):\n\n def __new__(cls, optimizer, *args, **kwargs):\n return get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, *args, **kwargs)\n\n\n@LR_SCHEDULERS.register_module()\nclass WarmupPolynomialSchedule(LambdaLR):\n\n def __new__(cls, optimizer, *args, **kwargs):\n return get_polynomial_decay_schedule_with_warmup(optimizer, *args, **kwargs)\n",
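WarmupLinearScheduleNonZero above ramps the factor linearly from 0 to 1 over warmup_iters, then decays it linearly toward 0 over the remaining steps, flooring the resulting rate at min_lr. The same arithmetic condensed into a standalone function (parameter values chosen only to make the asserts readable):

# Sketch of the lr computed by WarmupLinearScheduleNonZero.get_lr for one base lr.
def lr_at(step, base_lr, warmup_iters=100, t_total=1000, min_lr=1e-5):
    if step < warmup_iters:
        factor = step / max(1, warmup_iters)
    else:
        factor = max(0, (t_total - step) / max(1.0, t_total - warmup_iters))
    return max(base_lr * factor, min_lr)

assert lr_at(50, 1e-4) == 5e-5    # halfway through warmup
assert lr_at(550, 1e-4) == 5e-5   # halfway through decay
assert lr_at(2000, 1e-4) == 1e-5  # floored at min_lr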
"import copy\nimport logging\nimport math\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\nimport torch.nn.functional as F\nfrom transformers.modeling_bert import (\n BertConfig,\n BertPreTrainedModel,\n BertPredictionHeadTransform,\n BertPooler,\n BertLayer,\n BertIntermediate,\n BertOutput,\n # BertLMPredictionHead,\n # BertEmbeddings,\n # BertEncoder,\n # BertAttention,\n # BertSelfAttention,\n # BertSelfOutput,\n ACT2FN,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass GeLU(nn.Module):\n \"\"\"Implementation of the gelu activation function. For information: OpenAI\n GPT's gelu is slightly different (and gives slightly different results):\n\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return ACT2FN['gelu'](x)\n\n\ntry:\n from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm\nexcept ImportError:\n logger.info('Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .')\n\n class BertLayerNorm(nn.Module):\n\n def __init__(self, hidden_size, eps=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the\n square root).\"\"\"\n super(BertLayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n return self.weight * x + self.bias\n\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type\n embeddings.\"\"\"\n\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n\n self.task_specific_tokens = config.task_specific_tokens\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n if self.task_specific_tokens:\n self.task_embeddings = nn.Embedding(20, config.hidden_size)\n\n def forward(self, input_ids, token_type_ids=None, task_ids=None, position_ids=None):\n\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings = words_embeddings + position_embeddings + token_type_embeddings\n\n if self.task_specific_tokens:\n task_embeddings = self.task_embeddings(task_ids)\n embeddings = torch.cat([embeddings[:, 0:1], task_embeddings, embeddings[:, 1:]], dim=1)\n\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n\n return embeddings\n\n\nclass RobertaEmbeddings(BertEmbeddings):\n \"\"\"Same as BertEmbeddings with a tiny tweak for positional embeddings\n indexing.\"\"\"\n\n def 
__init__(self, config):\n super(RobertaEmbeddings, self).__init__(config)\n self.padding_idx = 1\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n seq_length = input_ids.size(1)\n if position_ids is None:\n # Position numbers begin at padding_idx+1. Padding symbols are ignored.\n # cf. fairseq's `utils.make_positions`\n position_ids = torch.arange(\n self.padding_idx + 1,\n seq_length + self.padding_idx + 1,\n dtype=torch.long,\n device=input_ids.device,\n )\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n return super(RobertaEmbeddings, self).forward(\n input_ids, token_type_ids=token_type_ids, position_ids=position_ids)\n\n\nclass BertBiAttention(nn.Module):\n\n def __init__(self, config):\n super(BertBiAttention, self).__init__()\n if config.bi_hidden_size % config.bi_num_attention_heads != 0:\n raise ValueError('The hidden size (%d) is not a multiple of the number of attention '\n 'heads (%d)' % (config.bi_hidden_size, config.bi_num_attention_heads))\n\n self.visualization = config.visualization\n self.num_attention_heads = config.bi_num_attention_heads\n self.attention_head_size = int(config.bi_hidden_size / config.bi_num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n # self.scale = nn.Linear(1, self.num_attention_heads, bias=False)\n # self.scale_act_fn = ACT2FN['relu']\n v_config = BertConfig.from_dict(config.v_config)\n\n self.query1 = nn.Linear(v_config.hidden_size, self.all_head_size)\n self.key1 = nn.Linear(v_config.hidden_size, self.all_head_size)\n self.value1 = nn.Linear(v_config.hidden_size, self.all_head_size)\n # self.logit1 = nn.Linear(config.hidden_size, self.num_attention_heads)\n self.dropout1 = nn.Dropout(v_config.attention_probs_dropout_prob)\n\n t_config = BertConfig.from_dict(config.t_config)\n self.query2 = nn.Linear(t_config.hidden_size, self.all_head_size)\n self.key2 = nn.Linear(t_config.hidden_size, self.all_head_size)\n self.value2 = nn.Linear(t_config.hidden_size, self.all_head_size)\n # self.logit2 = nn.Linear(config.hidden_size, self.num_attention_heads)\n self.dropout2 = nn.Dropout(t_config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (\n self.num_attention_heads,\n self.attention_head_size,\n )\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n input_tensor1,\n attention_mask1,\n input_tensor2,\n attention_mask2,\n co_attention_mask=None,\n use_co_attention_mask=False,\n ):\n\n # for vision input.\n mixed_query_layer1 = self.query1(input_tensor1)\n mixed_key_layer1 = self.key1(input_tensor1)\n mixed_value_layer1 = self.value1(input_tensor1)\n # mixed_logit_layer1 = self.logit1(input_tensor1)\n\n query_layer1 = self.transpose_for_scores(mixed_query_layer1)\n key_layer1 = self.transpose_for_scores(mixed_key_layer1)\n value_layer1 = self.transpose_for_scores(mixed_value_layer1)\n # logit_layer1 = self.transpose_for_logits(mixed_logit_layer1)\n\n # for text input:\n mixed_query_layer2 = self.query2(input_tensor2)\n mixed_key_layer2 = self.key2(input_tensor2)\n mixed_value_layer2 = self.value2(input_tensor2)\n # mixed_logit_layer2 = self.logit2(input_tensor2)\n\n query_layer2 = self.transpose_for_scores(mixed_query_layer2)\n key_layer2 = self.transpose_for_scores(mixed_key_layer2)\n value_layer2 = self.transpose_for_scores(mixed_value_layer2)\n # logit_layer2 = self.transpose_for_logits(mixed_logit_layer2)\n\n # Take the dot product between \"query2\" and \"key1\" to get the raw 
attention scores for value 1.\n attention_scores1 = torch.matmul(query_layer2, key_layer1.transpose(-1, -2))\n attention_scores1 = attention_scores1 / math.sqrt(self.attention_head_size)\n attention_scores1 = attention_scores1 + attention_mask1\n # if use_co_attention_mask:\n # attention_scores1 = attention_scores1 + co_attention_mask.permute(0,1,3,2)\n\n # Normalize the attention scores to probabilities.\n attention_probs1 = nn.Softmax(dim=-1)(attention_scores1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs1 = self.dropout1(attention_probs1)\n\n context_layer1 = torch.matmul(attention_probs1, value_layer1)\n context_layer1 = context_layer1.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape1 = context_layer1.size()[:-2] + (self.all_head_size, )\n context_layer1 = context_layer1.view(*new_context_layer_shape1)\n\n # Take the dot product between \"query1\" and \"key2\" to get the raw attention scores for value 2.\n attention_scores2 = torch.matmul(query_layer1, key_layer2.transpose(-1, -2))\n attention_scores2 = attention_scores2 / math.sqrt(self.attention_head_size)\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n\n # we can comment this line for single flow.\n attention_scores2 = attention_scores2 + attention_mask2\n # if use_co_attention_mask:\n # attention_scores2 = attention_scores2 + co_attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs2 = nn.Softmax(dim=-1)(attention_scores2)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs2 = self.dropout2(attention_probs2)\n\n context_layer2 = torch.matmul(attention_probs2, value_layer2)\n context_layer2 = context_layer2.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape2 = context_layer2.size()[:-2] + (self.all_head_size, )\n context_layer2 = context_layer2.view(*new_context_layer_shape2)\n\n attn_data = None\n\n if self.visualization:\n attn_data = {\n 'attn1': attention_probs1,\n 'queries1': query_layer2,\n 'keys1': key_layer1,\n 'attn2': attention_probs2,\n 'querues2': query_layer1,\n 'keys2': key_layer2,\n }\n\n return context_layer1, context_layer2, attn_data\n\n\nclass BertBiOutput(nn.Module):\n\n def __init__(self, config):\n super(BertBiOutput, self).__init__()\n\n v_config = BertConfig.from_dict(config.v_config)\n\n self.dense1 = nn.Linear(config.bi_hidden_size, v_config.hidden_size)\n self.LayerNorm1 = BertLayerNorm(v_config.hidden_size, eps=1e-12)\n self.dropout1 = nn.Dropout(v_config.hidden_dropout_prob)\n\n # self.q_dense1 = nn.Linear(config.bi_hidden_size, v_config.hidden_size)\n # self.q_dropout1 = nn.Dropout(v_config.hidden_dropout_prob)\n\n t_config = BertConfig.from_dict(config.t_config)\n\n self.dense2 = nn.Linear(config.bi_hidden_size, t_config.hidden_size)\n self.LayerNorm2 = BertLayerNorm(t_config.hidden_size, eps=1e-12)\n self.dropout2 = nn.Dropout(t_config.hidden_dropout_prob)\n\n # self.q_dense2 = nn.Linear(config.bi_hidden_size, t_config.hidden_size)\n # self.q_dropout2 = nn.Dropout(t_config.hidden_dropout_prob)\n\n def forward(self, hidden_states1, input_tensor1, hidden_states2, input_tensor2):\n\n context_state1 = self.dense1(hidden_states1)\n context_state1 = self.dropout1(context_state1)\n\n context_state2 = self.dense2(hidden_states2)\n context_state2 = 
self.dropout2(context_state2)\n\n hidden_states1 = self.LayerNorm1(context_state1 + input_tensor1)\n hidden_states2 = self.LayerNorm2(context_state2 + input_tensor2)\n\n return hidden_states1, hidden_states2\n\n\nclass BertConnectionLayer(nn.Module):\n\n def __init__(self, config):\n super(BertConnectionLayer, self).__init__()\n self.biattention = BertBiAttention(config)\n self.biOutput = BertBiOutput(config)\n\n v_config = BertConfig.from_dict(config.v_config)\n self.v_intermediate = BertIntermediate(v_config)\n self.v_output = BertOutput(v_config)\n\n t_config = BertConfig.from_dict(config.t_config)\n self.t_intermediate = BertIntermediate(t_config)\n self.t_output = BertOutput(t_config)\n\n def forward(\n self,\n input_tensor1,\n attention_mask1,\n input_tensor2,\n attention_mask2,\n co_attention_mask=None,\n use_co_attention_mask=False,\n ):\n\n bi_output1, bi_output2, co_attention_probs = self.biattention(\n input_tensor1,\n attention_mask1,\n input_tensor2,\n attention_mask2,\n co_attention_mask,\n use_co_attention_mask,\n )\n\n attention_output1, attention_output2 = self.biOutput(bi_output2, input_tensor1, bi_output1, input_tensor2)\n\n intermediate_output1 = self.v_intermediate(attention_output1)\n layer_output1 = self.v_output(intermediate_output1, attention_output1)\n\n intermediate_output2 = self.t_intermediate(attention_output2)\n layer_output2 = self.t_output(intermediate_output2, attention_output2)\n\n return layer_output1, layer_output2, co_attention_probs\n\n\nclass BertEncoder(nn.Module):\n\n def __init__(self, config):\n super(BertEncoder, self).__init__()\n\n # in the bert encoder, we need to extract three things here.\n # text bert layer: BertLayer\n # vision bert layer: BertImageLayer\n # Bi-Attention: Given the output of two bertlayer, perform bi-directional\n # attention and add on two layers.\n t_config = BertConfig.from_dict(config.t_config)\n v_config = BertConfig.from_dict(config.v_config)\n\n self.FAST_MODE = config.fast_mode\n self.with_coattention = config.with_coattention\n self.v_biattention_id = v_config.biattention_id\n self.t_biattention_id = t_config.biattention_id\n self.in_batch_pairs = config.in_batch_pairs\n self.fixed_t_layer = config.fixed_t_layer\n self.fixed_v_layer = config.fixed_v_layer\n\n # layer = BertLayer(config)\n layer = BertLayer(t_config)\n v_layer = BertLayer(v_config)\n connect_layer = BertConnectionLayer(config)\n\n self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(t_config.num_hidden_layers)])\n self.v_layer = nn.ModuleList([copy.deepcopy(v_layer) for _ in range(v_config.num_hidden_layers)])\n self.c_layer = nn.ModuleList([copy.deepcopy(connect_layer) for _ in range(len(v_config.biattention_id))])\n\n def forward(\n self,\n txt_embedding,\n image_embedding,\n txt_attention_mask,\n txt_attention_mask2,\n image_attention_mask,\n co_attention_mask=None,\n output_all_encoded_layers=True,\n output_all_attention_masks=False,\n ):\n\n v_start = 0\n t_start = 0\n count = 0\n all_encoder_layers_t = []\n all_encoder_layers_v = []\n\n all_attention_mask_t = []\n all_attnetion_mask_v = []\n all_attention_mask_c = []\n\n batch_size, num_words, t_hidden_size = txt_embedding.size()\n _, num_regions, v_hidden_size = image_embedding.size()\n\n use_co_attention_mask = False\n for v_layer_id, t_layer_id in zip(self.v_biattention_id, self.t_biattention_id):\n\n v_end = v_layer_id\n t_end = t_layer_id\n\n assert self.fixed_t_layer <= t_end\n assert self.fixed_v_layer <= v_end\n\n for idx in range(t_start, self.fixed_t_layer):\n with 
torch.no_grad():\n outputs = self.layer[idx](txt_embedding, txt_attention_mask)\n txt_embedding, txt_attention_probs = outputs[0], outputs[1:]\n t_start = self.fixed_t_layer\n if output_all_attention_masks:\n all_attention_mask_t.append(txt_attention_probs)\n\n for idx in range(t_start, t_end):\n outputs = self.layer[idx](txt_embedding, txt_attention_mask)\n txt_embedding, txt_attention_probs = outputs[0], outputs[1:]\n if output_all_attention_masks:\n all_attention_mask_t.append(txt_attention_probs)\n\n for idx in range(v_start, self.fixed_v_layer):\n with torch.no_grad():\n outputs = self.v_layer[idx](\n image_embedding,\n image_attention_mask,\n )\n image_embedding, image_attention_probs = outputs[0], outputs[1:]\n\n v_start = self.fixed_v_layer\n\n if output_all_attention_masks:\n all_attnetion_mask_v.append(image_attention_probs)\n\n for idx in range(v_start, v_end):\n outputs = self.v_layer[idx](\n image_embedding,\n image_attention_mask,\n )\n image_embedding, image_attention_probs = outputs[0], outputs[1:]\n\n if output_all_attention_masks:\n all_attnetion_mask_v.append(image_attention_probs)\n\n if count == 0 and self.in_batch_pairs:\n # new batch size is the batch_size ^2\n image_embedding = image_embedding.unsqueeze(0).expand(batch_size, batch_size, num_regions,\n v_hidden_size).contiguous().view(\n batch_size * batch_size, num_regions,\n v_hidden_size)\n image_attention_mask = image_attention_mask.unsqueeze(0).expand(batch_size, batch_size, 1, 1,\n num_regions).contiguous().view(\n batch_size * batch_size, 1, 1,\n num_regions)\n\n txt_embedding = txt_embedding.unsqueeze(1).expand(batch_size, batch_size, num_words,\n t_hidden_size).contiguous().view(\n batch_size * batch_size, num_words, t_hidden_size)\n txt_attention_mask = txt_attention_mask.unsqueeze(1).expand(\n batch_size, batch_size, 1, 1, num_words).contiguous().view(batch_size * batch_size, 1, 1, num_words)\n co_attention_mask = co_attention_mask.unsqueeze(1).expand(batch_size, batch_size, 1, num_regions,\n num_words).contiguous().view(\n batch_size * batch_size, 1, num_regions,\n num_words)\n\n if count == 0 and self.FAST_MODE:\n txt_embedding = txt_embedding.expand(\n image_embedding.size(0),\n txt_embedding.size(1),\n txt_embedding.size(2),\n )\n txt_attention_mask = txt_attention_mask.expand(\n image_embedding.size(0),\n txt_attention_mask.size(1),\n txt_attention_mask.size(2),\n txt_attention_mask.size(3),\n )\n\n if self.with_coattention:\n # do the bi attention.\n image_embedding, txt_embedding, co_attention_probs = self.c_layer[count](\n image_embedding,\n image_attention_mask,\n txt_embedding,\n txt_attention_mask,\n co_attention_mask,\n use_co_attention_mask,\n )\n\n if output_all_attention_masks:\n all_attention_mask_c.append(co_attention_probs)\n\n v_start = v_end\n t_start = t_end\n count += 1\n\n if output_all_encoded_layers:\n all_encoder_layers_t.append(txt_embedding)\n all_encoder_layers_v.append(image_embedding)\n\n for idx in range(v_start, len(self.v_layer)):\n outputs = self.v_layer[idx](\n image_embedding,\n image_attention_mask,\n )\n image_embedding, image_attention_probs = outputs[0], outputs[1:]\n\n if output_all_attention_masks:\n all_attnetion_mask_v.append(image_attention_probs)\n\n for idx in range(t_start, len(self.layer)):\n outputs = self.layer[idx](txt_embedding, txt_attention_mask)\n txt_embedding, txt_attention_probs = outputs[0], outputs[1:]\n\n if output_all_attention_masks:\n all_attention_mask_t.append(txt_attention_probs)\n\n # add the end part to finish.\n if not 
output_all_encoded_layers:\n all_encoder_layers_t.append(txt_embedding)\n all_encoder_layers_v.append(image_embedding)\n\n return (\n all_encoder_layers_t,\n all_encoder_layers_v,\n (all_attention_mask_t, all_attnetion_mask_v, all_attention_mask_c),\n )\n\n\nclass BertTextPooler(BertPooler):\n\n def __init__(self, config):\n super(BertTextPooler, self).__init__(config)\n self.dense = nn.Linear(config.t_config['hidden_size'], config.bi_hidden_size)\n self.activation = nn.ReLU()\n\n\nclass BertImagePooler(BertPooler):\n\n def __init__(self, config):\n super(BertImagePooler, self).__init__(config)\n self.dense = nn.Linear(config.v_config['hidden_size'], config.bi_hidden_size)\n self.activation = nn.ReLU()\n\n\nclass BertLMPredictionHead(nn.Module):\n\n def __init__(self, config, bert_model_embedding_weights):\n super(BertLMPredictionHead, self).__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(\n bert_model_embedding_weights.size(1),\n bert_model_embedding_weights.size(0),\n bias=False,\n )\n self.decoder.weight = bert_model_embedding_weights\n self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states) + self.bias\n return hidden_states\n\n\nclass BertPreTrainingHeads(nn.Module):\n\n def __init__(self, config, bert_model_embedding_weights):\n super(BertPreTrainingHeads, self).__init__()\n t_config = BertConfig.from_dict(config.t_config)\n self.predictions = BertLMPredictionHead(t_config, bert_model_embedding_weights)\n self.bi_seq_relationship = nn.Linear(config.bi_hidden_size, 2)\n\n v_config = BertConfig.from_dict(config.v_config)\n self.imagePredictions = BertImagePredictionHead(v_config)\n self.fusion_method = config.fusion_method\n self.dropout = nn.Dropout(0.1)\n\n def forward(\n self,\n sequence_output_t,\n sequence_output_v,\n pooled_output_t,\n pooled_output_v,\n ):\n\n if self.fusion_method == 'sum':\n pooled_output = self.dropout(pooled_output_t + pooled_output_v)\n elif self.fusion_method == 'mul':\n pooled_output = self.dropout(pooled_output_t * pooled_output_v)\n else:\n assert False\n\n prediction_scores_t = self.predictions(sequence_output_t)\n seq_relationship_score = self.bi_seq_relationship(pooled_output)\n prediction_scores_v = self.imagePredictions(sequence_output_v)\n\n return prediction_scores_t, prediction_scores_v, seq_relationship_score\n\n\nclass BertImagePredictionHead(nn.Module):\n\n def __init__(self, config):\n super(BertImagePredictionHead, self).__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.target_size)\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\nclass BertModel(BertPreTrainedModel):\n\n def __init__(self, config):\n super(BertModel, self).__init__(config)\n\n self.task_specific_tokens = config.task_specific_tokens\n\n t_config = BertConfig.from_dict(config.t_config)\n v_config = BertConfig.from_dict(config.v_config)\n\n # initilize word embedding\n if config.model == 'bert':\n self.embeddings = BertEmbeddings(t_config)\n elif config.model == 'roberta':\n 
self.embeddings = RobertaEmbeddings(t_config)\n\n        # initialize the vision embedding\n        self.v_embeddings = BertImageEmbeddings(v_config)\n\n        self.encoder = BertEncoder(config)\n        self.t_pooler = BertTextPooler(config)\n        self.v_pooler = BertImagePooler(config)\n\n        self.init_weights()\n\n    def forward(\n        self,\n        input_txt,\n        input_imgs,\n        image_loc,\n        token_type_ids=None,\n        attention_mask=None,\n        image_attention_mask=None,\n        co_attention_mask=None,\n        task_ids=None,\n        output_all_encoded_layers=False,\n        output_all_attention_masks=False,\n    ):\n        if attention_mask is None:\n            attention_mask = torch.ones_like(input_txt)\n        if token_type_ids is None:\n            token_type_ids = torch.zeros_like(input_txt)\n        if image_attention_mask is None:\n            image_attention_mask = torch.ones(input_imgs.size(0), input_imgs.size(1)).type_as(input_txt)\n\n        if self.task_specific_tokens:\n            # extend the mask\n            mask_tokens = input_txt.new().resize_(input_txt.size(0), 1).fill_(1)\n            attention_mask = torch.cat([mask_tokens, attention_mask], dim=1)\n\n        # We create a 3D attention mask from a 2D tensor mask.\n        # Sizes are [batch_size, 1, 1, to_seq_length]\n        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n        # this attention mask is simpler than the triangular masking of causal attention\n        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.\n        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n        extended_image_attention_mask = image_attention_mask.unsqueeze(1).unsqueeze(2)\n\n        extended_attention_mask2 = attention_mask.unsqueeze(2)\n        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n        # masked positions, this operation will create a tensor which is 0.0 for\n        # positions we want to attend and -10000.0 for masked positions.\n        # Since we are adding it to the raw scores before the softmax, this is\n        # effectively the same as removing these entirely.\n        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility\n        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n        extended_attention_mask2 = extended_attention_mask2.to(dtype=next(\n            self.parameters()).dtype)  # fp16 compatibility\n\n        extended_image_attention_mask = extended_image_attention_mask.to(dtype=next(\n            self.parameters()).dtype)  # fp16 compatibility\n        extended_image_attention_mask = (1.0 - extended_image_attention_mask) * -10000.0\n\n        if co_attention_mask is None:\n            co_attention_mask = torch.zeros(input_txt.size(0), input_imgs.size(1),\n                                            input_txt.size(1)).type_as(extended_image_attention_mask)\n\n        extended_co_attention_mask = co_attention_mask.unsqueeze(1)\n\n        # extended_co_attention_mask = co_attention_mask.unsqueeze(-1)\n        extended_co_attention_mask = extended_co_attention_mask * 5.0\n        extended_co_attention_mask = extended_co_attention_mask.to(dtype=next(\n            self.parameters()).dtype)  # fp16 compatibility\n\n        embedding_output = self.embeddings(input_txt, token_type_ids, task_ids)\n        v_embedding_output = self.v_embeddings(input_imgs, image_loc)\n        encoded_layers_t, encoded_layers_v, all_attention_mask = self.encoder(\n            embedding_output,\n            v_embedding_output,\n            extended_attention_mask,\n            extended_attention_mask2,\n            extended_image_attention_mask,\n            extended_co_attention_mask,\n            output_all_encoded_layers=output_all_encoded_layers,\n            output_all_attention_masks=output_all_attention_masks,\n        )\n\n        sequence_output_t = encoded_layers_t[-1]\n        sequence_output_v = encoded_layers_v[-1]\n\n        pooled_output_t = self.t_pooler(sequence_output_t)\n        pooled_output_v = self.v_pooler(sequence_output_v)\n\n        if not output_all_encoded_layers:\n            encoded_layers_t = encoded_layers_t[-1]\n            encoded_layers_v = encoded_layers_v[-1]\n\n        return (\n            encoded_layers_t,\n            encoded_layers_v,\n            pooled_output_t,\n            pooled_output_v,\n            all_attention_mask,\n        )\n\n\nclass BertImageEmbeddings(nn.Module):\n    \"\"\"Construct the embeddings from image features, spatial location (omitted for now) and\n    token_type embeddings.\"\"\"\n\n    def __init__(self, config):\n        super(BertImageEmbeddings, self).__init__()\n\n        self.image_embeddings = nn.Linear(config.feature_size, config.hidden_size)\n        self.image_location_embeddings = nn.Linear(5, config.hidden_size)\n        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n        self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n    def forward(self, input_ids, input_loc):\n\n        img_embeddings = self.image_embeddings(input_ids)\n        loc_embeddings = self.image_location_embeddings(input_loc)\n\n        # TODO: we want to make the padding_idx == 0; however, with custom initialization, it seems it will have a bias.\n        # Let's do masking for now\n        embeddings = self.LayerNorm(img_embeddings + loc_embeddings)\n        embeddings = self.dropout(embeddings)\n\n        return embeddings\n\n\nclass BertForMultiModalPreTraining(BertPreTrainedModel):\n    \"\"\"BERT model with multi-modal pre-training heads.\"\"\"\n\n    def __init__(self, config):\n        super(BertForMultiModalPreTraining, self).__init__(config)\n\n        self.bert = BertModel(config)\n        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)\n\n        self.init_weights()\n        self.visual_target = config.visual_target\n        self.num_negative = config.num_negative\n        self.loss_fct = CrossEntropyLoss(ignore_index=-1)\n\n        print(\"model's visual target is \", config.visual_target)\n\n        if self.visual_target == 0:\n            self.vis_criterion = nn.KLDivLoss(reduction='none')\n        elif self.visual_target == 1:\n            self.vis_criterion = nn.MSELoss(reduction='none')\n        elif self.visual_target == 2:\n            self.vis_criterion = CrossEntropyLoss()\n\n        self.tie_weights()\n\n    def tie_weights(self):\n        \"\"\"Make sure we are sharing the input and output embeddings.\n\n        Export to TorchScript can't handle parameter sharing, so we clone them instead.\n        \"\"\"\n        self._tie_or_clone_weights(self.cls.predictions.decoder, self.bert.embeddings.word_embeddings)\n\n    def forward(\n        self,\n        input_ids,\n        image_feat,\n        image_loc,\n        token_type_ids=None,\n        attention_mask=None,\n        image_attention_mask=None,\n        masked_lm_labels=None,\n        image_label=None,\n        image_target=None,\n        next_sentence_label=None,\n        output_all_attention_masks=False,\n    ):\n        # in this model, we first embed the images.\n        sequence_output_t, sequence_output_v, pooled_output_t, pooled_output_v, all_attention_mask = self.bert(\n            input_ids,\n            image_feat,\n            image_loc,\n            token_type_ids,\n            attention_mask,\n            image_attention_mask,\n            output_all_encoded_layers=False,\n            output_all_attention_masks=output_all_attention_masks,\n        )\n\n        prediction_scores_t, prediction_scores_v, seq_relationship_score = self.cls(\n            sequence_output_t,\n            sequence_output_v,\n            pooled_output_t,\n            pooled_output_v,\n        )\n\n        if masked_lm_labels is not None and next_sentence_label is not None and image_target is not None:\n            prediction_scores_v = prediction_scores_v[:, 1:]\n            if self.visual_target == 1:\n                img_loss = self.vis_criterion(prediction_scores_v, image_target)\n                masked_img_loss = torch.sum(img_loss * (image_label == 1).unsqueeze(2).float()) / max(\n                    torch.sum((image_label == 1).unsqueeze(2).expand_as(img_loss)), 1)\n\n            elif
self.visual_target == 0:\n                img_loss = self.vis_criterion(F.log_softmax(prediction_scores_v, dim=2), image_target)\n\n                # clamp the denominator to avoid division by zero when no regions are masked\n                masked_img_loss = torch.sum(img_loss * (image_label == 1).unsqueeze(2).float()) / max(\n                    torch.sum((image_label == 1)), 1)\n            elif self.visual_target == 2:\n                # generate negative sampled index.\n                # num_negative = self.num_negative\n                num_across_batch = int(self.num_negative * 0.7)\n                num_inside_batch = int(self.num_negative * 0.3)\n\n                batch_size, num_regions, _ = prediction_scores_v.size()\n                assert batch_size != 0\n                # random negative across batches.\n                row_across_index = input_ids.new(batch_size, num_regions, num_across_batch).random_(0, batch_size - 1)\n                col_across_index = input_ids.new(batch_size, num_regions, num_across_batch).random_(0, num_regions)\n\n                for i in range(batch_size - 1):\n                    row_across_index[i][row_across_index[i] == i] = batch_size - 1\n                final_across_index = row_across_index * num_regions + col_across_index\n\n                # random negative inside batches.\n                row_inside_index = input_ids.new(batch_size, num_regions, num_inside_batch).zero_()\n                col_inside_index = input_ids.new(batch_size, num_regions, num_inside_batch).random_(0, num_regions - 1)\n\n                for i in range(batch_size):\n                    row_inside_index[i] = i\n                for i in range(num_regions - 1):\n                    col_inside_index[:, i, :][col_inside_index[:, i, :] == i] = (num_regions - 1)\n                final_inside_index = row_inside_index * num_regions + col_inside_index\n\n                final_index = torch.cat((final_across_index, final_inside_index), dim=2)\n\n                # Let's first sample where we need to compute.\n                predict_v = prediction_scores_v[image_label == 1]\n                neg_index_v = final_index[image_label == 1]\n\n                flat_image_target = image_target.view(batch_size * num_regions, -1)\n                # we also need to append the target feature at the beginning.\n                negative_v = flat_image_target[neg_index_v]\n                positive_v = image_target[image_label == 1]\n                sample_v = torch.cat((positive_v.unsqueeze(1), negative_v), dim=1)\n\n                # calculate the loss.\n                score = torch.bmm(sample_v, predict_v.unsqueeze(2)).squeeze(2)\n                masked_img_loss = self.vis_criterion(score, input_ids.new(score.size(0)).zero_())\n\n            # masked_img_loss = torch.sum(img_loss) / (img_loss.shape[0] * img_loss.shape[1])\n            masked_lm_loss = self.loss_fct(\n                prediction_scores_t.view(-1, self.config.vocab_size),\n                masked_lm_labels.view(-1),\n            )\n\n            next_sentence_loss = self.loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n            return (\n                masked_lm_loss.unsqueeze(0),\n                masked_img_loss.unsqueeze(0),\n                next_sentence_loss.unsqueeze(0),\n            )\n        else:\n            return (\n                prediction_scores_t,\n                prediction_scores_v,\n                seq_relationship_score,\n                all_attention_mask,\n            )\n\n\nclass SimpleClassifier(nn.Module):\n\n    def __init__(self, in_dim, hid_dim, out_dim, dropout):\n        super().__init__()\n        self.logit_fc = nn.Sequential(\n            nn.Linear(in_dim, hid_dim),\n            GeLU(),\n            BertLayerNorm(hid_dim, eps=1e-12),\n            nn.Linear(hid_dim, out_dim),\n        )\n\n    def forward(self, hidden_states):\n        return self.logit_fc(hidden_states)\n\n\nclass VILBertForVLTasks(BertPreTrainedModel):\n\n    def __init__(self, config, num_labels, dropout_prob=0.1, default_gpu=True):\n        super(VILBertForVLTasks, self).__init__(config)\n        self.num_labels = num_labels\n\n        self.bert = BertModel(config)\n        self.dropout = nn.Dropout(dropout_prob)\n        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)\n        self.vil_prediction = SimpleClassifier(config.bi_hidden_size, config.bi_hidden_size * 2, 3129, 0.5)\n        self.vil_prediction_gqa = SimpleClassifier(config.bi_hidden_size, config.bi_hidden_size * 2, 1533, 0.5)\n        self.vil_binary_prediction = SimpleClassifier(config.bi_hidden_size * 2, config.bi_hidden_size * 2, 2, 0.5)\n        self.vil_logit = nn.Linear(config.bi_hidden_size, 1)\n        self.vil_tri_prediction = nn.Linear(config.bi_hidden_size, 3)  # for Visual Entailment tasks\n        self.vision_logit = nn.Linear(config.v_config['hidden_size'], 1)\n        self.linguisic_logit = nn.Linear(config.t_config['hidden_size'], 1)\n        self.fusion_method = config.fusion_method\n        self.init_weights()\n\n        self.tie_weights()\n\n    def tie_weights(self):\n        \"\"\"Make sure we are sharing the input and output embeddings.\n\n        Export to TorchScript can't handle parameter sharing, so we clone them instead.\n        \"\"\"\n        self._tie_or_clone_weights(self.cls.predictions.decoder, self.bert.embeddings.word_embeddings)\n\n    def forward(\n        self,\n        input_txt,\n        input_imgs,\n        image_loc,\n        token_type_ids=None,\n        attention_mask=None,\n        image_attention_mask=None,\n        co_attention_mask=None,\n        task_ids=None,\n        output_all_encoded_layers=False,\n        output_all_attention_masks=False,\n    ):\n\n        (sequence_output_t, sequence_output_v, pooled_output_t, pooled_output_v, all_attention_mask) = self.bert(\n            input_txt,\n            input_imgs,\n            image_loc,\n            token_type_ids,\n            attention_mask,\n            image_attention_mask,\n            co_attention_mask,\n            task_ids,\n            output_all_encoded_layers=output_all_encoded_layers,\n            output_all_attention_masks=output_all_attention_masks,\n        )\n\n        vil_prediction = 0\n        vil_logit = 0\n        vil_binary_prediction = 0\n        vision_prediction = 0\n        vision_logit = 0\n        linguisic_prediction = 0\n        linguisic_logit = 0\n\n        linguisic_prediction, vision_prediction, vil_binary_prediction = self.cls(sequence_output_t, sequence_output_v,\n                                                                                  pooled_output_t, pooled_output_v)\n\n        if self.fusion_method == 'sum':\n            pooled_output = self.dropout(pooled_output_t + pooled_output_v)\n        elif self.fusion_method == 'mul':\n            pooled_output = self.dropout(pooled_output_t * pooled_output_v)\n        else:\n            raise ValueError('unknown fusion method: {}'.format(self.fusion_method))\n\n        vil_prediction = self.vil_prediction(pooled_output)\n        vil_prediction_gqa = self.vil_prediction_gqa(pooled_output)\n        if pooled_output.size(0) % 2 == 0:\n            vil_binary_prediction = self.vil_binary_prediction(pooled_output.view(-1, pooled_output.size(1) * 2))\n        vil_logit = self.vil_logit(pooled_output)\n        vil_tri_prediction = self.vil_tri_prediction(pooled_output)\n        vision_logit = self.vision_logit(self.dropout(sequence_output_v)) + (\n            (1.0 - image_attention_mask) * -10000.0).unsqueeze(2).to(dtype=next(self.parameters()).dtype)\n        linguisic_logit = self.linguisic_logit(self.dropout(sequence_output_t))\n\n        return (\n            vil_prediction,\n            vil_prediction_gqa,\n            vil_logit,\n            vil_binary_prediction,\n            vil_tri_prediction,\n            vision_prediction,\n            vision_logit,\n            linguisic_prediction,\n            linguisic_logit,\n            all_attention_mask,\n        )\n",
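A quick, self-contained illustration of the additive attention-mask trick used in the forward pass above (a minimal sketch; the shapes and the -10000.0 constant follow the comments in the source, the function name is made up):

import torch

def make_extended_mask(mask_2d, dtype=torch.float32):
    # [batch, seq] -> [batch, 1, 1, seq]; broadcastable over heads and query positions
    extended = mask_2d[:, None, None, :].to(dtype)
    # 0.0 where we attend, -10000.0 where masked; added to the raw scores before the
    # softmax, this effectively removes the masked positions from the attention
    return (1.0 - extended) * -10000.0

mask = torch.tensor([[1, 1, 1, 0]])
print(make_extended_mask(mask))  # the last key position becomes -10000.0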
"from ..builder import HOOKS\nfrom .log_buffer_imix import LogBufferWriter\n\n\[email protected]_module()\nclass TensorboardLoggerHook(LogBufferWriter):\n \"\"\"Write all scalars to a tensorboard file.\"\"\"\n\n def __init__(self, log_dir: str, window_size: int = 20, **kwargs):\n \"\"\"\n Args:\n log_dir (str): the directory used to save the output events\n window_size (int): the scalars will be median-smoothed by this window size\n\n kwargs: other arguments will be passed to `torch.utils.tensorboard.SummaryWriter(...)`\n \"\"\"\n self._window_size = window_size\n\n from torch.utils.tensorboard import SummaryWriter\n self._writer = SummaryWriter(log_dir + '/runs/', **kwargs)\n\n def process_buffer_data(self):\n self._add_scalar()\n self._add_image()\n self._add_histogram()\n\n def _add_scalar(self):\n for name, value in self.log_buffer.latest_with_smoothing_hint(self._window_size).items():\n self._writer.add_scalar(name, value, self.log_buffer.iter)\n\n def _add_image(self):\n if len(self.log_buffer.vis_data) >= 1:\n for img_name, img_data, step_num in self.log_buffer.vis_data.images:\n self._writer.add_image(img_name, img_data, step_num)\n self.log_buffer.clear_images()\n\n def _add_histogram(self):\n if len(self.log_buffer.histograms) >= 1:\n for hist_params in self.log_buffer.histograms.histograms:\n self._writer.add_histogram_raw(**hist_params)\n self.log_buffer.clear_histograms()\n\n def close(self):\n if hasattr(self, '_writer'):\n self._writer.close()\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import CrossEntropyLoss as TorchCrossEntropyLoss\nfrom torch.nn import SmoothL1Loss as TorchSmoothL1Loss\n\nfrom ..builder import LOSSES, build_loss\nfrom .base_loss import BaseLoss\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom typing import Dict\n\n\[email protected]_module()\nclass TripleLogitBinaryCrossEntropy(BaseLoss):\n \"\"\"This is used for Three-branch fusion only.\n\n We predict scores and compute cross entropy loss for each of branches.\n \"\"\"\n\n def __init__(self):\n super().__init__(loss_name=str(self))\n\n # def forward(self, model_output, targets):\n # \"\"\"Calculates and returns the binary cross entropy for logits\n # Args:\n # sample_list (SampleList): SampleList containing `targets` attribute.\n # model_output (Dict): Model output containing `scores` attribute.\n # Returns:\n # torch.FloatTensor: Float value for loss.\n # \"\"\"\n # scores = model_output['scores']\n #\n # if scores.dim() == 3:\n # loss = (\n # F.binary_cross_entropy_with_logits(\n # scores[:, 0], targets, reduction='mean') +\n # F.binary_cross_entropy_with_logits(\n # scores[:, 1], targets, reduction='mean') +\n # F.binary_cross_entropy_with_logits(\n # scores[:, 2], targets, reduction='mean'))\n # else:\n # loss = F.binary_cross_entropy_with_logits(\n # scores, targets, reduction='mean')\n #\n # return loss * targets.size(-1)\n\n def forward(self, model_output):\n \"\"\"Calculates and returns the binary cross entropy for logits\n Args:\n sample_list (SampleList): SampleList containing `targets` attribute.\n model_output (Dict): Model output containing `scores` attribute.\n Returns:\n torch.FloatTensor: Float value for loss.\n \"\"\"\n\n scores, target = model_output['scores'], model_output['target']\n\n if scores.dim() == 3:\n loss = (\n F.binary_cross_entropy_with_logits(scores[:, 0], target, reduction='mean') +\n F.binary_cross_entropy_with_logits(scores[:, 1], target, reduction='mean') +\n F.binary_cross_entropy_with_logits(scores[:, 2], target, reduction='mean'))\n else:\n loss = F.binary_cross_entropy_with_logits(scores, target, reduction='mean')\n\n return loss * target.size(-1)\n\n def __str__(self):\n return 'triple_logit_binary_cross_entropy_loss'\n\n\[email protected]_module()\nclass BinaryCrossEntropyWithLogits(BaseLoss):\n\n def __init__(self, params=None):\n super().__init__(loss_name=str(self))\n if params is None:\n params = {}\n self.loss_fn = nn.CrossEntropyLoss(**params)\n\n def forward(self, model_output):\n predict_scores, target = model_output['scores'], model_output['target']\n return self.loss_fn(predict_scores, target)\n\n def __str__(self):\n return 'binary_cross_entropy_with_logits_loss'\n\n\[email protected]_module()\nclass CrossEntropyLoss(BaseLoss):\n\n def __init__(self, params=None):\n super().__init__(loss_name=str(self))\n if params is None:\n params = {}\n self.loss_fn = nn.CrossEntropyLoss(**params)\n\n # def forward(self, sample_list, model_output):\n # return self.loss_fn(model_output['scores'], sample_list.targets)\n\n def forward(self, model_output):\n predict_scores, target = model_output['scores'], model_output['target']\n return self.loss_fn(predict_scores, target)\n\n def __str__(self):\n return 'cross_entropy_loss'\n\n\[email protected]_module()\nclass OBJCrossEntropyLoss(BaseLoss):\n\n def __init__(self, params=None):\n super().__init__(loss_name=str(self))\n if params is None:\n params = {}\n self.loss_fn = nn.CrossEntropyLoss(**params)\n\n # def 
forward(self, sample_list, model_output):\n # return self.loss_fn(model_output['scores'], sample_list.targets)\n\n def forward(self, model_output):\n predict_scores, target = model_output['obj_scores'], model_output['obj_target']\n return self.loss_fn(predict_scores, target)\n\n def __str__(self):\n return 'obj_cross_entropy_loss'\n\n\[email protected]_module()\nclass LogitBinaryCrossEntropy(BaseLoss):\n \"\"\"Returns Binary Cross Entropy for logits.\n\n Attention:\n `Key`: logit_bce\n \"\"\"\n\n def __init__(self):\n super().__init__(loss_name=str(self))\n\n def forward(self, model_output):\n \"\"\"Calculates and returns the binary cross entropy for logits.\n\n Args:\n sample_list (SampleList): SampleList containing `targets` attribute.\n model_output (Dict): Model output containing `scores` attribute.\n\n Returns:\n torch.FloatTensor: Float value for loss.\n \"\"\"\n # scores = model_output[\"scores\"]\n # targets = sample_list[\"targets\"]\n scores, targets = model_output['scores'], model_output['target']\n loss = F.binary_cross_entropy_with_logits(scores, targets, reduction='mean')\n\n return loss * targets.size(1)\n\n def __str__(self):\n return 'logit_binary_cross_entropy_loss'\n\n\[email protected]_module()\nclass CaptionCrossEntropyLoss(BaseLoss):\n\n def __init__(self):\n super().__init__(loss_name=str(self))\n\n def forward(self, sample_list, model_output):\n \"\"\"Calculates and returns the cross entropy loss for captions.\n\n Args:\n sample_list (SampleList): SampleList containing `targets` attribute.\n model_output (Dict): Model output containing `scores` attribute.\n\n Returns:\n torch.FloatTensor: Float value for loss.\n \"\"\"\n scores = model_output['scores']\n targets = sample_list['targets']\n\n # If no captions(test dataset) then assume decode length to be uniform\n if hasattr(sample_list, 'caption_len'):\n caption_lengths, _ = sample_list.caption_len.sort(dim=0, descending=True)\n decode_lengths = (caption_lengths - 1).tolist()\n else:\n decode_lengths = [targets.size(1)] * targets.size(0)\n if torch.__version__ >= '1.1':\n scores = pack_padded_sequence(scores, decode_lengths, batch_first=True).data\n targets = pack_padded_sequence(targets, decode_lengths, batch_first=True).data\n else:\n scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)\n targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)\n\n loss = F.cross_entropy(scores, targets)\n\n return loss\n\n\[email protected]_module()\nclass M4CDecodingBCEWithMaskLoss(BaseLoss):\n\n def __init__(self):\n super().__init__(loss_name=str(self))\n self.one = torch.Tensor([1.0])\n\n def __str__(self):\n return 'M4CDecodingBCEWithMask_loss'\n\n def forward(self, model_output):\n scores = model_output['scores']\n targets = model_output['target']\n loss_mask = model_output['train_loss_mask']\n assert scores.dim() == 3 and loss_mask.dim() == 2\n\n losses = F.binary_cross_entropy_with_logits(scores, targets, reduction='none')\n losses *= loss_mask.unsqueeze(-1)\n\n count = torch.max(torch.sum(loss_mask), self.one.to(losses.device))\n loss = torch.sum(losses) / count\n return loss\n\n # def __str__(self):\n # return 'lxmert_pretrain_loss_v0'\n\n\[email protected]_module()\nclass LXMERTPreTrainLossV0(BaseLoss):\n\n def __init__(self, visual_losses, visual_loss_config, vocab_size, num_answers):\n super().__init__(loss_name=str(self))\n self.loss_fct_cls = TorchCrossEntropyLoss(ignore_index=-1)\n self.loss_fcts_feat = {\n 'l2': TorchSmoothL1Loss(reduction='none'),\n 'ce': 
TorchCrossEntropyLoss(ignore_index=-1, reduction='none')\n        }\n        self.visual_losses = visual_losses.split(',')\n        self.visual_loss_config = visual_loss_config\n        self.vocab_size = vocab_size\n        self.num_answers = num_answers\n\n    def forward(self, model_output):\n        scores = model_output['scores']\n        target = model_output['target']\n        lang_prediction_scores = scores['lang_prediction_scores']\n        cross_relationship_score = scores['cross_relationship_score']\n        visn_prediction_scores_dict = scores['visn_prediction_scores_dict']\n        answer_score = scores['answer_score']\n        masked_lm_labels = target['masked_lm_labels']\n        matched_label = target['matched_label']\n        obj_labels = target['obj_labels']\n        ans = target['ans']\n\n        total_loss = 0.\n        losses = ()\n\n        masked_lm_loss = self.loss_fct_cls(lang_prediction_scores.view(-1, self.vocab_size), masked_lm_labels.view(-1))\n        total_loss += masked_lm_loss\n        losses += (masked_lm_loss.detach(), )\n\n        matched_loss = self.loss_fct_cls(cross_relationship_score.view(-1, 2), matched_label.view(-1))\n        total_loss += matched_loss\n        losses += (matched_loss.detach(), )\n\n        total_visn_loss = 0.\n        for key in self.visual_losses:\n            label, mask_conf = obj_labels[key]\n            output_dim, loss_fct_name, label_shape, weight = self.visual_loss_config[key]\n            visn_loss_fct = self.loss_fcts_feat[loss_fct_name]\n            visn_prediction_scores = visn_prediction_scores_dict[key]\n            visn_loss = visn_loss_fct(\n                visn_prediction_scores.view(-1, output_dim),\n                label.view(*label_shape),\n            )\n            if visn_loss.dim() > 1:  # Regression Losses\n                visn_loss = visn_loss.mean(1)\n            visn_loss = (visn_loss * mask_conf.view(-1)).mean() * weight\n            total_visn_loss += visn_loss\n            losses += (visn_loss.detach(), )\n        total_loss += total_visn_loss\n\n        answer_loss = self.loss_fct_cls(answer_score.view(-1, self.num_answers), ans.view(-1))\n\n        total_loss += answer_loss\n        losses += (answer_loss.detach(), )\n\n        return total_loss  # , torch.stack(losses).unsqueeze(0), answer_score.detach()\n\n    def __str__(self):\n        return 'lxmert_pretrain_loss_v0'\n\n\n@LOSSES.register_module()\nclass VILBERTMutilLoss(BaseLoss):\n\n    def __init__(self, task_cfg):\n        super().__init__(loss_name=str(self))\n        self.LossMap = {\n            'BCEWithLogitLoss': nn.BCEWithLogitsLoss(reduction='mean'),\n            'CrossEntropyLoss': nn.CrossEntropyLoss(),\n        }\n        self.task_ids = []\n        self.loss_scale = {}\n        self.task_cfg = task_cfg\n        self.task_losses = self.LoadLosses()\n\n    def __str__(self):\n        return 'vilbert_mutil_loss'\n\n    def LoadLosses(self):\n        losses = {}\n        task_types = []\n\n        for i, task_id in enumerate(self.task_cfg['tasks'].split('-')):\n            task = 'TASK' + task_id\n            cfg = self.task_cfg.TASKS[task]\n            model_type = cfg.type\n            if model_type not in task_types:\n                task_types.append(model_type)\n            losses[task] = self.LossMap[cfg.loss]\n            self.loss_scale[task] = cfg.loss_scale\n            self.task_ids.append(task)\n\n        return losses\n\n    def forward(self, model_output):\n        for task_id in self.task_ids:\n            # only one task now\n            pred = model_output['scores']  # model_output[task_id]['scores']\n            target = model_output['target']  # model_output[task_id]['target']\n\n            # for different tasks, we use different outputs to calculate the loss.\n            loss = self.task_losses[task_id](pred, target)\n            task_type = self.task_cfg.TASKS[task_id]['type']\n            if task_type in ['VL-classifier', 'VL-classifier-GQA', 'V-logit', 'V-logit-mc']:\n                loss = loss.mean() * target.size(1)\n            elif task_type in ['VL-binary-classifier', 'VL-tri-classifier']:\n                loss = loss.mean()\n\n            loss = loss * self.loss_scale[task_id]\n\n            return loss\n\n\n@LOSSES.register_module()\nclass BCEWithLogitsLoss(BaseLoss):\n    \"\"\"Returns binary cross entropy computed on raw logits.\n\n    Attention:\n        `Key`: logit_bce\n    \"\"\"\n\n    def __init__(self, params=None):\n        super().__init__(loss_name=str(self))\n        if params is None:\n            params = {}\n        self.loss_fn = nn.BCEWithLogitsLoss(reduction='mean', **params)\n\n    def forward(self, model_output):\n        \"\"\"Calculates and returns the binary cross entropy for logits.\n\n        Args:\n            model_output (Dict): Model output containing `scores` and `target` attributes.\n\n        Returns:\n            torch.FloatTensor: Float value for loss.\n        \"\"\"\n        scores, targets = model_output['scores'], model_output['target']\n        return self.loss_fn(scores, targets)\n\n    def __str__(self):\n        return 'bce_with_logits_loss'\n\n\n@LOSSES.register_module()\nclass OSCARLoss(BaseLoss):\n\n    def __init__(self, cfg):\n        super().__init__(loss_name=str(self))\n        self.loss_type = cfg.loss_type\n        self.num_labels = cfg.num_labels\n\n    def __str__(self):\n        return 'oscar_mutil_loss'\n\n    def instance_bce_with_logits(self, logits, labels, reduction='mean'):\n        assert logits.dim() == 2\n        loss = F.binary_cross_entropy_with_logits(logits, labels, reduction=reduction)\n        if reduction == 'mean':\n            loss *= labels.size(1)\n        return loss\n\n    def forward(self, model_output):\n        logits = model_output['scores']\n        labels = model_output['target']\n\n        if labels is not None:\n            if self.num_labels == 1:  # doing regression\n                loss_fct = MSELoss()\n                labels = labels.to(torch.float)\n                loss = loss_fct(logits.view(-1), labels.view(-1))\n            else:\n                if self.loss_type == 'kl':\n                    loss_fct = nn.KLDivLoss(reduction='batchmean')\n                    log_softmax = nn.LogSoftmax(dim=-1)\n                    reshaped_logits = logits.contiguous().view(-1, 3129)\n                    reshaped_logits = log_softmax(reshaped_logits)\n                    loss = loss_fct(reshaped_logits, labels.contiguous())\n                elif self.loss_type == 'bce':  # [VQA]\n                    loss = self.instance_bce_with_logits(logits, labels)\n                else:  # cross_entropy [GQA, Retrieval, Captioning]\n                    loss_fct = nn.CrossEntropyLoss()\n                    loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n        loss = loss.mean()  # mean() to average on multi-gpu parallel training\n\n        return loss\n\n\n@LOSSES.register_module()\nclass OSCARBertCaptioningLoss(nn.Module):\n\n    def __init__(self, cfg):\n        super().__init__()\n        self.label_smoothing = getattr(cfg, 'label_smoothing', 0)\n        self.drop_worst_ratio = getattr(cfg, 'drop_worst_ratio', 0)\n        self.drop_worst_after = getattr(cfg, 'drop_worst_after', 0)\n        self.log_soft = nn.LogSoftmax(dim=1)\n        self.kl = nn.KLDivLoss(reduction='none')\n        self.iter = 0\n\n    def __str__(self):\n        return 'oscar_bert_captioning_loss'\n\n    def forward(self, model_output):\n        logits = model_output['scores']\n        target = model_output['target']\n\n        self.iter += 1\n        eps = self.label_smoothing\n        n_class = logits.size(1)\n        one_hot = torch.zeros_like(logits).scatter(1, target.view(-1, 1), 1)\n        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)\n        log_prb = self.log_soft(logits)\n        loss = self.kl(log_prb, one_hot).sum(1)\n\n        if self.drop_worst_ratio > 0 and self.iter > self.drop_worst_after:\n            loss, _ = torch.topk(loss, k=int(loss.shape[0] * (1 - self.drop_worst_ratio)), largest=False)\n\n        loss = loss.mean()\n\n        return loss\n\n\n@LOSSES.register_module()\nclass VisualDialogBertLoss(BaseLoss):\n    loss_name = 'visual_dialog_bert_loss'\n\n    def __init__(self, MLM_loss: Dict, NSP_loss: Dict, MIR_loss: Dict, predict_feature: bool = False):
super().__init__(loss_name=str(self))\n\n        self.masked_lm_loss_coeff = MLM_loss.pop('weight_coeff')\n        self.masked_lm_loss = build_loss(cfg=MLM_loss)\n\n        self.masked_img_loss_coeff = MIR_loss.pop('weight_coeff')\n        self.masked_img_loss = build_loss(cfg=MIR_loss)\n\n        self.next_sentence_pred_loss_coeff = NSP_loss.pop('weight_coeff')\n        self.next_sentence_pred_loss = build_loss(cfg=NSP_loss)\n\n        self.predict_feature = predict_feature\n\n    def forward(self, model_output):\n        visual_predict_scores = model_output['visual_predict_scores']\n        image_target = model_output['image_target']\n        masked_img_loss = self.masked_img_loss_coeff * self.calcuate_img_loss(visual_predict_scores, image_target)\n\n        text_predict_scores = model_output['text_predict_scores']\n        masked_lm_labels = model_output['masked_lm_labels']\n        masked_lm_loss = self.masked_lm_loss_coeff * self.calcuate_text_loss(text_predict_scores, masked_lm_labels)\n\n        seq_relationship_scores = model_output['seq_relationship_scores']\n        next_sentence_label = model_output['next_sentence_label']\n        nsp_loss = self.next_sentence_pred_loss_coeff * self.calcuate_nsp_loss(seq_relationship_scores,\n                                                                               next_sentence_label)\n\n        output_loss = masked_lm_loss + masked_img_loss + nsp_loss\n\n        return {\n            str(self): output_loss,\n            'masked_img_loss': masked_img_loss,\n            'nsp_loss': nsp_loss,\n            'masked_lm_loss': masked_lm_loss\n        }\n\n    def calcuate_img_loss(self, prediction, target):\n        if self.predict_feature:\n            img_loss = self.masked_img_loss(model_output={'scores': prediction, 'target': target})\n            max_v = max(torch.sum((target == 1).unsqueeze(2).expand_as(img_loss)), 1)\n        else:\n            img_loss = self.masked_img_loss(model_output={'scores': F.log_softmax(prediction, dim=2), 'target': target})\n            # clamp the denominator to avoid division by zero when nothing is masked\n            max_v = max(torch.sum((target == 1)), 1)\n\n        sum_v = torch.sum(img_loss * (target == 1))\n        return sum_v / max_v\n\n    def calcuate_text_loss(self, prediction, target):\n        return self.masked_lm_loss(model_output={'scores': prediction, 'target': target})\n\n    def calcuate_nsp_loss(self, prediction, target):\n        return self.next_sentence_pred_loss(model_output={'scores': prediction, 'target': target})\n\n\n@LOSSES.register_module()\nclass KLDivLoss(BaseLoss):\n    loss_name = 'KLDiv_loss'\n\n    def __init__(self, params=None):\n        super().__init__(loss_name=str(self))\n        if params is None:\n            params = {}\n        self.loss_fn = nn.KLDivLoss(**params)\n\n    def forward(self, model_output):\n        predict_scores, target = model_output['scores'], model_output['target']\n        return self.loss_fn(predict_scores, target)\n\n\n@LOSSES.register_module()\nclass VisualDialogBertDenseLoss(BaseLoss):\n    loss_name = 'visual_dialog_bert_dense_loss'\n\n    def __init__(self, NSP_loss: Dict, KLDiv_loss: Dict, MLM_loss: Dict, MIR_loss: Dict):\n        super().__init__(loss_name=str(self))\n\n        self.nsp_loss_coeff = NSP_loss.pop('weight_coeff')  # next sentence prediction loss\n        self.nsp_loss_fun = build_loss(cfg=NSP_loss)\n\n        self.kldiv_loss_coef = KLDiv_loss.pop('weight_coeff')\n        self.kldiv_loss_fun = build_loss(cfg=KLDiv_loss)\n\n        self.mlm_loss_coeff = MLM_loss.pop('weight_coeff')  # mask language modeling loss\n        self.mlm_loss_fun = build_loss(cfg=MLM_loss)\n\n        self.mir_loss_coeff = MIR_loss.pop('weight_coeff')  # masked image region loss\n        self.mir_loss_fun = build_loss(cfg=MIR_loss)\n\n        self.predict_feature = False\n\n    def forward(self, model_output):\n        nsp_scores = model_output['seq_relationship_scores']\n        nsp_scores = nsp_scores.view(-1, nsp_scores.shape[0], nsp_scores.shape[1])\n        next_sentence_label = model_output['next_sentence_label']\n        nsp_loss = self.nsp_loss_fun({\n            'scores': nsp_scores.view(-1, 2),\n            'target': next_sentence_label.view(-1)\n        }) * self.nsp_loss_coeff\n\n        nsp_scores = nsp_scores[:, :, 0]\n        gt_relevance = model_output['gt_relevance']\n        kldiv_loss = self.kldiv_loss_fun({\n            'scores': F.log_softmax(nsp_scores, dim=1),\n            'target': F.softmax(gt_relevance, dim=1)\n        }) * self.kldiv_loss_coef\n\n        visual_predict_scores = model_output['visual_predict_scores']\n        image_target = model_output['image_target']\n        masked_img_loss = self.mir_loss_coeff * self.calcuate_img_loss(visual_predict_scores, image_target)\n\n        text_predict_scores = model_output['text_predict_scores']\n        masked_lm_labels = model_output['masked_lm_labels']\n        masked_lm_loss = self.mlm_loss_coeff * self.calcuate_text_loss(text_predict_scores, masked_lm_labels)\n\n        kldiv_loss += nsp_loss\n\n        # kldiv_loss already carries the NSP term added just above\n        loss = kldiv_loss + masked_lm_loss + masked_img_loss\n\n        return {\n            str(self): loss,\n            'kldiv_loss': kldiv_loss,\n            'nsp_loss': nsp_loss,\n            'masked_img_loss': masked_img_loss,\n            'masked_lm_loss': masked_lm_loss\n        }\n\n    def calcuate_img_loss(self, prediction, target):\n        if self.predict_feature:\n            img_loss = self.mir_loss_fun(model_output={'scores': prediction, 'target': target})\n            max_v = max(torch.sum((target == 1).unsqueeze(2).expand_as(img_loss)), 1)\n        else:\n            img_loss = self.mir_loss_fun(model_output={'scores': F.log_softmax(prediction, dim=2), 'target': target})\n            # clamp the denominator to avoid division by zero when nothing is masked\n            max_v = max(torch.sum((target == 1)), 1)\n\n        sum_v = torch.sum(img_loss * (target == 1))\n        return sum_v / max_v\n\n    def calcuate_text_loss(self, prediction, target):\n        return self.mlm_loss_fun(model_output={'scores': prediction, 'target': target})\n"
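To make the label-smoothing scheme in OSCARBertCaptioningLoss above concrete, here is a minimal self-contained sketch of the same construction (the batch size, class count, and eps value are arbitrary):

import torch
import torch.nn as nn
import torch.nn.functional as F

eps, n_class = 0.1, 5
logits = torch.randn(4, n_class)
target = torch.tensor([0, 2, 1, 4])

# smoothed one-hot: 1 - eps on the true class, eps spread over the other classes
one_hot = torch.zeros_like(logits).scatter(1, target.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)

# per-sample KL between the smoothed target and the predicted log-probabilities
loss = nn.KLDivLoss(reduction='none')(F.log_softmax(logits, dim=1), one_hot).sum(1)
print(loss.mean())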
] | [
[
"torch.tensor",
"torch.Tensor",
"torch.zeros"
],
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.cat",
"torch.nn.functional.dropout",
"torch.topk",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.bmm",
"torch.gather"
],
[
"torch.zeros",
"torch.tensor"
],
[
"torch.cos"
],
[
"torch.nn.Softmax",
"torch.cat",
"torch.zeros",
"torch.sum",
"torch.nn.Embedding",
"torch.no_grad",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.sqrt",
"torch.arange",
"torch.ones_like",
"torch.zeros_like",
"torch.nn.Linear",
"torch.nn.KLDivLoss",
"torch.nn.functional.log_softmax",
"torch.matmul",
"torch.nn.ReLU",
"torch.nn.MSELoss"
],
[
"torch.utils.tensorboard.SummaryWriter"
],
[
"torch.nn.SmoothL1Loss",
"torch.nn.CrossEntropyLoss",
"torch.nn.LogSoftmax",
"torch.nn.KLDivLoss",
"torch.nn.functional.softmax",
"torch.Tensor",
"torch.nn.functional.log_softmax",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.functional.cross_entropy",
"torch.sum",
"torch.zeros_like",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.BCEWithLogitsLoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CatrionaMarr/OnlineMCMCTest | [
"92899d082c1bdfc2d61128ced453ac59812ae03a",
"92899d082c1bdfc2d61128ced453ac59812ae03a"
] | [
"results/8112a5333cb1bb472ee14fa5342f6422/pyfile.py",
"results/4a3182dfaaf9dfd540a677b1d9c323ed/pyfile.py"
] | [
"#!/usr/bin/env python\n\n# import required packages\nimport emcee\nimport numpy as np\nfrom numpy import exp, log\n\n# import model function from separate file\nfrom mymodel import mymodel\n\n# import post-processing function from theonlinemcmc package\nfrom theonlinemcmc import *\n\n# initialise error code value\nerrval = 0\n\n# define the log posterior function\ndef lnprob(theta, x, sigma_gauss, data):\n lp = lnprior(theta)\n if not np.isfinite(lp):\n return -np.inf\n\n return lp + lnlike(theta, x, sigma_gauss, data)\n\n\n# define the log prior function\ndef lnprior(theta):\n lp = 0.\n m,c = theta\n\n if -10 < m < 10:\n lp = 0.\n else:\n return -np.inf\n\n if -10 < c < 10:\n lp = 0.\n else:\n return -np.inf\n\n return lp\n\n\n# define log likelihood function\ndef lnlike(theta, x, sigma_gauss, data):\n m,c = theta\n md = mymodel(m,c,x)\n return -0.5*np.sum(((md - data)/sigma_gauss)**2)\n\n\n# set number of MCMC points\nNmcmc = 1000\nNburnin = 1000\nNens = 100\nndim = 2\n\n# initialise the start ensemble points\ntry:\n mini = -10 + np.random.rand(Nens)*20\n cini = -10 + np.random.rand(Nens)*20\n pos = np.array([mini, cini]).T\nexcept:\n errval = PRIOR_INIT_ERR\n\n# read in the data\nif errval == 0:\n try:\n data = np.loadtxt(\"data_file.txt\")\n except:\n try:\n data = np.loadtxt(\"data_file.txt\", delimiter=\",\")\n except:\n errval = DATA_READ_ERR\n\n\n# read in the abscissa values\nif errval == 0:\n try:\n x = np.loadtxt(\"abscissa_file.txt\")\n except:\n try:\n x = np.loadtxt(\"abscissa_file.txt\", delimiter=\",\")\n except:\n errval = ABSCISSA_READ_ERR\n\n\n# read in sigma (standard deviation) values (there may be nothing here if it not applicable to your likelihood)\n\n# run the MCMC\nif errval == 0:\n if len(data) != len(x):\n errval = DATA_LENGTH_ERR\n\n argslist = (x, 0.65, data)\n\nif errval == 0:\n # set up sampler\n try:\n sampler = emcee.EnsembleSampler(Nens, ndim, lnprob, args=argslist)\n except:\n errval = MCMC_INIT_ERR\n\n # run sampler\n try:\n sampler.run_mcmc(pos, Nmcmc+Nburnin)\n # remove burn-in and flatten\n samples = sampler.chain[:, Nburnin:, :].reshape((-1, ndim))\n lnp = np.reshape(sampler.lnprobability[:, Nburnin:].flatten(), (-1,1))\n samples = np.hstack((samples, lnp))\n except:\n errval = MCMC_RUN_ERR\n\n # output the posterior samples, likelihood and variables\n try:\n np.savetxt('posterior_samples.txt.gz', samples)\n fv = open('variables.txt', 'w')\n fv.write(\"m,c\")\n fv.close()\n except:\n errval = POST_OUTPUT_ERR\n\n # run post-processing script\n try:\n postprocessing(samples, \"m,c\", x, \"x\", data, \"[email protected]\", \"http://localhost/results/8112a5333cb1bb472ee14fa5342f6422\")\n except:\n errval = POST_PROCESS_ERR\n\nsuccess = True\nif errval != 0:\n # run different script in case error codes are encountered\n errorpage(errval, \"[email protected]\", \"http://localhost/results/8112a5333cb1bb472ee14fa5342f6422\")\n success = False\n\n\n",
"#!/usr/bin/env python\n\n# import required packages\nimport emcee\nimport numpy as np\nfrom numpy import exp, log\n\n# import model function from separate file\nfrom mymodel import mymodel\n\n# import post-processing function from theonlinemcmc package\nfrom theonlinemcmc import *\n\n# initialise error code value\nerrval = 0\n\n# define the log posterior function\ndef lnprob(theta, x, sigma_gauss, data):\n lp = lnprior(theta)\n if not np.isfinite(lp):\n return -np.inf\n\n return lp + lnlike(theta, x, sigma_gauss, data)\n\n\n# define the log prior function\ndef lnprior(theta):\n lp = 0.\n m,c = theta\n\n if -10 < m < 10:\n lp = 0.\n else:\n return -np.inf\n\n if -10 < c < 10:\n lp = 0.\n else:\n return -np.inf\n\n return lp\n\n\n# define log likelihood function\ndef lnlike(theta, x, sigma_gauss, data):\n m,c = theta\n md = mymodel(m,c,x)\n return -0.5*np.sum(((md - data)/sigma_gauss)**2)\n\n\n# set number of MCMC points\nNmcmc = 1000\nNburnin = 1000\nNens = 100\nndim = 2\n\n# initialise the start ensemble points\ntry:\n mini = -10 + np.random.rand(Nens)*20\n cini = -10 + np.random.rand(Nens)*20\n pos = np.array([mini, cini]).T\nexcept:\n errval = PRIOR_INIT_ERR\n\n# read in the data\nif errval == 0:\n try:\n data = np.loadtxt(\"data_file.txt\")\n except:\n try:\n data = np.loadtxt(\"data_file.txt\", delimiter=\",\")\n except:\n errval = DATA_READ_ERR\n\n\n# read in the abscissa values\nif errval == 0:\n try:\n x = np.loadtxt(\"abscissa_file.txt\")\n except:\n try:\n x = np.loadtxt(\"abscissa_file.txt\", delimiter=\",\")\n except:\n errval = ABSCISSA_READ_ERR\n\n\n# read in sigma (standard deviation) values (there may be nothing here if it not applicable to your likelihood)\n\n# run the MCMC\nif errval == 0:\n if len(data) != len(x):\n errval = DATA_LENGTH_ERR\n\n argslist = (x, 0.65, data)\n\nif errval == 0:\n # set up sampler\n try:\n sampler = emcee.EnsembleSampler(Nens, ndim, lnprob, args=argslist)\n except:\n errval = MCMC_INIT_ERR\n\n # run sampler\n try:\n sampler.run_mcmc(pos, Nmcmc+Nburnin)\n # remove burn-in and flatten\n samples = sampler.chain[:, Nburnin:, :].reshape((-1, ndim))\n lnp = np.reshape(sampler.lnprobability[:, Nburnin:].flatten(), (-1,1))\n samples = np.hstack((samples, lnp))\n except:\n errval = MCMC_RUN_ERR\n\n # output the posterior samples, likelihood and variables\n try:\n np.savetxt('posterior_samples.txt.gz', samples)\n fv = open('variables.txt', 'w')\n fv.write(\"m,c\")\n fv.close()\n except:\n errval = POST_OUTPUT_ERR\n\n # run post-processing script\n try:\n postprocessing(samples, \"m,c\", x, \"x\", data, \"[email protected]\", \"http://localhost/results/4a3182dfaaf9dfd540a677b1d9c323ed\")\n except:\n errval = POST_PROCESS_ERR\n\nsuccess = True\nif errval != 0:\n # run different script in case error codes are encountered\n errorpage(errval, \"[email protected]\", \"http://localhost/results/4a3182dfaaf9dfd540a677b1d9c323ed\")\n success = False\n\ndatabase_add_row(\"4a3182dfaaf9dfd540a677b1d9c323ed\", \"m*x+c\", \"m,c\", 2, success)\n\n\n"
] | [
[
"numpy.hstack",
"numpy.isfinite",
"numpy.random.rand",
"numpy.savetxt",
"numpy.array",
"numpy.sum",
"numpy.loadtxt"
],
[
"numpy.hstack",
"numpy.isfinite",
"numpy.random.rand",
"numpy.savetxt",
"numpy.array",
"numpy.sum",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zjutcv/mmhid | [
"faeaf4fb5c634037c6e482f63ef73e7f2144c7b5"
] | [
"mmhid/datasets/builder.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import Registry, build_from_cfg\nfrom torch.utils.data import DataLoader\n\nfrom .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler\n\nDATA_ROOT = {\n 'BIT': './data/BIT',\n 'UT': './data/ut120',\n 'highfive': './data/highfive'\n}\n\nFRAMES_ROOT = {\n 'BIT': 'Bit-frames',\n}\n\nANNO_ROOT = {\n 'BIT': 'BIT-anno/tidy_anno'\n}\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n base_soft_limit = rlimit[0]\n hard_limit = rlimit[1]\n soft_limit = min(max(4096, base_soft_limit), hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\nHID_DATASETS = Registry('hid_dataset')\nHID_PIPELINES = Registry('hid_pipeline')\n\n\ndef build_dataset(cfg, default_args=None):\n dataset = build_from_cfg(cfg, HID_DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n # DistributedGroupSampler will definitely shuffle the data to satisfy\n # that images on each GPU are in the same group\n if shuffle:\n sampler = DistributedGroupSampler(\n dataset, samples_per_gpu, world_size, rank, seed=seed)\n else:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=False, seed=seed)\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=False,\n worker_init_fn=init_fn,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n"
] | [
[
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
amelliaaas/tugastkc4 | [
"f442382c72379e911f3780543b95345a3b1c9407",
"f442382c72379e911f3780543b95345a3b1c9407",
"f442382c72379e911f3780543b95345a3b1c9407",
"f442382c72379e911f3780543b95345a3b1c9407",
"f442382c72379e911f3780543b95345a3b1c9407",
"f442382c72379e911f3780543b95345a3b1c9407",
"f442382c72379e911f3780543b95345a3b1c9407"
] | [
"venv/Lib/site-packages/skimage/transform/tests/test_radon_transform.py",
"venv/Lib/site-packages/skimage/segmentation/tests/test_slic.py",
"venv/Lib/site-packages/matplotlib/tests/test_axes.py",
"venv/Lib/site-packages/skimage/morphology/tests/test_reconstruction.py",
"venv/Lib/site-packages/skimage/morphology/tests/test_misc.py",
"venv/Lib/site-packages/skimage/filters/lpi_filter.py",
"venv/Lib/site-packages/skimage/data/_binary_blobs.py"
] | [
"import itertools\nimport pytest\n\nimport numpy as np\nfrom skimage.data import shepp_logan_phantom\nfrom skimage.transform import radon, iradon, iradon_sart, rescale\n\nfrom skimage._shared.utils import convert_to_float\nfrom skimage._shared import testing\nfrom skimage._shared.testing import test_parallel\nfrom skimage._shared._warnings import expected_warnings\n\n\nPHANTOM = shepp_logan_phantom()[::2, ::2]\nPHANTOM = rescale(PHANTOM, 0.5, order=1,\n mode='constant', anti_aliasing=False, multichannel=False)\n\n\ndef _debug_plot(original, result, sinogram=None):\n from matplotlib import pyplot as plt\n imkwargs = dict(cmap='gray', interpolation='nearest')\n if sinogram is None:\n plt.figure(figsize=(15, 6))\n sp = 130\n else:\n plt.figure(figsize=(11, 11))\n sp = 221\n plt.subplot(sp + 0)\n plt.imshow(sinogram, aspect='auto', **imkwargs)\n plt.subplot(sp + 1)\n plt.imshow(original, **imkwargs)\n plt.subplot(sp + 2)\n plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)\n plt.subplot(sp + 3)\n plt.imshow(result - original, **imkwargs)\n plt.colorbar()\n plt.show()\n\n\ndef _rescale_intensity(x):\n x = x.astype(float)\n x -= x.min()\n x /= x.max()\n return x\n\n\ndef test_iradon_bias_circular_phantom():\n \"\"\"\n test that a uniform circular phantom has a small reconstruction bias\n \"\"\"\n pixels = 128\n xy = np.arange(-pixels / 2, pixels / 2) + 0.5\n x, y = np.meshgrid(xy, xy)\n image = x**2 + y**2 <= (pixels/4)**2\n\n theta = np.linspace(0., 180., max(image.shape), endpoint=False)\n sinogram = radon(image, theta=theta)\n\n reconstruction_fbp = iradon(sinogram, theta=theta)\n error = reconstruction_fbp - image\n\n tol = 5e-5\n roi_err = np.abs(np.mean(error))\n assert roi_err < tol\n\n\ndef check_radon_center(shape, circle, dtype, preserve_range):\n # Create a test image with only a single non-zero pixel at the origin\n image = np.zeros(shape, dtype=dtype)\n image[(shape[0] // 2, shape[1] // 2)] = 1.\n # Calculate the sinogram\n theta = np.linspace(0., 180., max(shape), endpoint=False)\n sinogram = radon(image, theta=theta, circle=circle,\n preserve_range=preserve_range)\n # The sinogram should be a straight, horizontal line\n sinogram_max = np.argmax(sinogram, axis=0)\n print(sinogram_max)\n assert np.std(sinogram_max) < 1e-6\n\n\[email protected](\"shape\", [(16, 16), (17, 17)])\[email protected](\"circle\", [False, True])\[email protected](\"dtype\", [np.float64, np.float32, np.uint8, bool])\[email protected](\"preserve_range\", [False, True])\ndef test_radon_center(shape, circle, dtype, preserve_range):\n check_radon_center(shape, circle, dtype, preserve_range)\n\n\[email protected](\"shape\", [(32, 16), (33, 17)])\[email protected](\"circle\", [False])\[email protected](\"dtype\", [np.float64, np.float32, np.uint8, bool])\[email protected](\"preserve_range\", [False, True])\ndef test_radon_center_rectangular(shape, circle, dtype, preserve_range):\n check_radon_center(shape, circle, dtype, preserve_range)\n\n\ndef check_iradon_center(size, theta, circle):\n debug = False\n # Create a test sinogram corresponding to a single projection\n # with a single non-zero pixel at the rotation center\n if circle:\n sinogram = np.zeros((size, 1), dtype=float)\n sinogram[size // 2, 0] = 1.\n else:\n diagonal = int(np.ceil(np.sqrt(2) * size))\n sinogram = np.zeros((diagonal, 1), dtype=float)\n sinogram[sinogram.shape[0] // 2, 0] = 1.\n maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)\n print('shape of generated sinogram', sinogram.shape)\n print('maximum in 
generated sinogram', maxpoint)\n # Compare reconstructions for theta=angle and theta=angle + 180;\n # these should be exactly equal\n reconstruction = iradon(sinogram, theta=[theta], circle=circle)\n reconstruction_opposite = iradon(sinogram, theta=[theta + 180],\n circle=circle)\n print('rms deviance:',\n np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))\n if debug:\n import matplotlib.pyplot as plt\n imkwargs = dict(cmap='gray', interpolation='nearest')\n plt.figure()\n plt.subplot(221)\n plt.imshow(sinogram, **imkwargs)\n plt.subplot(222)\n plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)\n plt.subplot(223)\n plt.imshow(reconstruction, **imkwargs)\n plt.subplot(224)\n plt.imshow(reconstruction_opposite, **imkwargs)\n plt.show()\n\n assert np.allclose(reconstruction, reconstruction_opposite)\n\n\nsizes_for_test_iradon_center = [16, 17]\nthetas_for_test_iradon_center = [0, 90]\ncircles_for_test_iradon_center = [False, True]\n\n\[email protected](\"size, theta, circle\",\n itertools.product(sizes_for_test_iradon_center,\n thetas_for_test_iradon_center,\n circles_for_test_iradon_center))\ndef test_iradon_center(size, theta, circle):\n check_iradon_center(size, theta, circle)\n\n\ndef check_radon_iradon(interpolation_type, filter_type):\n debug = False\n image = PHANTOM\n reconstructed = iradon(radon(image, circle=False), filter_name=filter_type,\n interpolation=interpolation_type, circle=False)\n delta = np.mean(np.abs(image - reconstructed))\n print('\\n\\tmean error:', delta)\n if debug:\n _debug_plot(image, reconstructed)\n if filter_type in ('ramp', 'shepp-logan'):\n if interpolation_type == 'nearest':\n allowed_delta = 0.03\n else:\n allowed_delta = 0.025\n else:\n allowed_delta = 0.05\n assert delta < allowed_delta\n\n\nfilter_types = [\"ramp\", \"shepp-logan\", \"cosine\", \"hamming\", \"hann\"]\ninterpolation_types = ['linear', 'nearest']\nradon_iradon_inputs = list(itertools.product(interpolation_types,\n filter_types))\n# cubic interpolation is slow; only run one test for it\nradon_iradon_inputs.append(('cubic', 'shepp-logan'))\n\n\[email protected](\"interpolation_type, filter_type\",\n radon_iradon_inputs)\ndef test_radon_iradon(interpolation_type, filter_type):\n check_radon_iradon(interpolation_type, filter_type)\n\n\[email protected](\"filter_type\", filter_types)\ndef test_iradon_new_signature(filter_type):\n image = PHANTOM\n sinogram = radon(image, circle=False)\n with pytest.warns(FutureWarning):\n assert np.array_equal(iradon(sinogram, filter=filter_type),\n iradon(sinogram, filter_name=filter_type))\n\n\ndef test_iradon_angles():\n \"\"\"\n Test with different number of projections\n \"\"\"\n size = 100\n # Synthetic data\n image = np.tri(size) + np.tri(size)[::-1]\n # Large number of projections: a good quality is expected\n nb_angles = 200\n theta = np.linspace(0, 180, nb_angles, endpoint=False)\n radon_image_200 = radon(image, theta=theta, circle=False)\n reconstructed = iradon(radon_image_200, circle=False)\n delta_200 = np.mean(abs(_rescale_intensity(image) -\n _rescale_intensity(reconstructed)))\n assert delta_200 < 0.03\n # Lower number of projections\n nb_angles = 80\n radon_image_80 = radon(image, theta=theta, circle=False)\n # Test whether the sum of all projections is approximately the same\n s = radon_image_80.sum(axis=0)\n assert np.allclose(s, s[0], rtol=0.01)\n reconstructed = iradon(radon_image_80, circle=False)\n delta_80 = np.mean(abs(image / np.max(image) -\n reconstructed / np.max(reconstructed)))\n # Loss of quality 
when the number of projections is reduced\n assert delta_80 > delta_200\n\n\ndef check_radon_iradon_minimal(shape, slices):\n debug = False\n theta = np.arange(180)\n image = np.zeros(shape, dtype=float)\n image[slices] = 1.\n sinogram = radon(image, theta, circle=False)\n reconstructed = iradon(sinogram, theta, circle=False)\n print('\\n\\tMaximum deviation:', np.max(np.abs(image - reconstructed)))\n if debug:\n _debug_plot(image, reconstructed, sinogram)\n if image.sum() == 1:\n assert (np.unravel_index(np.argmax(reconstructed), image.shape)\n == np.unravel_index(np.argmax(image), image.shape))\n\n\nshapes = [(3, 3), (4, 4), (5, 5)]\n\n\ndef generate_test_data_for_radon_iradon_minimal(shapes):\n def shape2coordinates(shape):\n c0, c1 = shape[0] // 2, shape[1] // 2\n coordinates = itertools.product((c0 - 1, c0, c0 + 1),\n (c1 - 1, c1, c1 + 1))\n return coordinates\n\n def shape2shapeandcoordinates(shape):\n return itertools.product([shape], shape2coordinates(shape))\n\n return itertools.chain.from_iterable([shape2shapeandcoordinates(shape)\n for shape in shapes])\n\n\[email protected](\"shape, coordinate\",\n generate_test_data_for_radon_iradon_minimal(shapes))\ndef test_radon_iradon_minimal(shape, coordinate):\n check_radon_iradon_minimal(shape, coordinate)\n\n\ndef test_reconstruct_with_wrong_angles():\n a = np.zeros((3, 3))\n p = radon(a, theta=[0, 1, 2], circle=False)\n iradon(p, theta=[0, 1, 2], circle=False)\n with testing.raises(ValueError):\n iradon(p, theta=[0, 1, 2, 3])\n\n\ndef _random_circle(shape):\n # Synthetic random data, zero outside reconstruction circle\n np.random.seed(98312871)\n image = np.random.rand(*shape)\n c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]\n r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)\n radius = min(shape) // 2\n image[r > radius] = 0.\n return image\n\n\ndef test_radon_circle():\n a = np.ones((10, 10))\n with expected_warnings(['reconstruction circle']):\n radon(a, circle=True)\n\n # Synthetic data, circular symmetry\n shape = (61, 79)\n c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]\n r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)\n radius = min(shape) // 2\n image = np.clip(radius - r, 0, np.inf)\n image = _rescale_intensity(image)\n angles = np.linspace(0, 180, min(shape), endpoint=False)\n sinogram = radon(image, theta=angles, circle=True)\n assert np.all(sinogram.std(axis=1) < 1e-2)\n\n # Synthetic data, random\n image = _random_circle(shape)\n sinogram = radon(image, theta=angles, circle=True)\n mass = sinogram.sum(axis=0)\n average_mass = mass.mean()\n relative_error = np.abs(mass - average_mass) / average_mass\n print(relative_error.max(), relative_error.mean())\n assert np.all(relative_error < 3.2e-3)\n\n\ndef check_sinogram_circle_to_square(size):\n from skimage.transform.radon_transform import _sinogram_circle_to_square\n image = _random_circle((size, size))\n theta = np.linspace(0., 180., size, False)\n sinogram_circle = radon(image, theta, circle=True)\n\n def argmax_shape(a):\n return np.unravel_index(np.argmax(a), a.shape)\n\n print('\\n\\targmax of circle:', argmax_shape(sinogram_circle))\n sinogram_square = radon(image, theta, circle=False)\n print('\\targmax of square:', argmax_shape(sinogram_square))\n sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)\n print('\\targmax of circle to square:',\n argmax_shape(sinogram_circle_to_square))\n error = abs(sinogram_square - sinogram_circle_to_square)\n print(np.mean(error), np.max(error))\n assert (argmax_shape(sinogram_square) ==\n 
argmax_shape(sinogram_circle_to_square))\n\n\[email protected](\"size\", (50, 51))\ndef test_sinogram_circle_to_square(size):\n check_sinogram_circle_to_square(size)\n\n\ndef check_radon_iradon_circle(interpolation, shape, output_size):\n # Forward and inverse radon on synthetic data\n image = _random_circle(shape)\n radius = min(shape) // 2\n sinogram_rectangle = radon(image, circle=False)\n reconstruction_rectangle = iradon(sinogram_rectangle,\n output_size=output_size,\n interpolation=interpolation,\n circle=False)\n sinogram_circle = radon(image, circle=True)\n reconstruction_circle = iradon(sinogram_circle,\n output_size=output_size,\n interpolation=interpolation,\n circle=True)\n # Crop rectangular reconstruction to match circle=True reconstruction\n width = reconstruction_circle.shape[0]\n excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))\n s = np.s_[excess:width + excess, excess:width + excess]\n reconstruction_rectangle = reconstruction_rectangle[s]\n # Find the reconstruction circle, set reconstruction to zero outside\n c0, c1 = np.ogrid[0:width, 0:width]\n r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)\n reconstruction_rectangle[r > radius] = 0.\n print(reconstruction_circle.shape)\n print(reconstruction_rectangle.shape)\n np.allclose(reconstruction_rectangle, reconstruction_circle)\n\n\n# if adding more shapes to test data, you might want to look at commit d0f2bac3f\nshapes_radon_iradon_circle = ((61, 79), )\ninterpolations = ('nearest', 'linear')\noutput_sizes = (None,\n min(shapes_radon_iradon_circle[0]),\n max(shapes_radon_iradon_circle[0]),\n 97)\n\n\[email protected](\"shape, interpolation, output_size\",\n itertools.product(shapes_radon_iradon_circle,\n interpolations, output_sizes))\ndef test_radon_iradon_circle(shape, interpolation, output_size):\n check_radon_iradon_circle(interpolation, shape, output_size)\n\n\ndef test_order_angles_golden_ratio():\n from skimage.transform.radon_transform import order_angles_golden_ratio\n np.random.seed(1231)\n lengths = [1, 4, 10, 180]\n for l in lengths:\n theta_ordered = np.linspace(0, 180, l, endpoint=False)\n theta_random = np.random.uniform(0, 180, l)\n for theta in (theta_random, theta_ordered):\n indices = [x for x in order_angles_golden_ratio(theta)]\n # no duplicate indices allowed\n assert len(indices) == len(set(indices))\n\n\n@test_parallel()\ndef test_iradon_sart():\n debug = False\n\n image = rescale(PHANTOM, 0.8, mode='reflect',\n multichannel=False, anti_aliasing=False)\n theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)\n theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)\n for theta, error_factor in ((theta_ordered, 1.),\n (theta_missing_wedge, 2.)):\n sinogram = radon(image, theta, circle=True)\n reconstructed = iradon_sart(sinogram, theta)\n\n if debug:\n from matplotlib import pyplot as plt\n plt.figure()\n plt.subplot(221)\n plt.imshow(image, interpolation='nearest')\n plt.subplot(222)\n plt.imshow(sinogram, interpolation='nearest')\n plt.subplot(223)\n plt.imshow(reconstructed, interpolation='nearest')\n plt.subplot(224)\n plt.imshow(reconstructed - image, interpolation='nearest')\n plt.show()\n\n delta = np.mean(np.abs(reconstructed - image))\n print('delta (1 iteration) =', delta)\n assert delta < 0.02 * error_factor\n reconstructed = iradon_sart(sinogram, theta, reconstructed)\n delta = np.mean(np.abs(reconstructed - image))\n print('delta (2 iterations) =', delta)\n assert delta < 0.014 * error_factor\n reconstructed = 
iradon_sart(sinogram, theta, clip=(0, 1))\n delta = np.mean(np.abs(reconstructed - image))\n print('delta (1 iteration, clip) =', delta)\n assert delta < 0.018 * error_factor\n\n np.random.seed(1239867)\n shifts = np.random.uniform(-3, 3, sinogram.shape[1])\n x = np.arange(sinogram.shape[0])\n sinogram_shifted = np.vstack([np.interp(x + shifts[i], x,\n sinogram[:, i])\n for i in range(sinogram.shape[1])]).T\n reconstructed = iradon_sart(sinogram_shifted, theta,\n projection_shifts=shifts)\n if debug:\n from matplotlib import pyplot as plt\n plt.figure()\n plt.subplot(221)\n plt.imshow(image, interpolation='nearest')\n plt.subplot(222)\n plt.imshow(sinogram_shifted, interpolation='nearest')\n plt.subplot(223)\n plt.imshow(reconstructed, interpolation='nearest')\n plt.subplot(224)\n plt.imshow(reconstructed - image, interpolation='nearest')\n plt.show()\n\n delta = np.mean(np.abs(reconstructed - image))\n print('delta (1 iteration, shifted sinogram) =', delta)\n assert delta < 0.022 * error_factor\n\n\[email protected](\"preserve_range\", [True, False])\ndef test_iradon_dtype(preserve_range):\n sinogram = np.zeros((16, 1), dtype=int)\n sinogram[8, 0] = 1.\n sinogram64 = sinogram.astype('float64')\n sinogram32 = sinogram.astype('float32')\n\n assert iradon(sinogram, theta=[0],\n preserve_range=preserve_range).dtype == 'float64'\n assert iradon(sinogram64, theta=[0],\n preserve_range=preserve_range).dtype == sinogram64.dtype\n assert iradon(sinogram32, theta=[0],\n preserve_range=preserve_range).dtype == sinogram32.dtype\n\n\ndef test_radon_dtype():\n img = convert_to_float(PHANTOM, False)\n img32 = img.astype(np.float32)\n\n assert radon(img).dtype == img.dtype\n assert radon(img32).dtype == img32.dtype\n\n\[email protected](\"dtype\", [np.float32, np.float64])\ndef test_iradon_sart_dtype(dtype):\n sinogram = np.zeros((16, 1), dtype=int)\n sinogram[8, 0] = 1.\n sinogram64 = sinogram.astype('float64')\n sinogram32 = sinogram.astype('float32')\n\n with expected_warnings(['Input data is cast to float']):\n assert iradon_sart(sinogram, theta=[0]).dtype == 'float64'\n\n assert iradon_sart(sinogram64, theta=[0]).dtype == sinogram64.dtype\n assert iradon_sart(sinogram32, theta=[0]).dtype == sinogram32.dtype\n\n assert iradon_sart(sinogram, theta=[0], dtype=dtype).dtype == dtype\n assert iradon_sart(sinogram32, theta=[0], dtype=dtype).dtype == dtype\n assert iradon_sart(sinogram64, theta=[0], dtype=dtype).dtype == dtype\n\n\ndef test_iradon_sart_wrong_dtype():\n sinogram = np.zeros((16, 1))\n\n with testing.raises(ValueError):\n iradon_sart(sinogram, dtype=int)\n",
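# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the test file above). Assuming
# scikit-image is installed, it shows the forward/inverse Radon round trip
# that the tests above exercise: project a small phantom, reconstruct it
# with filtered back-projection, and report the mean absolute error. The
# name `demo_radon_round_trip` is hypothetical, not from the original suite.
import numpy as np
from skimage.transform import radon, iradon


def demo_radon_round_trip():
    image = np.zeros((64, 64))
    image[24:40, 24:40] = 1.0  # simple square phantom
    # One projection angle per image column keeps the problem well sampled.
    theta = np.linspace(0., 180., 64, endpoint=False)
    sinogram = radon(image, theta=theta, circle=False)
    reconstruction = iradon(sinogram, theta=theta, circle=False)
    return np.mean(np.abs(reconstruction - image))


if __name__ == '__main__':
    print('mean abs reconstruction error:', demo_radon_round_trip())
# ---------------------------------------------------------------------------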
"from itertools import product\n\nimport pytest\nimport numpy as np\nfrom skimage.segmentation import slic\n\nfrom skimage._shared import testing\nfrom skimage._shared.testing import test_parallel, assert_equal\n\n\n@test_parallel()\ndef test_color_2d():\n rnd = np.random.RandomState(0)\n img = np.zeros((20, 21, 3))\n img[:10, :10, 0] = 1\n img[10:, :10, 1] = 1\n img[10:, 10:, 2] = 1\n img += 0.01 * rnd.normal(size=img.shape)\n img[img > 1] = 1\n img[img < 0] = 0\n seg = slic(img, n_segments=4, sigma=0, enforce_connectivity=False,\n start_label=0)\n\n # we expect 4 segments\n assert_equal(len(np.unique(seg)), 4)\n assert_equal(seg.shape, img.shape[:-1])\n assert_equal(seg[:10, :10], 0)\n assert_equal(seg[10:, :10], 2)\n assert_equal(seg[:10, 10:], 1)\n assert_equal(seg[10:, 10:], 3)\n\n\ndef test_multichannel_2d():\n rnd = np.random.RandomState(0)\n img = np.zeros((20, 20, 8))\n img[:10, :10, 0:2] = 1\n img[:10, 10:, 2:4] = 1\n img[10:, :10, 4:6] = 1\n img[10:, 10:, 6:8] = 1\n img += 0.01 * rnd.normal(size=img.shape)\n img = np.clip(img, 0, 1, out=img)\n seg = slic(img, n_segments=4, enforce_connectivity=False, start_label=0)\n\n # we expect 4 segments\n assert_equal(len(np.unique(seg)), 4)\n assert_equal(seg.shape, img.shape[:-1])\n assert_equal(seg[:10, :10], 0)\n assert_equal(seg[10:, :10], 2)\n assert_equal(seg[:10, 10:], 1)\n assert_equal(seg[10:, 10:], 3)\n\n\ndef test_gray_2d():\n rnd = np.random.RandomState(0)\n img = np.zeros((20, 21))\n img[:10, :10] = 0.33\n img[10:, :10] = 0.67\n img[10:, 10:] = 1.00\n img += 0.0033 * rnd.normal(size=img.shape)\n img[img > 1] = 1\n img[img < 0] = 0\n seg = slic(img, sigma=0, n_segments=4, compactness=1,\n multichannel=False, convert2lab=False, start_label=0)\n\n assert_equal(len(np.unique(seg)), 4)\n assert_equal(seg.shape, img.shape)\n assert_equal(seg[:10, :10], 0)\n assert_equal(seg[10:, :10], 2)\n assert_equal(seg[:10, 10:], 1)\n assert_equal(seg[10:, 10:], 3)\n\n\ndef test_color_3d():\n rnd = np.random.RandomState(0)\n img = np.zeros((20, 21, 22, 3))\n slices = []\n for dim_size in img.shape[:-1]:\n midpoint = dim_size // 2\n slices.append((slice(None, midpoint), slice(midpoint, None)))\n slices = list(product(*slices))\n colors = list(product(*(([0, 1],) * 3)))\n for s, c in zip(slices, colors):\n img[s] = c\n img += 0.01 * rnd.normal(size=img.shape)\n img[img > 1] = 1\n img[img < 0] = 0\n seg = slic(img, sigma=0, n_segments=8, start_label=0)\n\n assert_equal(len(np.unique(seg)), 8)\n for s, c in zip(slices, range(8)):\n assert_equal(seg[s], c)\n\n\ndef test_gray_3d():\n rnd = np.random.RandomState(0)\n img = np.zeros((20, 21, 22))\n slices = []\n for dim_size in img.shape:\n midpoint = dim_size // 2\n slices.append((slice(None, midpoint), slice(midpoint, None)))\n slices = list(product(*slices))\n shades = np.arange(0, 1.000001, 1.0 / 7)\n for s, sh in zip(slices, shades):\n img[s] = sh\n img += 0.001 * rnd.normal(size=img.shape)\n img[img > 1] = 1\n img[img < 0] = 0\n seg = slic(img, sigma=0, n_segments=8, compactness=1,\n multichannel=False, convert2lab=False, start_label=0)\n\n assert_equal(len(np.unique(seg)), 8)\n for s, c in zip(slices, range(8)):\n assert_equal(seg[s], c)\n\n\ndef test_list_sigma():\n rnd = np.random.RandomState(0)\n img = np.array([[1, 1, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 1]], float)\n img += 0.1 * rnd.normal(size=img.shape)\n result_sigma = np.array([[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]], int)\n seg_sigma = slic(img, n_segments=2, sigma=[1, 50, 1],\n multichannel=False, start_label=0)\n assert_equal(seg_sigma, 
result_sigma)\n\n\ndef test_spacing():\n rnd = np.random.RandomState(0)\n img = np.array([[1, 1, 1, 0, 0],\n [1, 1, 0, 0, 0]], float)\n result_non_spaced = np.array([[0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]], int)\n result_spaced = np.array([[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1]], int)\n img += 0.1 * rnd.normal(size=img.shape)\n seg_non_spaced = slic(img, n_segments=2, sigma=0, multichannel=False,\n compactness=1.0, start_label=0)\n seg_spaced = slic(img, n_segments=2, sigma=0, spacing=[1, 500, 1],\n compactness=1.0, multichannel=False, start_label=0)\n assert_equal(seg_non_spaced, result_non_spaced)\n assert_equal(seg_spaced, result_spaced)\n\n\ndef test_invalid_lab_conversion():\n img = np.array([[1, 1, 1, 0, 0],\n [1, 1, 0, 0, 0]], float) + 1\n with testing.raises(ValueError):\n slic(img, multichannel=True, convert2lab=True, start_label=0)\n\n\ndef test_enforce_connectivity():\n img = np.array([[0, 0, 0, 1, 1, 1],\n [1, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 0]], float)\n\n segments_connected = slic(img, 2, compactness=0.0001,\n enforce_connectivity=True,\n convert2lab=False, start_label=0)\n segments_disconnected = slic(img, 2, compactness=0.0001,\n enforce_connectivity=False,\n convert2lab=False, start_label=0)\n\n # Make sure nothing fatal occurs (e.g. buffer overflow) at low values of\n # max_size_factor\n segments_connected_low_max = slic(img, 2, compactness=0.0001,\n enforce_connectivity=True,\n convert2lab=False,\n max_size_factor=0.8,\n start_label=0)\n\n result_connected = np.array([[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]], float)\n\n result_disconnected = np.array([[0, 0, 0, 1, 1, 1],\n [1, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 0]], float)\n\n assert_equal(segments_connected, result_connected)\n assert_equal(segments_disconnected, result_disconnected)\n assert_equal(segments_connected_low_max, result_connected)\n\n\ndef test_slic_zero():\n # Same as test_color_2d but with slic_zero=True\n rnd = np.random.RandomState(0)\n img = np.zeros((20, 21, 3))\n img[:10, :10, 0] = 1\n img[10:, :10, 1] = 1\n img[10:, 10:, 2] = 1\n img += 0.01 * rnd.normal(size=img.shape)\n img[img > 1] = 1\n img[img < 0] = 0\n seg = slic(img, n_segments=4, sigma=0, slic_zero=True, start_label=0)\n\n # we expect 4 segments\n assert_equal(len(np.unique(seg)), 4)\n assert_equal(seg.shape, img.shape[:-1])\n assert_equal(seg[:10, :10], 0)\n assert_equal(seg[10:, :10], 2)\n assert_equal(seg[:10, 10:], 1)\n assert_equal(seg[10:, 10:], 3)\n\n\ndef test_more_segments_than_pixels():\n rnd = np.random.RandomState(0)\n img = np.zeros((20, 21))\n img[:10, :10] = 0.33\n img[10:, :10] = 0.67\n img[10:, 10:] = 1.00\n img += 0.0033 * rnd.normal(size=img.shape)\n img[img > 1] = 1\n img[img < 0] = 0\n seg = slic(img, sigma=0, n_segments=500, compactness=1,\n multichannel=False, convert2lab=False, start_label=0)\n assert np.all(seg.ravel() == np.arange(seg.size))\n\n\ndef test_color_2d_mask():\n rnd = np.random.RandomState(0)\n msk = np.zeros((20, 21))\n msk[2:-2, 2:-2] = 1\n img = np.zeros((20, 21, 3))\n img[:10, :10, 0] = 1\n img[10:, :10, 1] = 1\n img[10:, 10:, 2] = 1\n img += 0.01 * rnd.normal(size=img.shape)\n np.clip(img, 0, 1, out=img)\n seg = slic(img, n_segments=4, sigma=0, enforce_connectivity=False,\n mask=msk)\n\n # we expect 4 segments + masked area\n assert_equal(len(np.unique(seg)), 5)\n assert_equal(seg.shape, img.shape[:-1])\n # segments\n assert_equal(seg[2:10, 2:10], 1)\n assert_equal(seg[10:-2, 2:10], 4)\n assert_equal(seg[2:10, 10:-2], 2)\n assert_equal(seg[10:-2, 10:-2], 3)\n # non masked area\n 
assert_equal(seg[:2, :], 0)\n assert_equal(seg[-2:, :], 0)\n assert_equal(seg[:, :2], 0)\n assert_equal(seg[:, -2:], 0)\n\n\ndef test_multichannel_2d_mask():\n rnd = np.random.RandomState(0)\n msk = np.zeros((20, 20))\n msk[2:-2, 2:-2] = 1\n img = np.zeros((20, 20, 8))\n img[:10, :10, 0:2] = 1\n img[:10, 10:, 2:4] = 1\n img[10:, :10, 4:6] = 1\n img[10:, 10:, 6:8] = 1\n img += 0.01 * rnd.normal(size=img.shape)\n np.clip(img, 0, 1, out=img)\n seg = slic(img, n_segments=4, enforce_connectivity=False,\n mask=msk)\n\n # we expect 4 segments + masked area\n assert_equal(len(np.unique(seg)), 5)\n assert_equal(seg.shape, img.shape[:-1])\n # segments\n assert_equal(seg[2:10, 2:10], 2)\n assert_equal(seg[2:10, 10:-2], 1)\n assert_equal(seg[10:-2, 2:10], 4)\n assert_equal(seg[10:-2, 10:-2], 3)\n # non masked area\n assert_equal(seg[:2, :], 0)\n assert_equal(seg[-2:, :], 0)\n assert_equal(seg[:, :2], 0)\n assert_equal(seg[:, -2:], 0)\n\n\ndef test_gray_2d_mask():\n rnd = np.random.RandomState(0)\n msk = np.zeros((20, 21))\n msk[2:-2, 2:-2] = 1\n img = np.zeros((20, 21))\n img[:10, :10] = 0.33\n img[10:, :10] = 0.67\n img[10:, 10:] = 1.00\n img += 0.0033 * rnd.normal(size=img.shape)\n np.clip(img, 0, 1, out=img)\n seg = slic(img, sigma=0, n_segments=4, compactness=1,\n multichannel=False, convert2lab=False, mask=msk)\n\n assert_equal(len(np.unique(seg)), 5)\n assert_equal(seg.shape, img.shape)\n # segments\n assert_equal(seg[2:10, 2:10], 1)\n assert_equal(seg[2:10, 10:-2], 2)\n assert_equal(seg[10:-2, 2:10], 3)\n assert_equal(seg[10:-2, 10:-2], 4)\n # non masked area\n assert_equal(seg[:2, :], 0)\n assert_equal(seg[-2:, :], 0)\n assert_equal(seg[:, :2], 0)\n assert_equal(seg[:, -2:], 0)\n\n\ndef test_list_sigma_mask():\n rnd = np.random.RandomState(0)\n msk = np.zeros((2, 6))\n msk[:, 1:-1] = 1\n img = np.array([[1, 1, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 1]], float)\n img += 0.1 * rnd.normal(size=img.shape)\n result_sigma = np.array([[0, 1, 1, 2, 2, 0],\n [0, 1, 1, 2, 2, 0]], int)\n seg_sigma = slic(img, n_segments=2, sigma=[1, 50, 1],\n multichannel=False, mask=msk)\n assert_equal(seg_sigma, result_sigma)\n\n\ndef test_spacing_mask():\n rnd = np.random.RandomState(0)\n msk = np.zeros((2, 5))\n msk[:, 1:-1] = 1\n img = np.array([[1, 1, 1, 0, 0],\n [1, 1, 0, 0, 0]], float)\n result_non_spaced = np.array([[0, 1, 1, 2, 0],\n [0, 1, 2, 2, 0]], int)\n result_spaced = np.array([[0, 1, 1, 1, 0],\n [0, 2, 2, 2, 0]], int)\n img += 0.1 * rnd.normal(size=img.shape)\n seg_non_spaced = slic(img, n_segments=2, sigma=0, multichannel=False,\n compactness=1.0, mask=msk)\n seg_spaced = slic(img, n_segments=2, sigma=0, spacing=[1, 50, 1],\n compactness=1.0, multichannel=False, mask=msk)\n assert_equal(seg_non_spaced, result_non_spaced)\n assert_equal(seg_spaced, result_spaced)\n\n\ndef test_enforce_connectivity_mask():\n msk = np.zeros((3, 6))\n msk[:, 1:-1] = 1\n img = np.array([[0, 0, 0, 1, 1, 1],\n [1, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 0]], float)\n\n segments_connected = slic(img, 2, compactness=0.0001,\n enforce_connectivity=True,\n convert2lab=False, mask=msk)\n segments_disconnected = slic(img, 2, compactness=0.0001,\n enforce_connectivity=False,\n convert2lab=False, mask=msk)\n\n # Make sure nothing fatal occurs (e.g. 
buffer overflow) at low values of\n # max_size_factor\n segments_connected_low_max = slic(img, 2, compactness=0.0001,\n enforce_connectivity=True,\n convert2lab=False,\n max_size_factor=0.8, mask=msk)\n\n result_connected = np.array([[0, 1, 1, 2, 2, 0],\n [0, 1, 1, 2, 2, 0],\n [0, 1, 1, 2, 2, 0]], float)\n\n result_disconnected = np.array([[0, 1, 1, 2, 2, 0],\n [0, 1, 1, 2, 2, 0],\n [0, 1, 1, 2, 2, 0]], float)\n\n assert_equal(segments_connected, result_connected)\n assert_equal(segments_disconnected, result_disconnected)\n assert_equal(segments_connected_low_max, result_connected)\n\n\ndef test_slic_zero_mask():\n\n rnd = np.random.RandomState(0)\n msk = np.zeros((20, 21))\n msk[2:-2, 2:-2] = 1\n img = np.zeros((20, 21, 3))\n img[:10, :10, 0] = 1\n img[10:, :10, 1] = 1\n img[10:, 10:, 2] = 1\n img += 0.01 * rnd.normal(size=img.shape)\n np.clip(img, 0, 1, out=img)\n seg = slic(img, n_segments=4, sigma=0, slic_zero=True,\n mask=msk)\n\n # we expect 4 segments + masked area\n assert_equal(len(np.unique(seg)), 5)\n assert_equal(seg.shape, img.shape[:-1])\n # segments\n assert_equal(seg[2:10, 2:10], 1)\n assert_equal(seg[2:10, 10:-2], 2)\n assert_equal(seg[10:-2, 2:10], 3)\n assert_equal(seg[10:-2, 10:-2], 4)\n # non masked area\n assert_equal(seg[:2, :], 0)\n assert_equal(seg[-2:, :], 0)\n assert_equal(seg[:, :2], 0)\n assert_equal(seg[:, -2:], 0)\n\n\ndef test_more_segments_than_pixels_mask():\n rnd = np.random.RandomState(0)\n msk = np.zeros((20, 21))\n msk[2:-2, 2:-2] = 1\n img = np.zeros((20, 21))\n img[:10, :10] = 0.33\n img[10:, :10] = 0.67\n img[10:, 10:] = 1.00\n img += 0.0033 * rnd.normal(size=img.shape)\n np.clip(img, 0, 1, out=img)\n seg = slic(img, sigma=0, n_segments=500, compactness=1,\n multichannel=False, convert2lab=False, mask=msk)\n\n expected = np.arange(seg[2:-2, 2:-2].size) + 1\n assert np.all(seg[2:-2, 2:-2].ravel() == expected)\n\n\ndef test_color_3d_mask():\n\n msk = np.zeros((20, 21, 22))\n msk[2:-2, 2:-2, 2:-2] = 1\n\n rnd = np.random.RandomState(0)\n img = np.zeros((20, 21, 22, 3))\n slices = []\n for dim_size in msk.shape:\n midpoint = dim_size // 2\n slices.append((slice(None, midpoint), slice(midpoint, None)))\n slices = list(product(*slices))\n colors = list(product(*(([0, 1],) * 3)))\n for s, c in zip(slices, colors):\n img[s] = c\n img += 0.01 * rnd.normal(size=img.shape)\n np.clip(img, 0, 1, out=img)\n\n seg = slic(img, sigma=0, n_segments=8, mask=msk)\n\n # we expect 8 segments + masked area\n assert_equal(len(np.unique(seg)), 9)\n for s, c in zip(slices, range(1, 9)):\n assert_equal(seg[s][2:-2, 2:-2, 2:-2], c)\n\n\ndef test_gray_3d_mask():\n\n msk = np.zeros((20, 21, 22))\n msk[2:-2, 2:-2, 2:-2] = 1\n\n rnd = np.random.RandomState(0)\n img = np.zeros((20, 21, 22))\n slices = []\n for dim_size in img.shape:\n midpoint = dim_size // 2\n slices.append((slice(None, midpoint), slice(midpoint, None)))\n slices = list(product(*slices))\n shades = np.linspace(0, 1, 8)\n for s, sh in zip(slices, shades):\n img[s] = sh\n img += 0.001 * rnd.normal(size=img.shape)\n np.clip(img, 0, 1, out=img)\n seg = slic(img, sigma=0, n_segments=8, multichannel=False,\n convert2lab=False, mask=msk)\n\n # we expect 8 segments + masked area\n assert_equal(len(np.unique(seg)), 9)\n for s, c in zip(slices, range(1, 9)):\n assert_equal(seg[s][2:-2, 2:-2, 2:-2], c)\n\n\[email protected](\"dtype\", ['float32', 'float64', 'uint8', 'int'])\ndef test_dtype_support(dtype):\n img = np.random.rand(28, 28).astype(dtype)\n\n # Simply run the function to assert that it runs without error\n 
slic(img, start_label=1)\n",
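# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the test file above). Assuming
# scikit-image is installed, it shows the masked SLIC call that the `*_mask`
# tests above exercise, with the same keyword arguments those tests use:
# superpixels are computed only inside the mask, and label 0 is reserved for
# the masked-out border, so segment labels start at 1. The name
# `demo_masked_slic` is hypothetical, not from the original suite.
import numpy as np
from skimage.segmentation import slic


def demo_masked_slic():
    rng = np.random.RandomState(0)
    img = np.zeros((20, 20))
    img[:, 10:] = 1.0                      # two-tone image
    img += 0.01 * rng.normal(size=img.shape)
    msk = np.zeros((20, 20), dtype=bool)
    msk[2:-2, 2:-2] = True                 # ignore a 2-pixel border
    seg = slic(img, n_segments=2, compactness=1, multichannel=False,
               convert2lab=False, mask=msk)
    return np.unique(seg)  # masked border is 0; superpixels are 1..n


if __name__ == '__main__':
    print('labels:', demo_masked_slic())
# ---------------------------------------------------------------------------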
"from collections import namedtuple\nimport datetime\nfrom decimal import Decimal\nimport io\nfrom itertools import product\nimport platform\nfrom types import SimpleNamespace\ntry:\n from contextlib import nullcontext\nexcept ImportError:\n from contextlib import ExitStack as nullcontext # Py3.6.\n\nimport dateutil.tz\n\nimport numpy as np\nfrom numpy import ma\nfrom cycler import cycler\nimport pytest\n\nimport matplotlib\nimport matplotlib as mpl\nfrom matplotlib.testing.decorators import (\n image_comparison, check_figures_equal, remove_ticks_and_titles)\nimport matplotlib.colors as mcolors\nimport matplotlib.dates as mdates\nfrom matplotlib.figure import Figure\nimport matplotlib.font_manager as mfont_manager\nimport matplotlib.markers as mmarkers\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nimport matplotlib.transforms as mtransforms\nfrom numpy.testing import (\n assert_allclose, assert_array_equal, assert_array_almost_equal)\nfrom matplotlib import rc_context\nfrom matplotlib.cbook import MatplotlibDeprecationWarning\n\n# Note: Some test cases are run twice: once normally and once with labeled data\n# These two must be defined in the same test function or need to have\n# different baseline images to prevent race conditions when pytest runs\n# the tests with multiple threads.\n\n\ndef test_get_labels():\n fig, ax = plt.subplots()\n ax.set_xlabel('x label')\n ax.set_ylabel('y label')\n assert ax.get_xlabel() == 'x label'\n assert ax.get_ylabel() == 'y label'\n\n\n@check_figures_equal()\ndef test_label_loc_vertical(fig_test, fig_ref):\n ax = fig_test.subplots()\n sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')\n ax.legend()\n ax.set_ylabel('Y Label', loc='top')\n ax.set_xlabel('X Label', loc='right')\n cbar = fig_test.colorbar(sc)\n cbar.set_label(\"Z Label\", loc='top')\n\n ax = fig_ref.subplots()\n sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')\n ax.legend()\n ax.set_ylabel('Y Label', y=1, ha='right')\n ax.set_xlabel('X Label', x=1, ha='right')\n cbar = fig_ref.colorbar(sc)\n cbar.set_label(\"Z Label\", y=1, ha='right')\n\n\n@check_figures_equal()\ndef test_label_loc_horizontal(fig_test, fig_ref):\n ax = fig_test.subplots()\n sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')\n ax.legend()\n ax.set_ylabel('Y Label', loc='bottom')\n ax.set_xlabel('X Label', loc='left')\n cbar = fig_test.colorbar(sc, orientation='horizontal')\n cbar.set_label(\"Z Label\", loc='left')\n\n ax = fig_ref.subplots()\n sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')\n ax.legend()\n ax.set_ylabel('Y Label', y=0, ha='left')\n ax.set_xlabel('X Label', x=0, ha='left')\n cbar = fig_ref.colorbar(sc, orientation='horizontal')\n cbar.set_label(\"Z Label\", x=0, ha='left')\n\n\n@check_figures_equal()\ndef test_label_loc_rc(fig_test, fig_ref):\n with matplotlib.rc_context({\"xaxis.labellocation\": \"right\",\n \"yaxis.labellocation\": \"top\"}):\n ax = fig_test.subplots()\n sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')\n ax.legend()\n ax.set_ylabel('Y Label')\n ax.set_xlabel('X Label')\n cbar = fig_test.colorbar(sc, orientation='horizontal')\n cbar.set_label(\"Z Label\")\n\n ax = fig_ref.subplots()\n sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')\n ax.legend()\n ax.set_ylabel('Y Label', y=1, ha='right')\n ax.set_xlabel('X Label', x=1, ha='right')\n cbar = fig_ref.colorbar(sc, orientation='horizontal')\n cbar.set_label(\"Z Label\", x=1, 
ha='right')\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_acorr(fig_test, fig_ref):\n np.random.seed(19680801)\n Nx = 512\n x = np.random.normal(0, 1, Nx).cumsum()\n maxlags = Nx-1\n\n ax_test = fig_test.subplots()\n ax_test.acorr(x, maxlags=maxlags)\n\n ax_ref = fig_ref.subplots()\n # Normalized autocorrelation\n norm_auto_corr = np.correlate(x, x, mode=\"full\")/np.dot(x, x)\n lags = np.arange(-maxlags, maxlags+1)\n norm_auto_corr = norm_auto_corr[Nx-1-maxlags:Nx+maxlags]\n ax_ref.vlines(lags, [0], norm_auto_corr)\n ax_ref.axhline(y=0, xmin=0, xmax=1)\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_spy(fig_test, fig_ref):\n np.random.seed(19680801)\n a = np.ones(32 * 32)\n a[:16 * 32] = 0\n np.random.shuffle(a)\n a = a.reshape((32, 32))\n\n axs_test = fig_test.subplots(2)\n axs_test[0].spy(a)\n axs_test[1].spy(a, marker=\".\", origin=\"lower\")\n\n axs_ref = fig_ref.subplots(2)\n axs_ref[0].imshow(a, cmap=\"gray_r\", interpolation=\"nearest\")\n axs_ref[0].xaxis.tick_top()\n axs_ref[1].plot(*np.nonzero(a)[::-1], \".\", markersize=10)\n axs_ref[1].set(\n aspect=1, xlim=axs_ref[0].get_xlim(), ylim=axs_ref[0].get_ylim()[::-1])\n for ax in axs_ref:\n ax.xaxis.set_ticks_position(\"both\")\n\n\ndef test_spy_invalid_kwargs():\n fig, ax = plt.subplots()\n for unsupported_kw in [{'interpolation': 'nearest'},\n {'marker': 'o', 'linestyle': 'solid'}]:\n with pytest.raises(TypeError):\n ax.spy(np.eye(3, 3), **unsupported_kw)\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_matshow(fig_test, fig_ref):\n mpl.style.use(\"mpl20\")\n a = np.random.rand(32, 32)\n fig_test.add_subplot().matshow(a)\n ax_ref = fig_ref.add_subplot()\n ax_ref.imshow(a)\n ax_ref.xaxis.tick_top()\n ax_ref.xaxis.set_ticks_position('both')\n\n\n@image_comparison(['formatter_ticker_001',\n 'formatter_ticker_002',\n 'formatter_ticker_003',\n 'formatter_ticker_004',\n 'formatter_ticker_005',\n ])\ndef test_formatter_ticker():\n import matplotlib.testing.jpl_units as units\n units.register()\n\n # This should affect the tick size. 
(Tests issue #543)\n matplotlib.rcParams['lines.markeredgewidth'] = 30\n\n # This essentially test to see if user specified labels get overwritten\n # by the auto labeler functionality of the axes.\n xdata = [x*units.sec for x in range(10)]\n ydata1 = [(1.5*y - 0.5)*units.km for y in range(10)]\n ydata2 = [(1.75*y - 1.0)*units.km for y in range(10)]\n\n ax = plt.figure().subplots()\n ax.set_xlabel(\"x-label 001\")\n\n ax = plt.figure().subplots()\n ax.set_xlabel(\"x-label 001\")\n ax.plot(xdata, ydata1, color='blue', xunits=\"sec\")\n\n ax = plt.figure().subplots()\n ax.set_xlabel(\"x-label 001\")\n ax.plot(xdata, ydata1, color='blue', xunits=\"sec\")\n ax.set_xlabel(\"x-label 003\")\n\n ax = plt.figure().subplots()\n ax.plot(xdata, ydata1, color='blue', xunits=\"sec\")\n ax.plot(xdata, ydata2, color='green', xunits=\"hour\")\n ax.set_xlabel(\"x-label 004\")\n\n # See SF bug 2846058\n # https://sourceforge.net/tracker/?func=detail&aid=2846058&group_id=80706&atid=560720\n ax = plt.figure().subplots()\n ax.plot(xdata, ydata1, color='blue', xunits=\"sec\")\n ax.plot(xdata, ydata2, color='green', xunits=\"hour\")\n ax.set_xlabel(\"x-label 005\")\n ax.autoscale_view()\n\n\ndef test_funcformatter_auto_formatter():\n def _formfunc(x, pos):\n return ''\n\n ax = plt.figure().subplots()\n\n assert ax.xaxis.isDefault_majfmt\n assert ax.xaxis.isDefault_minfmt\n assert ax.yaxis.isDefault_majfmt\n assert ax.yaxis.isDefault_minfmt\n\n ax.xaxis.set_major_formatter(_formfunc)\n\n assert not ax.xaxis.isDefault_majfmt\n assert ax.xaxis.isDefault_minfmt\n assert ax.yaxis.isDefault_majfmt\n assert ax.yaxis.isDefault_minfmt\n\n targ_funcformatter = mticker.FuncFormatter(_formfunc)\n\n assert isinstance(ax.xaxis.get_major_formatter(),\n mticker.FuncFormatter)\n\n assert ax.xaxis.get_major_formatter().func == targ_funcformatter.func\n\n\ndef test_strmethodformatter_auto_formatter():\n formstr = '{x}_{pos}'\n\n ax = plt.figure().subplots()\n\n assert ax.xaxis.isDefault_majfmt\n assert ax.xaxis.isDefault_minfmt\n assert ax.yaxis.isDefault_majfmt\n assert ax.yaxis.isDefault_minfmt\n\n ax.yaxis.set_minor_formatter(formstr)\n\n assert ax.xaxis.isDefault_majfmt\n assert ax.xaxis.isDefault_minfmt\n assert ax.yaxis.isDefault_majfmt\n assert not ax.yaxis.isDefault_minfmt\n\n targ_strformatter = mticker.StrMethodFormatter(formstr)\n\n assert isinstance(ax.yaxis.get_minor_formatter(),\n mticker.StrMethodFormatter)\n\n assert ax.yaxis.get_minor_formatter().fmt == targ_strformatter.fmt\n\n\n@image_comparison([\"twin_axis_locators_formatters\"])\ndef test_twin_axis_locators_formatters():\n vals = np.linspace(0, 1, num=5, endpoint=True)\n locs = np.sin(np.pi * vals / 2.0)\n\n majl = plt.FixedLocator(locs)\n minl = plt.FixedLocator([0.1, 0.2, 0.3])\n\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n ax1.plot([0.1, 100], [0, 1])\n ax1.yaxis.set_major_locator(majl)\n ax1.yaxis.set_minor_locator(minl)\n ax1.yaxis.set_major_formatter(plt.FormatStrFormatter('%08.2lf'))\n ax1.yaxis.set_minor_formatter(plt.FixedFormatter(['tricks', 'mind',\n 'jedi']))\n\n ax1.xaxis.set_major_locator(plt.LinearLocator())\n ax1.xaxis.set_minor_locator(plt.FixedLocator([15, 35, 55, 75]))\n ax1.xaxis.set_major_formatter(plt.FormatStrFormatter('%05.2lf'))\n ax1.xaxis.set_minor_formatter(plt.FixedFormatter(['c', '3', 'p', 'o']))\n ax1.twiny()\n ax1.twinx()\n\n\ndef test_twinx_cla():\n fig, ax = plt.subplots()\n ax2 = ax.twinx()\n ax3 = ax2.twiny()\n plt.draw()\n assert not ax2.xaxis.get_visible()\n assert not ax2.patch.get_visible()\n ax2.cla()\n 
ax3.cla()\n\n assert not ax2.xaxis.get_visible()\n assert not ax2.patch.get_visible()\n assert ax2.yaxis.get_visible()\n\n assert ax3.xaxis.get_visible()\n assert not ax3.patch.get_visible()\n assert not ax3.yaxis.get_visible()\n\n assert ax.xaxis.get_visible()\n assert ax.patch.get_visible()\n assert ax.yaxis.get_visible()\n\n\[email protected]('twin', ('x', 'y'))\n@check_figures_equal(extensions=['png'], tol=0.19)\ndef test_twin_logscale(fig_test, fig_ref, twin):\n twin_func = f'twin{twin}' # test twinx or twiny\n set_scale = f'set_{twin}scale'\n x = np.arange(1, 100)\n\n # Change scale after twinning.\n ax_test = fig_test.add_subplot(2, 1, 1)\n ax_twin = getattr(ax_test, twin_func)()\n getattr(ax_test, set_scale)('log')\n ax_twin.plot(x, x)\n\n # Twin after changing scale.\n ax_test = fig_test.add_subplot(2, 1, 2)\n getattr(ax_test, set_scale)('log')\n ax_twin = getattr(ax_test, twin_func)()\n ax_twin.plot(x, x)\n\n for i in [1, 2]:\n ax_ref = fig_ref.add_subplot(2, 1, i)\n getattr(ax_ref, set_scale)('log')\n ax_ref.plot(x, x)\n\n # This is a hack because twinned Axes double-draw the frame.\n # Remove this when that is fixed.\n Path = matplotlib.path.Path\n fig_ref.add_artist(\n matplotlib.patches.PathPatch(\n Path([[0, 0], [0, 1],\n [0, 1], [1, 1],\n [1, 1], [1, 0],\n [1, 0], [0, 0]],\n [Path.MOVETO, Path.LINETO] * 4),\n transform=ax_ref.transAxes,\n facecolor='none',\n edgecolor=mpl.rcParams['axes.edgecolor'],\n linewidth=mpl.rcParams['axes.linewidth'],\n capstyle='projecting'))\n\n remove_ticks_and_titles(fig_test)\n remove_ticks_and_titles(fig_ref)\n\n\n@image_comparison(['twin_autoscale.png'])\ndef test_twinx_axis_scales():\n x = np.array([0, 0.5, 1])\n y = 0.5 * x\n x2 = np.array([0, 1, 2])\n y2 = 2 * x2\n\n fig = plt.figure()\n ax = fig.add_axes((0, 0, 1, 1), autoscalex_on=False, autoscaley_on=False)\n ax.plot(x, y, color='blue', lw=10)\n\n ax2 = plt.twinx(ax)\n ax2.plot(x2, y2, 'r--', lw=5)\n\n ax.margins(0, 0)\n ax2.margins(0, 0)\n\n\ndef test_twin_inherit_autoscale_setting():\n fig, ax = plt.subplots()\n ax_x_on = ax.twinx()\n ax.set_autoscalex_on(False)\n ax_x_off = ax.twinx()\n\n assert ax_x_on.get_autoscalex_on()\n assert not ax_x_off.get_autoscalex_on()\n\n ax_y_on = ax.twiny()\n ax.set_autoscaley_on(False)\n ax_y_off = ax.twiny()\n\n assert ax_y_on.get_autoscaley_on()\n assert not ax_y_off.get_autoscaley_on()\n\n\ndef test_inverted_cla():\n # GitHub PR #5450. Setting autoscale should reset\n # axes to be non-inverted.\n # plotting an image, then 1d graph, axis is now down\n fig = plt.figure(0)\n ax = fig.gca()\n # 1. test that a new axis is not inverted per default\n assert not ax.xaxis_inverted()\n assert not ax.yaxis_inverted()\n img = np.random.random((100, 100))\n ax.imshow(img)\n # 2. test that a image axis is inverted\n assert not ax.xaxis_inverted()\n assert ax.yaxis_inverted()\n # 3. test that clearing and plotting a line, axes are\n # not inverted\n ax.cla()\n x = np.linspace(0, 2*np.pi, 100)\n ax.plot(x, np.cos(x))\n assert not ax.xaxis_inverted()\n assert not ax.yaxis_inverted()\n\n # 4. autoscaling should not bring back axes to normal\n ax.cla()\n ax.imshow(img)\n plt.autoscale()\n assert not ax.xaxis_inverted()\n assert ax.yaxis_inverted()\n\n # 5. two shared axes. 
Inverting the master axis should invert the shared\n # axes; clearing the master axis should bring axes in shared\n # axes back to normal.\n ax0 = plt.subplot(211)\n ax1 = plt.subplot(212, sharey=ax0)\n ax0.yaxis.set_inverted(True)\n assert ax1.yaxis_inverted()\n ax1.plot(x, np.cos(x))\n ax0.cla()\n assert not ax1.yaxis_inverted()\n ax1.cla()\n # 6. clearing the nonmaster should not touch limits\n ax0.imshow(img)\n ax1.plot(x, np.cos(x))\n ax1.cla()\n assert ax.yaxis_inverted()\n\n # clean up\n plt.close(fig)\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_minorticks_on_rcParams_both(fig_test, fig_ref):\n with matplotlib.rc_context({\"xtick.minor.visible\": True,\n \"ytick.minor.visible\": True}):\n ax_test = fig_test.subplots()\n ax_test.plot([0, 1], [0, 1])\n ax_ref = fig_ref.subplots()\n ax_ref.plot([0, 1], [0, 1])\n ax_ref.minorticks_on()\n\n\n@image_comparison([\"autoscale_tiny_range\"], remove_text=True)\ndef test_autoscale_tiny_range():\n # github pull #904\n fig, axs = plt.subplots(2, 2)\n for i, ax in enumerate(axs.flat):\n y1 = 10**(-11 - i)\n ax.plot([0, 1], [1, 1 + y1])\n\n\[email protected]('default')\ndef test_autoscale_tight():\n fig, ax = plt.subplots(1, 1)\n ax.plot([1, 2, 3, 4])\n ax.autoscale(enable=True, axis='x', tight=False)\n ax.autoscale(enable=True, axis='y', tight=True)\n assert_allclose(ax.get_xlim(), (-0.15, 3.15))\n assert_allclose(ax.get_ylim(), (1.0, 4.0))\n\n\[email protected]('default')\ndef test_autoscale_log_shared():\n # related to github #7587\n # array starts at zero to trigger _minpos handling\n x = np.arange(100, dtype=float)\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n ax1.loglog(x, x)\n ax2.semilogx(x, x)\n ax1.autoscale(tight=True)\n ax2.autoscale(tight=True)\n plt.draw()\n lims = (x[1], x[-1])\n assert_allclose(ax1.get_xlim(), lims)\n assert_allclose(ax1.get_ylim(), lims)\n assert_allclose(ax2.get_xlim(), lims)\n assert_allclose(ax2.get_ylim(), (x[0], x[-1]))\n\n\[email protected]('default')\ndef test_use_sticky_edges():\n fig, ax = plt.subplots()\n ax.imshow([[0, 1], [2, 3]], origin='lower')\n assert_allclose(ax.get_xlim(), (-0.5, 1.5))\n assert_allclose(ax.get_ylim(), (-0.5, 1.5))\n ax.use_sticky_edges = False\n ax.autoscale()\n xlim = (-0.5 - 2 * ax._xmargin, 1.5 + 2 * ax._xmargin)\n ylim = (-0.5 - 2 * ax._ymargin, 1.5 + 2 * ax._ymargin)\n assert_allclose(ax.get_xlim(), xlim)\n assert_allclose(ax.get_ylim(), ylim)\n # Make sure it is reversible:\n ax.use_sticky_edges = True\n ax.autoscale()\n assert_allclose(ax.get_xlim(), (-0.5, 1.5))\n assert_allclose(ax.get_ylim(), (-0.5, 1.5))\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_sticky_shared_axes(fig_test, fig_ref):\n # Check that sticky edges work whether they are set in an axes that is a\n # \"master\" in a share, or an axes that is a \"follower\".\n Z = np.arange(15).reshape(3, 5)\n\n ax0 = fig_test.add_subplot(211)\n ax1 = fig_test.add_subplot(212, sharex=ax0)\n ax1.pcolormesh(Z)\n\n ax0 = fig_ref.add_subplot(212)\n ax1 = fig_ref.add_subplot(211, sharex=ax0)\n ax0.pcolormesh(Z)\n\n\n@image_comparison(['offset_points'], remove_text=True)\ndef test_basic_annotate():\n # Setup some data\n t = np.arange(0.0, 5.0, 0.01)\n s = np.cos(2.0*np.pi * t)\n\n # Offset Points\n\n fig = plt.figure()\n ax = fig.add_subplot(autoscale_on=False, xlim=(-1, 5), ylim=(-3, 5))\n line, = ax.plot(t, s, lw=3, color='purple')\n\n ax.annotate('local max', xy=(3, 1), xycoords='data',\n xytext=(3, 3), textcoords='offset points')\n\n\ndef test_annotate_parameter_warn():\n fig, ax = 
plt.subplots()\n with pytest.warns(MatplotlibDeprecationWarning,\n match=r\"The \\'s\\' parameter of annotate\\(\\) \"\n \"has been renamed \\'text\\'\"):\n ax.annotate(s='now named text', xy=(0, 1))\n\n\n@image_comparison(['arrow_simple.png'], remove_text=True)\ndef test_arrow_simple():\n # Simple image test for ax.arrow\n # kwargs that take discrete values\n length_includes_head = (True, False)\n shape = ('full', 'left', 'right')\n head_starts_at_zero = (True, False)\n # Create outer product of values\n kwargs = product(length_includes_head, shape, head_starts_at_zero)\n\n fig, axs = plt.subplots(3, 4)\n for i, (ax, kwarg) in enumerate(zip(axs.flat, kwargs)):\n ax.set_xlim(-2, 2)\n ax.set_ylim(-2, 2)\n # Unpack kwargs\n (length_includes_head, shape, head_starts_at_zero) = kwarg\n theta = 2 * np.pi * i / 12\n # Draw arrow\n ax.arrow(0, 0, np.sin(theta), np.cos(theta),\n width=theta/100,\n length_includes_head=length_includes_head,\n shape=shape,\n head_starts_at_zero=head_starts_at_zero,\n head_width=theta / 10,\n head_length=theta / 10)\n\n\ndef test_arrow_empty():\n _, ax = plt.subplots()\n # Create an empty FancyArrow\n ax.arrow(0, 0, 0, 0, head_length=0)\n\n\ndef test_arrow_in_view():\n _, ax = plt.subplots()\n ax.arrow(1, 1, 1, 1)\n assert ax.get_xlim() == (0.8, 2.2)\n assert ax.get_ylim() == (0.8, 2.2)\n\n\ndef test_annotate_default_arrow():\n # Check that we can make an annotation arrow with only default properties.\n fig, ax = plt.subplots()\n ann = ax.annotate(\"foo\", (0, 1), xytext=(2, 3))\n assert ann.arrow_patch is None\n ann = ax.annotate(\"foo\", (0, 1), xytext=(2, 3), arrowprops={})\n assert ann.arrow_patch is not None\n\n\n@image_comparison(['fill_units.png'], savefig_kwarg={'dpi': 60})\ndef test_fill_units():\n import matplotlib.testing.jpl_units as units\n units.register()\n\n # generate some data\n t = units.Epoch(\"ET\", dt=datetime.datetime(2009, 4, 27))\n value = 10.0 * units.deg\n day = units.Duration(\"ET\", 24.0 * 60.0 * 60.0)\n dt = np.arange('2009-04-27', '2009-04-29', dtype='datetime64[D]')\n dtn = mdates.date2num(dt)\n\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n\n ax1.plot([t], [value], yunits='deg', color='red')\n ind = [0, 0, 1, 1]\n ax1.fill(dtn[ind], [0.0, 0.0, 90.0, 0.0], 'b')\n\n ax2.plot([t], [value], yunits='deg', color='red')\n ax2.fill([t, t, t + day, t + day],\n [0.0, 0.0, 90.0, 0.0], 'b')\n\n ax3.plot([t], [value], yunits='deg', color='red')\n ax3.fill(dtn[ind],\n [0 * units.deg, 0 * units.deg, 90 * units.deg, 0 * units.deg],\n 'b')\n\n ax4.plot([t], [value], yunits='deg', color='red')\n ax4.fill([t, t, t + day, t + day],\n [0 * units.deg, 0 * units.deg, 90 * units.deg, 0 * units.deg],\n facecolor=\"blue\")\n fig.autofmt_xdate()\n\n\ndef test_plot_format_kwarg_redundant():\n with pytest.warns(UserWarning, match=\"marker .* redundantly defined\"):\n plt.plot([0], [0], 'o', marker='x')\n with pytest.warns(UserWarning, match=\"linestyle .* redundantly defined\"):\n plt.plot([0], [0], '-', linestyle='--')\n with pytest.warns(UserWarning, match=\"color .* redundantly defined\"):\n plt.plot([0], [0], 'r', color='blue')\n # smoke-test: should not warn\n plt.errorbar([0], [0], fmt='none', color='blue')\n\n\n@image_comparison(['single_point', 'single_point'])\ndef test_single_point():\n # Issue #1796: don't let lines.marker affect the grid\n matplotlib.rcParams['lines.marker'] = 'o'\n matplotlib.rcParams['axes.grid'] = True\n\n fig, (ax1, ax2) = plt.subplots(2)\n ax1.plot([0], [0], 'o')\n ax2.plot([1], [1], 'o')\n\n # Reuse testcase from above 
for a labeled data test\n data = {'a': [0], 'b': [1]}\n\n fig, (ax1, ax2) = plt.subplots(2)\n ax1.plot('a', 'a', 'o', data=data)\n ax2.plot('b', 'b', 'o', data=data)\n\n\n@image_comparison(['single_date.png'], style='mpl20')\ndef test_single_date():\n\n # use former defaults to match existing baseline image\n plt.rcParams['axes.formatter.limits'] = -7, 7\n dt = mdates.date2num(np.datetime64('0000-12-31'))\n\n time1 = [721964.0]\n data1 = [-65.54]\n\n fig, ax = plt.subplots(2, 1)\n ax[0].plot_date(time1 + dt, data1, 'o', color='r')\n ax[1].plot(time1, data1, 'o', color='r')\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_shaped_data(fig_test, fig_ref):\n row = np.arange(10).reshape((1, -1))\n col = np.arange(0, 100, 10).reshape((-1, 1))\n\n axs = fig_test.subplots(2)\n axs[0].plot(row) # Actually plots nothing (columns are single points).\n axs[1].plot(col) # Same as plotting 1d.\n\n axs = fig_ref.subplots(2)\n # xlim from the implicit \"x=0\", ylim from the row datalim.\n axs[0].set(xlim=(-.06, .06), ylim=(0, 9))\n axs[1].plot(col.ravel())\n\n\ndef test_structured_data():\n # support for structured data\n pts = np.array([(1, 1), (2, 2)], dtype=[(\"ones\", float), (\"twos\", float)])\n\n # this should not read second name as a format and raise ValueError\n axs = plt.figure().subplots(2)\n axs[0].plot(\"ones\", \"twos\", data=pts)\n axs[1].plot(\"ones\", \"twos\", \"r\", data=pts)\n\n\n@image_comparison(['aitoff_proj'], extensions=[\"png\"],\n remove_text=True, style='mpl20')\ndef test_aitoff_proj():\n \"\"\"\n Test aitoff projection ref.:\n https://github.com/matplotlib/matplotlib/pull/14451\n \"\"\"\n x = np.linspace(-np.pi, np.pi, 20)\n y = np.linspace(-np.pi / 2, np.pi / 2, 20)\n X, Y = np.meshgrid(x, y)\n\n fig, ax = plt.subplots(figsize=(8, 4.2),\n subplot_kw=dict(projection=\"aitoff\"))\n ax.grid()\n ax.plot(X.flat, Y.flat, 'o', markersize=4)\n\n\n@image_comparison(['axvspan_epoch'])\ndef test_axvspan_epoch():\n import matplotlib.testing.jpl_units as units\n units.register()\n\n # generate some data\n t0 = units.Epoch(\"ET\", dt=datetime.datetime(2009, 1, 20))\n tf = units.Epoch(\"ET\", dt=datetime.datetime(2009, 1, 21))\n dt = units.Duration(\"ET\", units.day.convert(\"sec\"))\n\n ax = plt.gca()\n ax.axvspan(t0, tf, facecolor=\"blue\", alpha=0.25)\n ax.set_xlim(t0 - 5.0*dt, tf + 5.0*dt)\n\n\n@image_comparison(['axhspan_epoch'], tol=0.02)\ndef test_axhspan_epoch():\n import matplotlib.testing.jpl_units as units\n units.register()\n\n # generate some data\n t0 = units.Epoch(\"ET\", dt=datetime.datetime(2009, 1, 20))\n tf = units.Epoch(\"ET\", dt=datetime.datetime(2009, 1, 21))\n dt = units.Duration(\"ET\", units.day.convert(\"sec\"))\n\n ax = plt.gca()\n ax.axhspan(t0, tf, facecolor=\"blue\", alpha=0.25)\n ax.set_ylim(t0 - 5.0*dt, tf + 5.0*dt)\n\n\n@image_comparison(['hexbin_extent.png', 'hexbin_extent.png'], remove_text=True)\ndef test_hexbin_extent():\n # this test exposes sf bug 2856228\n fig, ax = plt.subplots()\n data = (np.arange(2000) / 2000).reshape((2, 1000))\n x, y = data\n\n ax.hexbin(x, y, extent=[.1, .3, .6, .7])\n\n # Reuse testcase from above for a labeled data test\n data = {\"x\": x, \"y\": y}\n\n fig, ax = plt.subplots()\n ax.hexbin(\"x\", \"y\", extent=[.1, .3, .6, .7], data=data)\n\n\n@image_comparison(['hexbin_empty.png'], remove_text=True)\ndef test_hexbin_empty():\n # From #3886: creating hexbin from empty dataset raises ValueError\n ax = plt.gca()\n ax.hexbin([], [])\n\n\ndef test_hexbin_pickable():\n # From #1973: Test that picking a hexbin collection 
works\n fig, ax = plt.subplots()\n data = (np.arange(200) / 200).reshape((2, 100))\n x, y = data\n hb = ax.hexbin(x, y, extent=[.1, .3, .6, .7], picker=-1)\n mouse_event = SimpleNamespace(x=400, y=300)\n assert hb.contains(mouse_event)[0]\n\n\n@image_comparison(['hexbin_log.png'], style='mpl20')\ndef test_hexbin_log():\n # Issue #1636 (and also test log scaled colorbar)\n\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n np.random.seed(19680801)\n n = 100000\n x = np.random.standard_normal(n)\n y = 2.0 + 3.0 * x + 4.0 * np.random.standard_normal(n)\n y = np.power(2, y * 0.5)\n\n fig, ax = plt.subplots()\n h = ax.hexbin(x, y, yscale='log', bins='log')\n plt.colorbar(h)\n\n\ndef test_inverted_limits():\n # Test gh:1553\n # Calling invert_xaxis prior to plotting should not disable autoscaling\n # while still maintaining the inverted direction\n fig, ax = plt.subplots()\n ax.invert_xaxis()\n ax.plot([-5, -3, 2, 4], [1, 2, -3, 5])\n\n assert ax.get_xlim() == (4, -5)\n assert ax.get_ylim() == (-3, 5)\n plt.close()\n\n fig, ax = plt.subplots()\n ax.invert_yaxis()\n ax.plot([-5, -3, 2, 4], [1, 2, -3, 5])\n\n assert ax.get_xlim() == (-5, 4)\n assert ax.get_ylim() == (5, -3)\n\n # Test inverting nonlinear axes.\n fig, ax = plt.subplots()\n ax.set_yscale(\"log\")\n ax.set_ylim(10, 1)\n assert ax.get_ylim() == (10, 1)\n\n\n@image_comparison(['nonfinite_limits'])\ndef test_nonfinite_limits():\n x = np.arange(0., np.e, 0.01)\n # silence divide by zero warning from log(0)\n with np.errstate(divide='ignore'):\n y = np.log(x)\n x[len(x)//2] = np.nan\n fig, ax = plt.subplots()\n ax.plot(x, y)\n\n\[email protected]('default')\[email protected]('plot_fun',\n ['scatter', 'plot', 'fill_between'])\n@check_figures_equal(extensions=[\"png\"])\ndef test_limits_empty_data(plot_fun, fig_test, fig_ref):\n # Check that plotting empty data doesn't change autoscaling of dates\n x = np.arange(\"2010-01-01\", \"2011-01-01\", dtype=\"datetime64[D]\")\n\n ax_test = fig_test.subplots()\n ax_ref = fig_ref.subplots()\n\n getattr(ax_test, plot_fun)([], [])\n\n for ax in [ax_test, ax_ref]:\n getattr(ax, plot_fun)(x, range(len(x)), color='C0')\n\n\n@image_comparison(['imshow', 'imshow'], remove_text=True, style='mpl20')\ndef test_imshow():\n # use former defaults to match existing baseline image\n matplotlib.rcParams['image.interpolation'] = 'nearest'\n # Create a NxN image\n N = 100\n (x, y) = np.indices((N, N))\n x -= N//2\n y -= N//2\n r = np.sqrt(x**2+y**2-x*y)\n\n # Create a contour plot at N/4 and extract both the clip path and transform\n fig, ax = plt.subplots()\n ax.imshow(r)\n\n # Reuse testcase from above for a labeled data test\n data = {\"r\": r}\n fig, ax = plt.subplots()\n ax.imshow(\"r\", data=data)\n\n\n@image_comparison(['imshow_clip'], style='mpl20')\ndef test_imshow_clip():\n # As originally reported by Gellule Xg <[email protected]>\n # use former defaults to match existing baseline image\n matplotlib.rcParams['image.interpolation'] = 'nearest'\n\n # Create a NxN image\n N = 100\n (x, y) = np.indices((N, N))\n x -= N//2\n y -= N//2\n r = np.sqrt(x**2+y**2-x*y)\n\n # Create a contour plot at N/4 and extract both the clip path and transform\n fig, ax = plt.subplots()\n\n c = ax.contour(r, [N/4])\n x = c.collections[0]\n clip_path = x.get_paths()[0]\n clip_transform = x.get_transform()\n\n clip_path = mtransforms.TransformedPath(clip_path, clip_transform)\n\n # Plot the image clipped by the contour\n ax.imshow(r, 
clip_path=clip_path)\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_imshow_norm_vminvmax(fig_test, fig_ref):\n \"\"\"Parameters vmin, vmax should be ignored if norm is given.\"\"\"\n a = [[1, 2], [3, 4]]\n ax = fig_ref.subplots()\n ax.imshow(a, vmin=0, vmax=5)\n ax = fig_test.subplots()\n with pytest.warns(MatplotlibDeprecationWarning,\n match=\"Passing parameters norm and vmin/vmax \"\n \"simultaneously is deprecated.\"):\n ax.imshow(a, norm=mcolors.Normalize(-10, 10), vmin=0, vmax=5)\n\n\n@image_comparison(['polycollection_joinstyle'], remove_text=True)\ndef test_polycollection_joinstyle():\n # Bug #2890979 reported by Matthew West\n fig, ax = plt.subplots()\n verts = np.array([[1, 1], [1, 2], [2, 2], [2, 1]])\n c = mpl.collections.PolyCollection([verts], linewidths=40)\n ax.add_collection(c)\n ax.set_xbound(0, 3)\n ax.set_ybound(0, 3)\n\n\[email protected](\n 'x, y1, y2', [\n (np.zeros((2, 2)), 3, 3),\n (np.arange(0.0, 2, 0.02), np.zeros((2, 2)), 3),\n (np.arange(0.0, 2, 0.02), 3, np.zeros((2, 2)))\n ], ids=[\n '2d_x_input',\n '2d_y1_input',\n '2d_y2_input'\n ]\n)\ndef test_fill_between_input(x, y1, y2):\n fig, ax = plt.subplots()\n with pytest.raises(ValueError):\n ax.fill_between(x, y1, y2)\n\n\[email protected](\n 'y, x1, x2', [\n (np.zeros((2, 2)), 3, 3),\n (np.arange(0.0, 2, 0.02), np.zeros((2, 2)), 3),\n (np.arange(0.0, 2, 0.02), 3, np.zeros((2, 2)))\n ], ids=[\n '2d_y_input',\n '2d_x1_input',\n '2d_x2_input'\n ]\n)\ndef test_fill_betweenx_input(y, x1, x2):\n fig, ax = plt.subplots()\n with pytest.raises(ValueError):\n ax.fill_betweenx(y, x1, x2)\n\n\n@image_comparison(['fill_between_interpolate'], remove_text=True)\ndef test_fill_between_interpolate():\n x = np.arange(0.0, 2, 0.02)\n y1 = np.sin(2*np.pi*x)\n y2 = 1.2*np.sin(4*np.pi*x)\n\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n ax1.plot(x, y1, x, y2, color='black')\n ax1.fill_between(x, y1, y2, where=y2 >= y1, facecolor='white', hatch='/',\n interpolate=True)\n ax1.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red',\n interpolate=True)\n\n # Test support for masked arrays.\n y2 = np.ma.masked_greater(y2, 1.0)\n # Test that plotting works for masked arrays with the first element masked\n y2[0] = np.ma.masked\n ax2.plot(x, y1, x, y2, color='black')\n ax2.fill_between(x, y1, y2, where=y2 >= y1, facecolor='green',\n interpolate=True)\n ax2.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red',\n interpolate=True)\n\n\n@image_comparison(['fill_between_interpolate_decreasing'],\n style='mpl20', remove_text=True)\ndef test_fill_between_interpolate_decreasing():\n p = np.array([724.3, 700, 655])\n t = np.array([9.4, 7, 2.2])\n prof = np.array([7.9, 6.6, 3.8])\n\n fig, ax = plt.subplots(figsize=(9, 9))\n\n ax.plot(t, p, 'tab:red')\n ax.plot(prof, p, 'k')\n\n ax.fill_betweenx(p, t, prof, where=prof < t,\n facecolor='blue', interpolate=True, alpha=0.4)\n ax.fill_betweenx(p, t, prof, where=prof > t,\n facecolor='red', interpolate=True, alpha=0.4)\n\n ax.set_xlim(0, 30)\n ax.set_ylim(800, 600)\n\n\n# test_symlog and test_symlog2 used to have baseline images in all three\n# formats, but the png and svg baselines got invalidated by the removal of\n# minor tick overstriking.\n@image_comparison(['symlog.pdf'])\ndef test_symlog():\n x = np.array([0, 1, 2, 4, 6, 9, 12, 24])\n y = np.array([1000000, 500000, 100000, 100, 5, 0, 0, 0])\n\n fig, ax = plt.subplots()\n ax.plot(x, y)\n ax.set_yscale('symlog')\n ax.set_xscale('linear')\n ax.set_ylim(-1, 10000000)\n\n\n@image_comparison(['symlog2.pdf'], remove_text=True)\ndef 
test_symlog2():\n # Numbers from -50 to 50, with 0.1 as step\n x = np.arange(-50, 50, 0.001)\n\n fig, axs = plt.subplots(5, 1)\n for ax, linthresh in zip(axs, [20., 2., 1., 0.1, 0.01]):\n ax.plot(x, x)\n ax.set_xscale('symlog', linthresh=linthresh)\n ax.grid(True)\n axs[-1].set_ylim(-0.1, 0.1)\n\n\ndef test_pcolorargs_5205():\n # Smoketest to catch issue found in gh:5205\n x = [-1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5]\n y = [-1.5, -1.25, -1.0, -0.75, -0.5, -0.25, 0,\n 0.25, 0.5, 0.75, 1.0, 1.25, 1.5]\n X, Y = np.meshgrid(x, y)\n Z = np.hypot(X, Y)\n\n plt.pcolor(Z)\n plt.pcolor(list(Z))\n plt.pcolor(x, y, Z[:-1, :-1])\n plt.pcolor(X, Y, list(Z[:-1, :-1]))\n\n\n@image_comparison(['pcolormesh'], remove_text=True)\ndef test_pcolormesh():\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n n = 12\n x = np.linspace(-1.5, 1.5, n)\n y = np.linspace(-1.5, 1.5, n*2)\n X, Y = np.meshgrid(x, y)\n Qx = np.cos(Y) - np.cos(X)\n Qz = np.sin(Y) + np.sin(X)\n Qx = (Qx + 1.1)\n Z = np.hypot(X, Y) / 5\n Z = (Z - Z.min()) / Z.ptp()\n\n # The color array can include masked values:\n Zm = ma.masked_where(np.abs(Qz) < 0.5 * np.max(Qz), Z)\n\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n ax1.pcolormesh(Qx, Qz, Z[:-1, :-1], lw=0.5, edgecolors='k')\n ax2.pcolormesh(Qx, Qz, Z[:-1, :-1], lw=2, edgecolors=['b', 'w'])\n ax3.pcolormesh(Qx, Qz, Z, shading=\"gouraud\")\n\n\n@image_comparison(['pcolormesh_alpha'], extensions=[\"png\", \"pdf\"],\n remove_text=True)\ndef test_pcolormesh_alpha():\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n n = 12\n X, Y = np.meshgrid(\n np.linspace(-1.5, 1.5, n),\n np.linspace(-1.5, 1.5, n*2)\n )\n Qx = X\n Qy = Y + np.sin(X)\n Z = np.hypot(X, Y) / 5\n Z = (Z - Z.min()) / Z.ptp()\n vir = plt.get_cmap(\"viridis\", 16)\n # make another colormap with varying alpha\n colors = vir(np.arange(16))\n colors[:, 3] = 0.5 + 0.5*np.sin(np.arange(16))\n cmap = mcolors.ListedColormap(colors)\n\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n for ax in ax1, ax2, ax3, ax4:\n ax.add_patch(mpatches.Rectangle(\n (0, -1.5), 1.5, 3, facecolor=[.7, .1, .1, .5], zorder=0\n ))\n # ax1, ax2: constant alpha\n ax1.pcolormesh(Qx, Qy, Z[:-1, :-1], cmap=vir, alpha=0.4,\n shading='flat', zorder=1)\n ax2.pcolormesh(Qx, Qy, Z, cmap=vir, alpha=0.4, shading='gouraud', zorder=1)\n # ax3, ax4: alpha from colormap\n ax3.pcolormesh(Qx, Qy, Z[:-1, :-1], cmap=cmap, shading='flat', zorder=1)\n ax4.pcolormesh(Qx, Qy, Z, cmap=cmap, shading='gouraud', zorder=1)\n\n\n@image_comparison(['pcolormesh_datetime_axis.png'],\n remove_text=False, style='mpl20')\ndef test_pcolormesh_datetime_axis():\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n fig = plt.figure()\n fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15)\n base = datetime.datetime(2013, 1, 1)\n x = np.array([base + datetime.timedelta(days=d) for d in range(21)])\n y = np.arange(21)\n z1, z2 = np.meshgrid(np.arange(20), np.arange(20))\n z = z1 * z2\n plt.subplot(221)\n plt.pcolormesh(x[:-1], y[:-1], z[:-1, :-1])\n plt.subplot(222)\n plt.pcolormesh(x, y, z)\n x = np.repeat(x[np.newaxis], 21, axis=0)\n y = np.repeat(y[:, np.newaxis], 21, axis=1)\n plt.subplot(223)\n plt.pcolormesh(x[:-1, :-1], y[:-1, :-1], z[:-1, :-1])\n plt.subplot(224)\n plt.pcolormesh(x, y, z)\n for ax in fig.get_axes():\n for label in ax.get_xticklabels():\n label.set_ha('right')\n 
label.set_rotation(30)\n\n\n@image_comparison(['pcolor_datetime_axis.png'],\n remove_text=False, style='mpl20')\ndef test_pcolor_datetime_axis():\n fig = plt.figure()\n fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15)\n base = datetime.datetime(2013, 1, 1)\n x = np.array([base + datetime.timedelta(days=d) for d in range(21)])\n y = np.arange(21)\n z1, z2 = np.meshgrid(np.arange(20), np.arange(20))\n z = z1 * z2\n plt.subplot(221)\n plt.pcolor(x[:-1], y[:-1], z[:-1, :-1])\n plt.subplot(222)\n plt.pcolor(x, y, z)\n x = np.repeat(x[np.newaxis], 21, axis=0)\n y = np.repeat(y[:, np.newaxis], 21, axis=1)\n plt.subplot(223)\n plt.pcolor(x[:-1, :-1], y[:-1, :-1], z[:-1, :-1])\n plt.subplot(224)\n plt.pcolor(x, y, z)\n for ax in fig.get_axes():\n for label in ax.get_xticklabels():\n label.set_ha('right')\n label.set_rotation(30)\n\n\ndef test_pcolorargs():\n n = 12\n x = np.linspace(-1.5, 1.5, n)\n y = np.linspace(-1.5, 1.5, n*2)\n X, Y = np.meshgrid(x, y)\n Z = np.hypot(X, Y) / 5\n\n _, ax = plt.subplots()\n with pytest.raises(TypeError):\n ax.pcolormesh(y, x, Z)\n with pytest.raises(TypeError):\n ax.pcolormesh(X, Y, Z.T)\n with pytest.raises(TypeError):\n ax.pcolormesh(x, y, Z[:-1, :-1], shading=\"gouraud\")\n with pytest.raises(TypeError):\n ax.pcolormesh(X, Y, Z[:-1, :-1], shading=\"gouraud\")\n x[0] = np.NaN\n with pytest.raises(ValueError):\n ax.pcolormesh(x, y, Z[:-1, :-1])\n with np.errstate(invalid='ignore'):\n x = np.ma.array(x, mask=(x < 0))\n with pytest.raises(ValueError):\n ax.pcolormesh(x, y, Z[:-1, :-1])\n # Expect a warning with non-increasing coordinates\n x = [359, 0, 1]\n y = [-10, 10]\n X, Y = np.meshgrid(x, y)\n Z = np.zeros(X.shape)\n with pytest.warns(UserWarning,\n match='are not monotonically increasing or decreasing'):\n ax.pcolormesh(X, Y, Z, shading='auto')\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_pcolornearest(fig_test, fig_ref):\n ax = fig_test.subplots()\n x = np.arange(0, 10)\n y = np.arange(0, 3)\n np.random.seed(19680801)\n Z = np.random.randn(2, 9)\n ax.pcolormesh(x, y, Z, shading='flat')\n\n ax = fig_ref.subplots()\n # specify the centers\n x2 = x[:-1] + np.diff(x) / 2\n y2 = y[:-1] + np.diff(y) / 2\n ax.pcolormesh(x2, y2, Z, shading='nearest')\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_pcolornearestunits(fig_test, fig_ref):\n ax = fig_test.subplots()\n x = [datetime.datetime.fromtimestamp(x * 3600) for x in range(10)]\n y = np.arange(0, 3)\n np.random.seed(19680801)\n Z = np.random.randn(2, 9)\n ax.pcolormesh(x, y, Z, shading='flat')\n\n ax = fig_ref.subplots()\n # specify the centers\n x2 = [datetime.datetime.fromtimestamp((x + 0.5) * 3600) for x in range(9)]\n y2 = y[:-1] + np.diff(y) / 2\n ax.pcolormesh(x2, y2, Z, shading='nearest')\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_pcolordropdata(fig_test, fig_ref):\n ax = fig_test.subplots()\n x = np.arange(0, 10)\n y = np.arange(0, 4)\n np.random.seed(19680801)\n Z = np.random.randn(3, 9)\n # fake dropping the data\n ax.pcolormesh(x[:-1], y[:-1], Z[:-1, :-1], shading='flat')\n\n ax = fig_ref.subplots()\n # test dropping the data...\n x2 = x[:-1]\n y2 = y[:-1]\n with pytest.warns(MatplotlibDeprecationWarning):\n ax.pcolormesh(x2, y2, Z, shading='flat')\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_pcolorauto(fig_test, fig_ref):\n ax = fig_test.subplots()\n x = np.arange(0, 10)\n y = np.arange(0, 4)\n np.random.seed(19680801)\n Z = np.random.randn(3, 9)\n ax.pcolormesh(x, y, Z, shading='auto')\n\n ax = fig_ref.subplots()\n # specify the centers\n x2 = 
x[:-1] + np.diff(x) / 2\n y2 = y[:-1] + np.diff(y) / 2\n ax.pcolormesh(x2, y2, Z, shading='auto')\n\n\n@image_comparison(['canonical'])\ndef test_canonical():\n fig, ax = plt.subplots()\n ax.plot([1, 2, 3])\n\n\n@image_comparison(['arc_angles.png'], remove_text=True, style='default')\ndef test_arc_angles():\n # Ellipse parameters\n w = 2\n h = 1\n centre = (0.2, 0.5)\n scale = 2\n\n fig, axs = plt.subplots(3, 3)\n for i, ax in enumerate(axs.flat):\n theta2 = i * 360 / 9\n theta1 = theta2 - 45\n\n ax.add_patch(mpatches.Ellipse(centre, w, h, alpha=0.3))\n ax.add_patch(mpatches.Arc(centre, w, h, theta1=theta1, theta2=theta2))\n # Straight lines intersecting start and end of arc\n ax.plot([scale * np.cos(np.deg2rad(theta1)) + centre[0],\n centre[0],\n scale * np.cos(np.deg2rad(theta2)) + centre[0]],\n [scale * np.sin(np.deg2rad(theta1)) + centre[1],\n centre[1],\n scale * np.sin(np.deg2rad(theta2)) + centre[1]])\n\n ax.set_xlim(-scale, scale)\n ax.set_ylim(-scale, scale)\n\n # This looks the same, but it triggers a different code path when it\n # gets large enough.\n w *= 10\n h *= 10\n centre = (centre[0] * 10, centre[1] * 10)\n scale *= 10\n\n\n@image_comparison(['arc_ellipse'], remove_text=True)\ndef test_arc_ellipse():\n xcenter, ycenter = 0.38, 0.52\n width, height = 1e-1, 3e-1\n angle = -30\n\n theta = np.deg2rad(np.arange(360))\n x = width / 2. * np.cos(theta)\n y = height / 2. * np.sin(theta)\n\n rtheta = np.deg2rad(angle)\n R = np.array([\n [np.cos(rtheta), -np.sin(rtheta)],\n [np.sin(rtheta), np.cos(rtheta)]])\n\n x, y = np.dot(R, np.array([x, y]))\n x += xcenter\n y += ycenter\n\n fig = plt.figure()\n ax = fig.add_subplot(211, aspect='auto')\n ax.fill(x, y, alpha=0.2, facecolor='yellow', edgecolor='yellow',\n linewidth=1, zorder=1)\n\n e1 = mpatches.Arc((xcenter, ycenter), width, height,\n angle=angle, linewidth=2, fill=False, zorder=2)\n\n ax.add_patch(e1)\n\n ax = fig.add_subplot(212, aspect='equal')\n ax.fill(x, y, alpha=0.2, facecolor='green', edgecolor='green', zorder=1)\n e2 = mpatches.Arc((xcenter, ycenter), width, height,\n angle=angle, linewidth=2, fill=False, zorder=2)\n\n ax.add_patch(e2)\n\n\ndef test_marker_as_markerstyle():\n fix, ax = plt.subplots()\n m = mmarkers.MarkerStyle('o')\n ax.plot([1, 2, 3], [3, 2, 1], marker=m)\n ax.scatter([1, 2, 3], [4, 3, 2], marker=m)\n ax.errorbar([1, 2, 3], [5, 4, 3], marker=m)\n\n\n@image_comparison(['markevery'], remove_text=True)\ndef test_markevery():\n x = np.linspace(0, 10, 100)\n y = np.sin(x) * np.sqrt(x/10 + 0.5)\n\n # check marker only plot\n fig, ax = plt.subplots()\n ax.plot(x, y, 'o', label='default')\n ax.plot(x, y, 'd', markevery=None, label='mark all')\n ax.plot(x, y, 's', markevery=10, label='mark every 10')\n ax.plot(x, y, '+', markevery=(5, 20), label='mark every 5 starting at 10')\n ax.legend()\n\n\n@image_comparison(['markevery_line'], remove_text=True)\ndef test_markevery_line():\n x = np.linspace(0, 10, 100)\n y = np.sin(x) * np.sqrt(x/10 + 0.5)\n\n # check line/marker combos\n fig, ax = plt.subplots()\n ax.plot(x, y, '-o', label='default')\n ax.plot(x, y, '-d', markevery=None, label='mark all')\n ax.plot(x, y, '-s', markevery=10, label='mark every 10')\n ax.plot(x, y, '-+', markevery=(5, 20), label='mark every 5 starting at 10')\n ax.legend()\n\n\n@image_comparison(['markevery_linear_scales'], remove_text=True, tol=0.001)\ndef test_markevery_linear_scales():\n cases = [None,\n 8,\n (30, 8),\n [16, 24, 30], [0, -1],\n slice(100, 200, 3),\n 0.1, 0.3, 1.5,\n (0.0, 0.1), (0.45, 0.1)]\n\n cols = 3\n gs = 
matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)\n\n delta = 0.11\n x = np.linspace(0, 10 - 2 * delta, 200) + delta\n y = np.sin(x) + 1.0 + delta\n\n for i, case in enumerate(cases):\n row = (i // cols)\n col = i % cols\n plt.subplot(gs[row, col])\n plt.title('markevery=%s' % str(case))\n plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)\n\n\n@image_comparison(['markevery_linear_scales_zoomed'], remove_text=True)\ndef test_markevery_linear_scales_zoomed():\n cases = [None,\n 8,\n (30, 8),\n [16, 24, 30], [0, -1],\n slice(100, 200, 3),\n 0.1, 0.3, 1.5,\n (0.0, 0.1), (0.45, 0.1)]\n\n cols = 3\n gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)\n\n delta = 0.11\n x = np.linspace(0, 10 - 2 * delta, 200) + delta\n y = np.sin(x) + 1.0 + delta\n\n for i, case in enumerate(cases):\n row = (i // cols)\n col = i % cols\n plt.subplot(gs[row, col])\n plt.title('markevery=%s' % str(case))\n plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)\n plt.xlim((6, 6.7))\n plt.ylim((1.1, 1.7))\n\n\n@image_comparison(['markevery_log_scales'], remove_text=True)\ndef test_markevery_log_scales():\n cases = [None,\n 8,\n (30, 8),\n [16, 24, 30], [0, -1],\n slice(100, 200, 3),\n 0.1, 0.3, 1.5,\n (0.0, 0.1), (0.45, 0.1)]\n\n cols = 3\n gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)\n\n delta = 0.11\n x = np.linspace(0, 10 - 2 * delta, 200) + delta\n y = np.sin(x) + 1.0 + delta\n\n for i, case in enumerate(cases):\n row = (i // cols)\n col = i % cols\n plt.subplot(gs[row, col])\n plt.title('markevery=%s' % str(case))\n plt.xscale('log')\n plt.yscale('log')\n plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)\n\n\n@image_comparison(['markevery_polar'], style='default', remove_text=True)\ndef test_markevery_polar():\n cases = [None,\n 8,\n (30, 8),\n [16, 24, 30], [0, -1],\n slice(100, 200, 3),\n 0.1, 0.3, 1.5,\n (0.0, 0.1), (0.45, 0.1)]\n\n cols = 3\n gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)\n\n r = np.linspace(0, 3.0, 200)\n theta = 2 * np.pi * r\n\n for i, case in enumerate(cases):\n row = (i // cols)\n col = i % cols\n plt.subplot(gs[row, col], polar=True)\n plt.title('markevery=%s' % str(case))\n plt.plot(theta, r, 'o', ls='-', ms=4, markevery=case)\n\n\n@image_comparison(['marker_edges'], remove_text=True)\ndef test_marker_edges():\n x = np.linspace(0, 1, 10)\n fig, ax = plt.subplots()\n ax.plot(x, np.sin(x), 'y.', ms=30.0, mew=0, mec='r')\n ax.plot(x+0.1, np.sin(x), 'y.', ms=30.0, mew=1, mec='r')\n ax.plot(x+0.2, np.sin(x), 'y.', ms=30.0, mew=2, mec='b')\n\n\n@image_comparison(['bar_tick_label_single.png', 'bar_tick_label_single.png'])\ndef test_bar_tick_label_single():\n # From 2516: plot bar with array of string labels for x axis\n ax = plt.gca()\n ax.bar(0, 1, align='edge', tick_label='0')\n\n # Reuse testcase from above for a labeled data test\n data = {\"a\": 0, \"b\": 1}\n fig, ax = plt.subplots()\n ax = plt.gca()\n ax.bar(\"a\", \"b\", align='edge', tick_label='0', data=data)\n\n\ndef test_nan_bar_values():\n fig, ax = plt.subplots()\n ax.bar([0, 1], [np.nan, 4])\n\n\ndef test_bar_ticklabel_fail():\n fig, ax = plt.subplots()\n ax.bar([], [])\n\n\n@image_comparison(['bar_tick_label_multiple.png'])\ndef test_bar_tick_label_multiple():\n # From 2516: plot bar with array of string labels for x axis\n ax = plt.gca()\n ax.bar([1, 2.5], [1, 2], width=[0.2, 0.5], tick_label=['a', 'b'],\n align='center')\n\n\n@image_comparison(['bar_tick_label_multiple_old_label_alignment.png'])\ndef test_bar_tick_label_multiple_old_alignment():\n # Test that the alignment 
of tick labels set via rcParams is backward compatible\n matplotlib.rcParams[\"ytick.alignment\"] = \"center\"\n ax = plt.gca()\n ax.bar([1, 2.5], [1, 2], width=[0.2, 0.5], tick_label=['a', 'b'],\n align='center')\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_bar_decimal_center(fig_test, fig_ref):\n ax = fig_test.subplots()\n x0 = [1.5, 8.4, 5.3, 4.2]\n y0 = [1.1, 2.2, 3.3, 4.4]\n x = [Decimal(x) for x in x0]\n y = [Decimal(y) for y in y0]\n # Test image - vertical, align-center bar chart with Decimal() input\n ax.bar(x, y, align='center')\n # Reference image\n ax = fig_ref.subplots()\n ax.bar(x0, y0, align='center')\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_barh_decimal_center(fig_test, fig_ref):\n ax = fig_test.subplots()\n x0 = [1.5, 8.4, 5.3, 4.2]\n y0 = [1.1, 2.2, 3.3, 4.4]\n x = [Decimal(x) for x in x0]\n y = [Decimal(y) for y in y0]\n # Test image - horizontal, align-center bar chart with Decimal() input\n ax.barh(x, y, height=[0.5, 0.5, 1, 1], align='center')\n # Reference image\n ax = fig_ref.subplots()\n ax.barh(x0, y0, height=[0.5, 0.5, 1, 1], align='center')\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_bar_decimal_width(fig_test, fig_ref):\n x = [1.5, 8.4, 5.3, 4.2]\n y = [1.1, 2.2, 3.3, 4.4]\n w0 = [0.7, 1.45, 1, 2]\n w = [Decimal(i) for i in w0]\n # Test image - vertical bar chart with Decimal() width\n ax = fig_test.subplots()\n ax.bar(x, y, width=w, align='center')\n # Reference image\n ax = fig_ref.subplots()\n ax.bar(x, y, width=w0, align='center')\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_barh_decimal_height(fig_test, fig_ref):\n x = [1.5, 8.4, 5.3, 4.2]\n y = [1.1, 2.2, 3.3, 4.4]\n h0 = [0.7, 1.45, 1, 2]\n h = [Decimal(i) for i in h0]\n # Test image - horizontal bar chart with Decimal() height\n ax = fig_test.subplots()\n ax.barh(x, y, height=h, align='center')\n # Reference image\n ax = fig_ref.subplots()\n ax.barh(x, y, height=h0, align='center')\n\n\ndef test_bar_color_none_alpha():\n ax = plt.gca()\n rects = ax.bar([1, 2], [2, 4], alpha=0.3, color='none', edgecolor='r')\n for rect in rects:\n assert rect.get_facecolor() == (0, 0, 0, 0)\n assert rect.get_edgecolor() == (1, 0, 0, 0.3)\n\n\ndef test_bar_edgecolor_none_alpha():\n ax = plt.gca()\n rects = ax.bar([1, 2], [2, 4], alpha=0.3, color='r', edgecolor='none')\n for rect in rects:\n assert rect.get_facecolor() == (1, 0, 0, 0.3)\n assert rect.get_edgecolor() == (0, 0, 0, 0)\n\n\n@image_comparison(['barh_tick_label.png'])\ndef test_barh_tick_label():\n # From 2516: plot barh with array of string labels for y axis\n ax = plt.gca()\n ax.barh([1, 2.5], [1, 2], height=[0.2, 0.5], tick_label=['a', 'b'],\n align='center')\n\n\ndef test_bar_timedelta():\n \"\"\"Smoketest that bar can handle width and height in delta units.\"\"\"\n fig, ax = plt.subplots()\n ax.bar(datetime.datetime(2018, 1, 1), 1.,\n width=datetime.timedelta(hours=3))\n ax.bar(datetime.datetime(2018, 1, 1), 1.,\n xerr=datetime.timedelta(hours=2),\n width=datetime.timedelta(hours=3))\n fig, ax = plt.subplots()\n ax.barh(datetime.datetime(2018, 1, 1), 1,\n height=datetime.timedelta(hours=3))\n ax.barh(datetime.datetime(2018, 1, 1), 1,\n height=datetime.timedelta(hours=3),\n yerr=datetime.timedelta(hours=2))\n fig, ax = plt.subplots()\n ax.barh([datetime.datetime(2018, 1, 1), datetime.datetime(2018, 1, 1)],\n np.array([1, 1.5]),\n height=datetime.timedelta(hours=3))\n ax.barh([datetime.datetime(2018, 1, 1), datetime.datetime(2018, 1, 1)],\n np.array([1, 1.5]),\n height=[datetime.timedelta(hours=t) for t in [1, 2]])\n 
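# broken_barh draws (start, width) spans, so it should likewise accept a\n # datetime start paired with a timedelta width (smoke-checked below).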
ax.broken_barh([(datetime.datetime(2018, 1, 1),\n datetime.timedelta(hours=1))],\n (10, 20))\n\n\ndef test_boxplot_dates_pandas(pd):\n # smoke test for boxplot and dates in pandas\n data = np.random.rand(5, 2)\n years = pd.date_range('1/1/2000',\n periods=2, freq=pd.DateOffset(years=1)).year\n plt.figure()\n plt.boxplot(data, positions=years)\n\n\ndef test_pcolor_regression(pd):\n from pandas.plotting import (\n register_matplotlib_converters,\n deregister_matplotlib_converters,\n )\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n times = [datetime.datetime(2021, 1, 1)]\n while len(times) < 7:\n times.append(times[-1] + datetime.timedelta(seconds=120))\n\n y_vals = np.arange(5)\n\n time_axis, y_axis = np.meshgrid(times, y_vals)\n shape = (len(y_vals) - 1, len(times) - 1)\n z_data = np.arange(shape[0] * shape[1])\n\n z_data.shape = shape\n try:\n register_matplotlib_converters()\n\n im = ax.pcolormesh(time_axis, y_axis, z_data)\n # make sure this does not raise!\n fig.canvas.draw()\n finally:\n deregister_matplotlib_converters()\n\n\ndef test_bar_pandas(pd):\n # Smoke test for pandas\n df = pd.DataFrame(\n {'year': [2018, 2018, 2018],\n 'month': [1, 1, 1],\n 'day': [1, 2, 3],\n 'value': [1, 2, 3]})\n df['date'] = pd.to_datetime(df[['year', 'month', 'day']])\n\n monthly = df[['date', 'value']].groupby(['date']).sum()\n dates = monthly.index\n forecast = monthly['value']\n baseline = monthly['value']\n\n fig, ax = plt.subplots()\n ax.bar(dates, forecast, width=10, align='center')\n ax.plot(dates, baseline, color='orange', lw=4)\n\n\ndef test_bar_pandas_indexed(pd):\n # Smoke test for indexed pandas\n df = pd.DataFrame({\"x\": [1., 2., 3.], \"width\": [.2, .4, .6]},\n index=[1, 2, 3])\n fig, ax = plt.subplots()\n ax.bar(df.x, 1., width=df.width)\n\n\n@check_figures_equal()\n@pytest.mark.style('default')\ndef test_bar_hatches(fig_test, fig_ref):\n ax_test = fig_test.subplots()\n ax_ref = fig_ref.subplots()\n\n x = [1, 2]\n y = [2, 3]\n hatches = ['x', 'o']\n for i in range(2):\n ax_ref.bar(x[i], y[i], color='C0', hatch=hatches[i])\n\n ax_test.bar(x, y, hatch=hatches)\n\n\ndef test_pandas_minimal_plot(pd):\n # smoke test that series and index objects do not warn\n x = pd.Series([1, 2], dtype=\"float64\")\n plt.plot(x, x)\n plt.plot(x.index, x)\n plt.plot(x)\n plt.plot(x.index)\n\n\n@image_comparison(['hist_log'], remove_text=True)\ndef test_hist_log():\n data0 = np.linspace(0, 1, 200)**3\n data = np.concatenate([1 - data0, 1 + data0])\n fig, ax = plt.subplots()\n ax.hist(data, fill=False, log=True)\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_hist_log_2(fig_test, fig_ref):\n axs_test = fig_test.subplots(2, 3)\n axs_ref = fig_ref.subplots(2, 3)\n for i, histtype in enumerate([\"bar\", \"step\", \"stepfilled\"]):\n # Set log scale, then call hist().\n axs_test[0, i].set_yscale(\"log\")\n axs_test[0, i].hist(1, 1, histtype=histtype)\n # Call hist(), then set log scale.\n axs_test[1, i].hist(1, 1, histtype=histtype)\n axs_test[1, i].set_yscale(\"log\")\n # Use hist(..., log=True).\n for ax in axs_ref[:, i]:\n ax.hist(1, 1, log=True, histtype=histtype)\n\n\ndef test_hist_log_barstacked():\n fig, axs = plt.subplots(2)\n axs[0].hist([[0], [0, 1]], 2, histtype=\"barstacked\")\n axs[0].set_yscale(\"log\")\n axs[1].hist([0, 0, 1], 2, histtype=\"barstacked\")\n axs[1].set_yscale(\"log\")\n fig.canvas.draw()\n assert axs[0].get_ylim() == axs[1].get_ylim()\n\n\n@image_comparison(['hist_bar_empty.png'], remove_text=True)\ndef test_hist_bar_empty():\n # From #3886: creating hist from empty 
dataset raises ValueError\n ax = plt.gca()\n ax.hist([], histtype='bar')\n\n\n@image_comparison(['hist_step_empty.png'], remove_text=True)\ndef test_hist_step_empty():\n # From #3886: creating hist from empty dataset raises ValueError\n ax = plt.gca()\n ax.hist([], histtype='step')\n\n\n@image_comparison(['hist_step_filled.png'], remove_text=True)\ndef test_hist_step_filled():\n np.random.seed(0)\n x = np.random.randn(1000, 3)\n n_bins = 10\n\n kwargs = [{'fill': True}, {'fill': False}, {'fill': None}, {}]*2\n types = ['step']*4+['stepfilled']*4\n fig, axs = plt.subplots(nrows=2, ncols=4)\n\n for kg, _type, ax in zip(kwargs, types, axs.flat):\n ax.hist(x, n_bins, histtype=_type, stacked=True, **kg)\n ax.set_title('%s/%s' % (kg, _type))\n ax.set_ylim(bottom=-50)\n\n patches = axs[0, 0].patches\n assert all(p.get_facecolor() == p.get_edgecolor() for p in patches)\n\n\n@image_comparison(['hist_density.png'])\ndef test_hist_density():\n np.random.seed(19680801)\n data = np.random.standard_normal(2000)\n fig, ax = plt.subplots()\n ax.hist(data, density=True)\n\n\ndef test_hist_unequal_bins_density():\n # Test correct behavior of normalized histogram with unequal bins\n # https://github.com/matplotlib/matplotlib/issues/9557\n rng = np.random.RandomState(57483)\n t = rng.randn(100)\n bins = [-3, -1, -0.5, 0, 1, 5]\n mpl_heights, _, _ = plt.hist(t, bins=bins, density=True)\n np_heights, _ = np.histogram(t, bins=bins, density=True)\n assert_allclose(mpl_heights, np_heights)\n\n\ndef test_hist_datetime_datasets():\n data = [[datetime.datetime(2017, 1, 1), datetime.datetime(2017, 1, 1)],\n [datetime.datetime(2017, 1, 1), datetime.datetime(2017, 1, 2)]]\n fig, ax = plt.subplots()\n ax.hist(data, stacked=True)\n ax.hist(data, stacked=False)\n\n\n@pytest.mark.parametrize(\"bins_preprocess\",\n [mpl.dates.date2num,\n lambda bins: bins,\n lambda bins: np.asarray(bins).astype('datetime64')],\n ids=['date2num', 'datetime.datetime',\n 'np.datetime64'])\ndef test_hist_datetime_datasets_bins(bins_preprocess):\n data = [[datetime.datetime(2019, 1, 5), datetime.datetime(2019, 1, 11),\n datetime.datetime(2019, 2, 1), datetime.datetime(2019, 3, 1)],\n [datetime.datetime(2019, 1, 11), datetime.datetime(2019, 2, 5),\n datetime.datetime(2019, 2, 18), datetime.datetime(2019, 3, 1)]]\n\n date_edges = [datetime.datetime(2019, 1, 1), datetime.datetime(2019, 2, 1),\n datetime.datetime(2019, 3, 1)]\n\n fig, ax = plt.subplots()\n _, bins, _ = ax.hist(data, bins=bins_preprocess(date_edges), stacked=True)\n np.testing.assert_allclose(bins, mpl.dates.date2num(date_edges))\n\n _, bins, _ = ax.hist(data, bins=bins_preprocess(date_edges), stacked=False)\n np.testing.assert_allclose(bins, mpl.dates.date2num(date_edges))\n\n\n@pytest.mark.parametrize('data, expected_number_of_hists',\n [([], 1),\n ([[]], 1),\n ([[], []], 2)])\ndef test_hist_with_empty_input(data, expected_number_of_hists):\n hists, _, _ = plt.hist(data)\n hists = np.asarray(hists)\n\n if hists.ndim == 1:\n assert 1 == expected_number_of_hists\n else:\n assert hists.shape[0] == expected_number_of_hists\n\n\n@pytest.mark.parametrize(\"histtype, zorder\",\n [(\"bar\", mpl.patches.Patch.zorder),\n (\"step\", mpl.lines.Line2D.zorder),\n (\"stepfilled\", mpl.patches.Patch.zorder)])\ndef test_hist_zorder(histtype, zorder):\n ax = plt.figure().add_subplot()\n ax.hist([1, 2], histtype=histtype)\n assert ax.patches\n for patch in ax.patches:\n assert patch.get_zorder() == zorder\n\n\n@check_figures_equal(extensions=['png'])\ndef test_stairs(fig_test, fig_ref):\n import matplotlib.lines as mlines\n 
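# Each stairs() call below is rebuilt in fig_ref from plain Line2D\n # 'steps-post' segments, so both figures should rasterize identically.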
y = np.array([6, 14, 32, 37, 48, 32, 21, 4]) # hist\n x = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.]) # bins\n\n test_axes = fig_test.subplots(3, 2).flatten()\n test_axes[0].stairs(y, x, baseline=None)\n test_axes[1].stairs(y, x, baseline=None, orientation='horizontal')\n test_axes[2].stairs(y, x)\n test_axes[3].stairs(y, x, orientation='horizontal')\n test_axes[4].stairs(y, x)\n test_axes[4].semilogy()\n test_axes[5].stairs(y, x, orientation='horizontal')\n test_axes[5].semilogx()\n\n # `PathPatch` defaults to apply to all following Line2D artists\n style = {'solid_joinstyle': 'miter', 'solid_capstyle': 'butt'}\n\n ref_axes = fig_ref.subplots(3, 2).flatten()\n ref_axes[0].plot(x, np.append(y, y[-1]), drawstyle='steps-post', **style)\n ref_axes[1].plot(np.append(y[0], y), x, drawstyle='steps-post', **style)\n\n ref_axes[2].plot(x, np.append(y, y[-1]), drawstyle='steps-post', **style)\n ref_axes[2].add_line(mlines.Line2D([x[0], x[0]], [0, y[0]], **style))\n ref_axes[2].add_line(mlines.Line2D([x[-1], x[-1]], [0, y[-1]], **style))\n ref_axes[2].set_ylim(0, None)\n\n ref_axes[3].plot(np.append(y[0], y), x, drawstyle='steps-post', **style)\n ref_axes[3].add_line(mlines.Line2D([0, y[0]], [x[0], x[0]], **style))\n ref_axes[3].add_line(mlines.Line2D([0, y[-1]], [x[-1], x[-1]], **style))\n ref_axes[3].set_xlim(0, None)\n\n ref_axes[4].plot(x, np.append(y, y[-1]), drawstyle='steps-post', **style)\n ref_axes[4].add_line(mlines.Line2D([x[0], x[0]], [0, y[0]], **style))\n ref_axes[4].add_line(mlines.Line2D([x[-1], x[-1]], [0, y[-1]], **style))\n ref_axes[4].semilogy()\n\n ref_axes[5].plot(np.append(y[0], y), x, drawstyle='steps-post', **style)\n ref_axes[5].add_line(mlines.Line2D([0, y[0]], [x[0], x[0]], **style))\n ref_axes[5].add_line(mlines.Line2D([0, y[-1]], [x[-1], x[-1]], **style))\n ref_axes[5].semilogx()\n\n\n@check_figures_equal(extensions=['png'])\ndef test_stairs_fill(fig_test, fig_ref):\n h, bins = [1, 2, 3, 4, 2], [0, 1, 2, 3, 4, 5]\n bs = -2\n # Test\n test_axes = fig_test.subplots(2, 2).flatten()\n test_axes[0].stairs(h, bins, fill=True)\n test_axes[1].stairs(h, bins, orientation='horizontal', fill=True)\n test_axes[2].stairs(h, bins, baseline=bs, fill=True)\n test_axes[3].stairs(h, bins, baseline=bs, orientation='horizontal',\n fill=True)\n\n # Ref\n ref_axes = fig_ref.subplots(2, 2).flatten()\n ref_axes[0].fill_between(bins, np.append(h, h[-1]), step='post', lw=0)\n ref_axes[0].set_ylim(0, None)\n ref_axes[1].fill_betweenx(bins, np.append(h, h[-1]), step='post', lw=0)\n ref_axes[1].set_xlim(0, None)\n ref_axes[2].fill_between(bins, np.append(h, h[-1]),\n np.ones(len(h)+1)*bs, step='post', lw=0)\n ref_axes[2].set_ylim(bs, None)\n ref_axes[3].fill_betweenx(bins, np.append(h, h[-1]),\n np.ones(len(h)+1)*bs, step='post', lw=0)\n ref_axes[3].set_xlim(bs, None)\n\n\n@check_figures_equal(extensions=['png'])\ndef test_stairs_update(fig_test, fig_ref):\n # fixed ylim because stairs() does autoscale, but updating data does not\n ylim = -3, 4\n # Test\n test_ax = fig_test.add_subplot()\n h = test_ax.stairs([1, 2, 3])\n test_ax.set_ylim(ylim)\n h.set_data([3, 2, 1])\n h.set_data(edges=np.arange(4)+2)\n h.set_data([1, 2, 1], np.arange(4)/2)\n h.set_data([1, 2, 3])\n h.set_data(None, np.arange(4))\n assert np.allclose(h.get_data()[0], np.arange(1, 4))\n assert np.allclose(h.get_data()[1], np.arange(4))\n h.set_data(baseline=-2)\n assert h.get_data().baseline == -2\n\n # Ref\n ref_ax = fig_ref.add_subplot()\n h = ref_ax.stairs([1, 2, 3], baseline=-2)\n 
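# The reference axes start directly in the final state that the chained\n # set_data() calls above are expected to reach.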
ref_ax.set_ylim(ylim)\n\n\n@check_figures_equal(extensions=['png'])\ndef test_stairs_baseline_0(fig_test, fig_ref):\n # Test\n test_ax = fig_test.add_subplot()\n test_ax.stairs([5, 6, 7], baseline=None)\n\n # Ref\n ref_ax = fig_ref.add_subplot()\n style = {'solid_joinstyle': 'miter', 'solid_capstyle': 'butt'}\n ref_ax.plot(range(4), [5, 6, 7, 7], drawstyle='steps-post', **style)\n ref_ax.set_ylim(0, None)\n\n\ndef test_stairs_empty():\n ax = plt.figure().add_subplot()\n ax.stairs([], [42])\n assert ax.get_xlim() == (39, 45)\n assert ax.get_ylim() == (-0.06, 0.06)\n\n\ndef test_stairs_invalid_nan():\n with pytest.raises(ValueError, match='Nan values in \"edges\"'):\n plt.stairs([1, 2], [0, np.nan, 1])\n\n\ndef test_stairs_invalid_mismatch():\n with pytest.raises(ValueError, match='Size mismatch'):\n plt.stairs([1, 2], [0, 1])\n\n\ndef test_stairs_invalid_update():\n h = plt.stairs([1, 2], [0, 1, 2])\n with pytest.raises(ValueError, match='Nan values in \"edges\"'):\n h.set_data(edges=[1, np.nan, 2])\n\n\ndef test_stairs_invalid_update2():\n h = plt.stairs([1, 2], [0, 1, 2])\n with pytest.raises(ValueError, match='Size mismatch'):\n h.set_data(edges=np.arange(5))\n\n\n@image_comparison(['test_stairs_options.png'], remove_text=True)\ndef test_stairs_options():\n x, y = np.array([1, 2, 3, 4, 5]), np.array([1, 2, 3, 4]).astype(float)\n yn = y.copy()\n yn[1] = np.nan\n\n fig, ax = plt.subplots()\n ax.stairs(y*3, x, color='green', fill=True, label=\"A\")\n ax.stairs(y, x*3-3, color='red', fill=True,\n orientation='horizontal', label=\"B\")\n ax.stairs(yn, x, color='orange', ls='--', lw=2, label=\"C\")\n ax.stairs(yn/3, x*3-2, ls='--', lw=2, baseline=0.5,\n orientation='horizontal', label=\"D\")\n ax.stairs(y[::-1]*3+13, x-1, color='red', ls='--', lw=2, baseline=None,\n label=\"E\")\n ax.stairs(y[::-1]*3+14, x, baseline=26,\n color='purple', ls='--', lw=2, label=\"F\")\n ax.stairs(yn[::-1]*3+15, x+1, baseline=np.linspace(27, 25, len(y)),\n color='blue', ls='--', lw=2, label=\"G\", fill=True)\n ax.stairs(y[:-1][::-1]*2+11, x[:-1]+0.5, color='black', ls='--', lw=2,\n baseline=12, hatch='//', label=\"H\")\n ax.legend(loc=0)\n\n\n@image_comparison(['test_stairs_datetime.png'])\ndef test_stairs_datetime():\n f, ax = plt.subplots(constrained_layout=True)\n ax.stairs(np.arange(36),\n np.arange(np.datetime64('2001-12-27'),\n np.datetime64('2002-02-02')))\n plt.xticks(rotation=30)\n\n\ndef contour_dat():\n x = np.linspace(-3, 5, 150)\n y = np.linspace(-3, 5, 120)\n z = np.cos(x) + np.sin(y[:, np.newaxis])\n return x, y, z\n\n\n@image_comparison(['contour_hatching'], remove_text=True, style='mpl20')\ndef test_contour_hatching():\n x, y, z = contour_dat()\n fig, ax = plt.subplots()\n ax.contourf(x, y, z, 7, hatches=['/', '\\\\', '//', '-'],\n cmap=plt.get_cmap('gray'),\n extend='both', alpha=0.5)\n\n\n@image_comparison(['contour_colorbar'], style='mpl20')\ndef test_contour_colorbar():\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n x, y, z = contour_dat()\n\n fig, ax = plt.subplots()\n cs = ax.contourf(x, y, z, levels=np.arange(-1.8, 1.801, 0.2),\n cmap=plt.get_cmap('RdBu'),\n vmin=-0.6,\n vmax=0.6,\n extend='both')\n cs1 = ax.contour(x, y, z, levels=np.arange(-2.2, -0.599, 0.2),\n colors=['y'],\n linestyles='solid',\n linewidths=2)\n cs2 = ax.contour(x, y, z, levels=np.arange(0.6, 2.2, 0.2),\n colors=['c'],\n linewidths=2)\n cbar = fig.colorbar(cs, ax=ax)\n cbar.add_lines(cs1)\n cbar.add_lines(cs2, erase=False)\n\n\n@image_comparison(['hist2d', 
'hist2d'], remove_text=True, style='mpl20')\ndef test_hist2d():\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n np.random.seed(0)\n # make it not symmetric in case we switch x and y axis\n x = np.random.randn(100)*2+5\n y = np.random.randn(100)-2\n fig, ax = plt.subplots()\n ax.hist2d(x, y, bins=10, rasterized=True)\n\n # Reuse testcase from above for a labeled data test\n data = {\"x\": x, \"y\": y}\n fig, ax = plt.subplots()\n ax.hist2d(\"x\", \"y\", bins=10, data=data, rasterized=True)\n\n\n@image_comparison(['hist2d_transpose'], remove_text=True, style='mpl20')\ndef test_hist2d_transpose():\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n np.random.seed(0)\n # make sure the output from np.histogram is transposed before\n # passing to pcolorfast\n x = np.array([5]*100)\n y = np.random.randn(100)-2\n fig, ax = plt.subplots()\n ax.hist2d(x, y, bins=10, rasterized=True)\n\n\ndef test_hist2d_density():\n x, y = np.random.random((2, 100))\n ax = plt.figure().subplots()\n for obj in [ax, plt]:\n obj.hist2d(x, y, density=True)\n\n\nclass TestScatter:\n @image_comparison(['scatter'], style='mpl20', remove_text=True)\n def test_scatter_plot(self):\n data = {\"x\": np.array([3, 4, 2, 6]), \"y\": np.array([2, 5, 2, 3]),\n \"c\": ['r', 'y', 'b', 'lime'], \"s\": [24, 15, 19, 29],\n \"c2\": ['0.5', '0.6', '0.7', '0.8']}\n\n fig, ax = plt.subplots()\n ax.scatter(data[\"x\"] - 1., data[\"y\"] - 1., c=data[\"c\"], s=data[\"s\"])\n ax.scatter(data[\"x\"] + 1., data[\"y\"] + 1., c=data[\"c2\"], s=data[\"s\"])\n ax.scatter(\"x\", \"y\", c=\"c\", s=\"s\", data=data)\n\n @image_comparison(['scatter_marker.png'], remove_text=True)\n def test_scatter_marker(self):\n fig, (ax0, ax1, ax2) = plt.subplots(ncols=3)\n ax0.scatter([3, 4, 2, 6], [2, 5, 2, 3],\n c=[(1, 0, 0), 'y', 'b', 'lime'],\n s=[60, 50, 40, 30],\n edgecolors=['k', 'r', 'g', 'b'],\n marker='s')\n ax1.scatter([3, 4, 2, 6], [2, 5, 2, 3],\n c=[(1, 0, 0), 'y', 'b', 'lime'],\n s=[60, 50, 40, 30],\n edgecolors=['k', 'r', 'g', 'b'],\n marker=mmarkers.MarkerStyle('o', fillstyle='top'))\n # unit area ellipse\n rx, ry = 3, 1\n area = rx * ry * np.pi\n theta = np.linspace(0, 2 * np.pi, 21)\n verts = np.column_stack([np.cos(theta) * rx / area,\n np.sin(theta) * ry / area])\n ax2.scatter([3, 4, 2, 6], [2, 5, 2, 3],\n c=[(1, 0, 0), 'y', 'b', 'lime'],\n s=[60, 50, 40, 30],\n edgecolors=['k', 'r', 'g', 'b'],\n marker=verts)\n\n @image_comparison(['scatter_2D'], remove_text=True, extensions=['png'])\n def test_scatter_2D(self):\n x = np.arange(3)\n y = np.arange(2)\n x, y = np.meshgrid(x, y)\n z = x + y\n fig, ax = plt.subplots()\n ax.scatter(x, y, c=z, s=200, edgecolors='face')\n\n @check_figures_equal(extensions=[\"png\"])\n def test_scatter_decimal(self, fig_test, fig_ref):\n x0 = np.array([1.5, 8.4, 5.3, 4.2])\n y0 = np.array([1.1, 2.2, 3.3, 4.4])\n x = np.array([Decimal(i) for i in x0])\n y = np.array([Decimal(i) for i in y0])\n c = ['r', 'y', 'b', 'lime']\n s = [24, 15, 19, 29]\n # Test image - scatter plot with Decimal() input\n ax = fig_test.subplots()\n ax.scatter(x, y, c=c, s=s)\n # Reference image\n ax = fig_ref.subplots()\n ax.scatter(x0, y0, c=c, s=s)\n\n def test_scatter_color(self):\n # Try to catch cases where 'c' kwarg should have been used.\n with pytest.raises(ValueError):\n plt.scatter([1, 2], [1, 2], color=[0.1, 0.2])\n with pytest.raises(ValueError):\n plt.scatter([1, 2, 3], [1, 2, 3], color=[1, 2, 3])\n\n def 
test_scatter_unfilled(self):\n coll = plt.scatter([0, 1, 2], [1, 3, 2], c=['0.1', '0.3', '0.5'],\n marker=mmarkers.MarkerStyle('o', fillstyle='none'),\n linewidths=[1.1, 1.2, 1.3])\n assert coll.get_facecolors().shape == (0, 4) # no facecolors\n assert_array_equal(coll.get_edgecolors(), [[0.1, 0.1, 0.1, 1],\n [0.3, 0.3, 0.3, 1],\n [0.5, 0.5, 0.5, 1]])\n assert_array_equal(coll.get_linewidths(), [1.1, 1.2, 1.3])\n\n @pytest.mark.style('default')\n def test_scatter_unfillable(self):\n coll = plt.scatter([0, 1, 2], [1, 3, 2], c=['0.1', '0.3', '0.5'],\n marker='x',\n linewidths=[1.1, 1.2, 1.3])\n assert_array_equal(coll.get_facecolors(), coll.get_edgecolors())\n assert_array_equal(coll.get_edgecolors(), [[0.1, 0.1, 0.1, 1],\n [0.3, 0.3, 0.3, 1],\n [0.5, 0.5, 0.5, 1]])\n assert_array_equal(coll.get_linewidths(), [1.1, 1.2, 1.3])\n\n def test_scatter_size_arg_size(self):\n x = np.arange(4)\n with pytest.raises(ValueError, match='same size as x and y'):\n plt.scatter(x, x, x[1:])\n with pytest.raises(ValueError, match='same size as x and y'):\n plt.scatter(x[1:], x[1:], x)\n with pytest.raises(ValueError, match='float array-like'):\n plt.scatter(x, x, 'foo')\n\n def test_scatter_edgecolor_RGB(self):\n # GitHub issue #19066\n coll = plt.scatter([1, 2, 3], [1, np.nan, np.nan],\n edgecolor=(1, 0, 0))\n assert mcolors.same_color(coll.get_edgecolor(), (1, 0, 0))\n coll = plt.scatter([1, 2, 3, 4], [1, np.nan, np.nan, 1],\n edgecolor=(1, 0, 0, 1))\n assert mcolors.same_color(coll.get_edgecolor(), (1, 0, 0, 1))\n\n @check_figures_equal(extensions=[\"png\"])\n def test_scatter_invalid_color(self, fig_test, fig_ref):\n ax = fig_test.subplots()\n cmap = plt.get_cmap(\"viridis\", 16)\n cmap.set_bad(\"k\", 1)\n # Set a nonuniform size to prevent the last call to `scatter` (plotting\n # the invalid points separately in fig_ref) from using the marker\n # stamping fast path, which would result in slightly offset markers.\n ax.scatter(range(4), range(4),\n c=[1, np.nan, 2, np.nan], s=[1, 2, 3, 4],\n cmap=cmap, plotnonfinite=True)\n ax = fig_ref.subplots()\n cmap = plt.get_cmap(\"viridis\", 16)\n ax.scatter([0, 2], [0, 2], c=[1, 2], s=[1, 3], cmap=cmap)\n ax.scatter([1, 3], [1, 3], s=[2, 4], color=\"k\")\n\n @check_figures_equal(extensions=[\"png\"])\n def test_scatter_no_invalid_color(self, fig_test, fig_ref):\n # With plotnonfinite=False we plot only 2 points.\n ax = fig_test.subplots()\n cmap = plt.get_cmap(\"viridis\", 16)\n cmap.set_bad(\"k\", 1)\n ax.scatter(range(4), range(4),\n c=[1, np.nan, 2, np.nan], s=[1, 2, 3, 4],\n cmap=cmap, plotnonfinite=False)\n ax = fig_ref.subplots()\n ax.scatter([0, 2], [0, 2], c=[1, 2], s=[1, 3], cmap=cmap)\n\n @check_figures_equal(extensions=[\"png\"])\n def test_scatter_norm_vminvmax(self, fig_test, fig_ref):\n \"\"\"Parameters vmin, vmax should be ignored if norm is given.\"\"\"\n x = [1, 2, 3]\n ax = fig_ref.subplots()\n ax.scatter(x, x, c=x, vmin=0, vmax=5)\n ax = fig_test.subplots()\n with pytest.warns(MatplotlibDeprecationWarning,\n match=\"Passing parameters norm and vmin/vmax \"\n \"simultaneously is deprecated.\"):\n ax.scatter(x, x, c=x, norm=mcolors.Normalize(-10, 10),\n vmin=0, vmax=5)\n\n @check_figures_equal(extensions=[\"png\"])\n def test_scatter_single_point(self, fig_test, fig_ref):\n ax = fig_test.subplots()\n ax.scatter(1, 1, c=1)\n ax = fig_ref.subplots()\n ax.scatter([1], [1], c=[1])\n\n @check_figures_equal(extensions=[\"png\"])\n def test_scatter_different_shapes(self, fig_test, fig_ref):\n x = np.arange(10)\n ax = fig_test.subplots()\n ax.scatter(x, 
x.reshape(2, 5), c=x.reshape(5, 2))\n ax = fig_ref.subplots()\n ax.scatter(x.reshape(5, 2), x, c=x.reshape(2, 5))\n\n # Parameters for *test_scatter_c*. NB: assuming that the\n # scatter plot will have 4 elements. The tuple scheme is:\n # (*c* parameter case, exception regexp key or None if no exception)\n params_test_scatter_c = [\n # single string:\n ('0.5', None),\n # Single letter-sequences\n ([\"rgby\"], \"conversion\"),\n # Special cases\n (\"red\", None),\n (\"none\", None),\n (None, None),\n ([\"r\", \"g\", \"b\", \"none\"], None),\n # Non-valid color spec (FWIW, 'jaune' means yellow in French)\n (\"jaune\", \"conversion\"),\n ([\"jaune\"], \"conversion\"), # wrong type before wrong size\n ([\"jaune\"]*4, \"conversion\"),\n # Value-mapping like\n ([0.5]*3, None), # should emit a warning for user's eyes though\n ([0.5]*4, None), # NB: no warning as matching size allows mapping\n ([0.5]*5, \"shape\"),\n # list of strings:\n (['0.5', '0.4', '0.6', '0.7'], None),\n (['0.5', 'red', '0.6', 'C5'], None),\n (['0.5', 0.5, '0.6', 'C5'], \"conversion\"),\n # RGB values\n ([[1, 0, 0]], None),\n ([[1, 0, 0]]*3, \"shape\"),\n ([[1, 0, 0]]*4, None),\n ([[1, 0, 0]]*5, \"shape\"),\n # RGBA values\n ([[1, 0, 0, 0.5]], None),\n ([[1, 0, 0, 0.5]]*3, \"shape\"),\n ([[1, 0, 0, 0.5]]*4, None),\n ([[1, 0, 0, 0.5]]*5, \"shape\"),\n # Mix of valid color specs\n ([[1, 0, 0, 0.5]]*3 + [[1, 0, 0]], None),\n ([[1, 0, 0, 0.5], \"red\", \"0.0\"], \"shape\"),\n ([[1, 0, 0, 0.5], \"red\", \"0.0\", \"C5\"], None),\n ([[1, 0, 0, 0.5], \"red\", \"0.0\", \"C5\", [0, 1, 0]], \"shape\"),\n # Mix of valid and non valid color specs\n ([[1, 0, 0, 0.5], \"red\", \"jaune\"], \"conversion\"),\n ([[1, 0, 0, 0.5], \"red\", \"0.0\", \"jaune\"], \"conversion\"),\n ([[1, 0, 0, 0.5], \"red\", \"0.0\", \"C5\", \"jaune\"], \"conversion\"),\n ]\n\n @pytest.mark.parametrize('c_case, re_key', params_test_scatter_c)\n def test_scatter_c(self, c_case, re_key):\n def get_next_color():\n return 'blue' # currently unused\n\n xsize = 4\n # Additional checking of *c* (introduced in #11383).\n REGEXP = {\n \"shape\": \"^'c' argument has [0-9]+ elements\", # shape mismatch\n \"conversion\": \"^'c' argument must be a color\", # bad vals\n }\n\n if re_key is None:\n mpl.axes.Axes._parse_scatter_color_args(\n c=c_case, edgecolors=\"black\", kwargs={}, xsize=xsize,\n get_next_color_func=get_next_color)\n else:\n with pytest.raises(ValueError, match=REGEXP[re_key]):\n mpl.axes.Axes._parse_scatter_color_args(\n c=c_case, edgecolors=\"black\", kwargs={}, xsize=xsize,\n get_next_color_func=get_next_color)\n\n @pytest.mark.style('default')\n @check_figures_equal(extensions=[\"png\"])\n def test_scatter_single_color_c(self, fig_test, fig_ref):\n rgb = [[1, 0.5, 0.05]]\n rgba = [[1, 0.5, 0.05, .5]]\n\n # set via color kwarg\n ax_ref = fig_ref.subplots()\n ax_ref.scatter(np.ones(3), range(3), color=rgb)\n ax_ref.scatter(np.ones(4)*2, range(4), color=rgba)\n\n # set via broadcasting via c\n ax_test = fig_test.subplots()\n ax_test.scatter(np.ones(3), range(3), c=rgb)\n ax_test.scatter(np.ones(4)*2, range(4), c=rgba)\n\n def test_scatter_linewidths(self):\n x = np.arange(5)\n\n fig, ax = plt.subplots()\n for i in range(3):\n pc = ax.scatter(x, np.full(5, i), c=f'C{i}', marker='x', s=100,\n linewidths=i + 1)\n assert pc.get_linewidths() == i + 1\n\n pc = ax.scatter(x, np.full(5, 3), c='C3', marker='x', s=100,\n linewidths=[*range(1, 5), None])\n assert_array_equal(pc.get_linewidths(),\n [*range(1, 5), mpl.rcParams['lines.linewidth']])\n\n\ndef _params(c=None, 
xsize=2, *, edgecolors=None, **kwargs):\n return (c, edgecolors, kwargs, xsize)\n_result = namedtuple('_result', 'c, colors')\n\n\n@pytest.mark.parametrize(\n 'params, expected_result',\n [(_params(),\n _result(c='b', colors=np.array([[0, 0, 1, 1]]))),\n (_params(c='r'),\n _result(c='r', colors=np.array([[1, 0, 0, 1]]))),\n (_params(c='r', colors='b'),\n _result(c='r', colors=np.array([[1, 0, 0, 1]]))),\n # color\n (_params(color='b'),\n _result(c='b', colors=np.array([[0, 0, 1, 1]]))),\n (_params(color=['b', 'g']),\n _result(c=['b', 'g'], colors=np.array([[0, 0, 1, 1], [0, .5, 0, 1]]))),\n ])\ndef test_parse_scatter_color_args(params, expected_result):\n def get_next_color():\n return 'blue' # currently unused\n\n c, colors, _edgecolors = mpl.axes.Axes._parse_scatter_color_args(\n *params, get_next_color_func=get_next_color)\n assert c == expected_result.c\n assert_allclose(colors, expected_result.colors)\n\ndel _params\ndel _result\n\n\n@pytest.mark.parametrize(\n 'kwargs, expected_edgecolors',\n [(dict(), None),\n (dict(c='b'), None),\n (dict(edgecolors='r'), 'r'),\n (dict(edgecolors=['r', 'g']), ['r', 'g']),\n (dict(edgecolor='r'), 'r'),\n (dict(edgecolors='face'), 'face'),\n (dict(edgecolors='none'), 'none'),\n (dict(edgecolor='r', edgecolors='g'), 'r'),\n (dict(c='b', edgecolor='r', edgecolors='g'), 'r'),\n (dict(color='r'), 'r'),\n (dict(color='r', edgecolor='g'), 'g'),\n ])\ndef test_parse_scatter_color_args_edgecolors(kwargs, expected_edgecolors):\n def get_next_color():\n return 'blue' # currently unused\n\n c = kwargs.pop('c', None)\n edgecolors = kwargs.pop('edgecolors', None)\n _, _, result_edgecolors = \\\n mpl.axes.Axes._parse_scatter_color_args(\n c, edgecolors, kwargs, xsize=2, get_next_color_func=get_next_color)\n assert result_edgecolors == expected_edgecolors\n\n\ndef test_parse_scatter_color_args_error():\n def get_next_color():\n return 'blue' # currently unused\n\n with pytest.raises(ValueError,\n match=\"RGBA values should be within 0-1 range\"):\n c = np.array([[0.1, 0.2, 0.7], [0.2, 0.4, 1.4]]) # value > 1\n mpl.axes.Axes._parse_scatter_color_args(\n c, None, kwargs={}, xsize=2, get_next_color_func=get_next_color)\n\n\ndef test_as_mpl_axes_api():\n # tests the _as_mpl_axes api\n from matplotlib.projections.polar import PolarAxes\n\n class Polar:\n def __init__(self):\n self.theta_offset = 0\n\n def _as_mpl_axes(self):\n # implement the matplotlib axes interface\n return PolarAxes, {'theta_offset': self.theta_offset}\n\n prj = Polar()\n prj2 = Polar()\n prj2.theta_offset = np.pi\n prj3 = Polar()\n\n # testing axes creation with plt.axes\n ax = plt.axes([0, 0, 1, 1], projection=prj)\n assert type(ax) == PolarAxes\n with pytest.warns(\n MatplotlibDeprecationWarning,\n match=r'Calling gca\\(\\) with keyword arguments was deprecated'):\n ax_via_gca = plt.gca(projection=prj)\n assert ax_via_gca is ax\n plt.close()\n\n # testing axes creation with gca\n with pytest.warns(\n MatplotlibDeprecationWarning,\n match=r'Calling gca\\(\\) with keyword arguments was deprecated'):\n ax = plt.gca(projection=prj)\n assert type(ax) == mpl.axes._subplots.subplot_class_factory(PolarAxes)\n with pytest.warns(\n MatplotlibDeprecationWarning,\n match=r'Calling gca\\(\\) with keyword arguments was deprecated'):\n ax_via_gca = plt.gca(projection=prj)\n assert ax_via_gca is ax\n # try getting the axes given a different polar projection\n with pytest.warns(\n MatplotlibDeprecationWarning,\n match=r'Calling gca\\(\\) with keyword arguments was deprecated'):\n ax_via_gca = 
plt.gca(projection=prj2)\n assert ax_via_gca is ax\n assert ax.get_theta_offset() == 0\n # try getting the axes given an == (not is) polar projection\n with pytest.warns(\n MatplotlibDeprecationWarning,\n match=r'Calling gca\\(\\) with keyword arguments was deprecated'):\n ax_via_gca = plt.gca(projection=prj3)\n assert ax_via_gca is ax\n plt.close()\n\n # testing axes creation with subplot\n ax = plt.subplot(121, projection=prj)\n assert type(ax) == mpl.axes._subplots.subplot_class_factory(PolarAxes)\n plt.close()\n\n\ndef test_pyplot_axes():\n # test focusing of Axes in other Figure\n fig1, ax1 = plt.subplots()\n fig2, ax2 = plt.subplots()\n plt.sca(ax1)\n assert ax1 is plt.gca()\n assert fig1 is plt.gcf()\n plt.close(fig1)\n plt.close(fig2)\n\n\n@image_comparison(['log_scales'])\ndef test_log_scales():\n fig, ax = plt.subplots()\n ax.plot(np.log(np.linspace(0.1, 100)))\n ax.set_yscale('log', base=5.5)\n ax.invert_yaxis()\n ax.set_xscale('log', base=9.0)\n\n\ndef test_log_scales_no_data():\n _, ax = plt.subplots()\n ax.set(xscale=\"log\", yscale=\"log\")\n ax.xaxis.set_major_locator(mticker.MultipleLocator(1))\n assert ax.get_xlim() == ax.get_ylim() == (1, 10)\n\n\ndef test_log_scales_invalid():\n fig, ax = plt.subplots()\n ax.set_xscale('log')\n with pytest.warns(UserWarning, match='Attempted to set non-positive'):\n ax.set_xlim(-1, 10)\n ax.set_yscale('log')\n with pytest.warns(UserWarning, match='Attempted to set non-positive'):\n ax.set_ylim(-1, 10)\n\n\n@image_comparison(['stackplot_test_image', 'stackplot_test_image'])\ndef test_stackplot():\n fig = plt.figure()\n x = np.linspace(0, 10, 10)\n y1 = 1.0 * x\n y2 = 2.0 * x + 1\n y3 = 3.0 * x + 2\n ax = fig.add_subplot(1, 1, 1)\n ax.stackplot(x, y1, y2, y3)\n ax.set_xlim((0, 10))\n ax.set_ylim((0, 70))\n\n # Reuse testcase from above for a labeled data test\n data = {\"x\": x, \"y1\": y1, \"y2\": y2, \"y3\": y3}\n fig, ax = plt.subplots()\n ax.stackplot(\"x\", \"y1\", \"y2\", \"y3\", data=data)\n ax.set_xlim((0, 10))\n ax.set_ylim((0, 70))\n\n\n@image_comparison(['stackplot_test_baseline'], remove_text=True)\ndef test_stackplot_baseline():\n np.random.seed(0)\n\n def layers(n, m):\n a = np.zeros((m, n))\n for i in range(n):\n for j in range(5):\n x = 1 / (.1 + np.random.random())\n y = 2 * np.random.random() - .5\n z = 10 / (.1 + np.random.random())\n a[:, i] += x * np.exp(-((np.arange(m) / m - y) * z) ** 2)\n return a\n\n d = layers(3, 100)\n d[50, :] = 0 # test for fixed weighted wiggle (issue #6313)\n\n fig, axs = plt.subplots(2, 2)\n\n axs[0, 0].stackplot(range(100), d.T, baseline='zero')\n axs[0, 1].stackplot(range(100), d.T, baseline='sym')\n axs[1, 0].stackplot(range(100), d.T, baseline='wiggle')\n axs[1, 1].stackplot(range(100), d.T, baseline='weighted_wiggle')\n\n\ndef _bxp_test_helper(\n stats_kwargs={}, transform_stats=lambda s: s, bxp_kwargs={}):\n np.random.seed(937)\n logstats = mpl.cbook.boxplot_stats(\n np.random.lognormal(mean=1.25, sigma=1., size=(37, 4)), **stats_kwargs)\n fig, ax = plt.subplots()\n if bxp_kwargs.get('vert', True):\n ax.set_yscale('log')\n else:\n ax.set_xscale('log')\n # Work around baseline images generate back when bxp did not respect the\n # boxplot.boxprops.linewidth rcParam when patch_artist is False.\n if not bxp_kwargs.get('patch_artist', False):\n mpl.rcParams['boxplot.boxprops.linewidth'] = \\\n mpl.rcParams['lines.linewidth']\n ax.bxp(transform_stats(logstats), **bxp_kwargs)\n\n\n@image_comparison(['bxp_baseline.png'],\n savefig_kwarg={'dpi': 40},\n style='default')\ndef 
test_bxp_baseline():\n _bxp_test_helper()\n\n\n@image_comparison(['bxp_rangewhis.png'],\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_rangewhis():\n _bxp_test_helper(stats_kwargs=dict(whis=[0, 100]))\n\n\n@image_comparison(['bxp_percentilewhis.png'],\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_percentilewhis():\n _bxp_test_helper(stats_kwargs=dict(whis=[5, 95]))\n\n\n@image_comparison(['bxp_with_xlabels.png'],\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_with_xlabels():\n def transform(stats):\n for s, label in zip(stats, list('ABCD')):\n s['label'] = label\n return stats\n\n _bxp_test_helper(transform_stats=transform)\n\n\n@image_comparison(['bxp_horizontal.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default',\n tol=0.1)\ndef test_bxp_horizontal():\n _bxp_test_helper(bxp_kwargs=dict(vert=False))\n\n\n@image_comparison(['bxp_with_ylabels.png'],\n savefig_kwarg={'dpi': 40},\n style='default',\n tol=0.1)\ndef test_bxp_with_ylabels():\n def transform(stats):\n for s, label in zip(stats, list('ABCD')):\n s['label'] = label\n return stats\n\n _bxp_test_helper(transform_stats=transform, bxp_kwargs=dict(vert=False))\n\n\n@image_comparison(['bxp_patchartist.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_patchartist():\n _bxp_test_helper(bxp_kwargs=dict(patch_artist=True))\n\n\n@image_comparison(['bxp_custompatchartist.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 100},\n style='default')\ndef test_bxp_custompatchartist():\n _bxp_test_helper(bxp_kwargs=dict(\n patch_artist=True,\n boxprops=dict(facecolor='yellow', edgecolor='green', ls=':')))\n\n\n@image_comparison(['bxp_customoutlier.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_customoutlier():\n _bxp_test_helper(bxp_kwargs=dict(\n flierprops=dict(linestyle='none', marker='d', mfc='g')))\n\n\n@image_comparison(['bxp_withmean_custompoint.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_showcustommean():\n _bxp_test_helper(bxp_kwargs=dict(\n showmeans=True,\n meanprops=dict(linestyle='none', marker='d', mfc='green'),\n ))\n\n\n@image_comparison(['bxp_custombox.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_custombox():\n _bxp_test_helper(bxp_kwargs=dict(\n boxprops=dict(linestyle='--', color='b', lw=3)))\n\n\n@image_comparison(['bxp_custommedian.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_custommedian():\n _bxp_test_helper(bxp_kwargs=dict(\n medianprops=dict(linestyle='--', color='b', lw=3)))\n\n\n@image_comparison(['bxp_customcap.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_customcap():\n _bxp_test_helper(bxp_kwargs=dict(\n capprops=dict(linestyle='--', color='g', lw=3)))\n\n\n@image_comparison(['bxp_customwhisker.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_customwhisker():\n _bxp_test_helper(bxp_kwargs=dict(\n whiskerprops=dict(linestyle='-', color='m', lw=3)))\n\n\n@image_comparison(['bxp_withnotch.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_shownotches():\n _bxp_test_helper(bxp_kwargs=dict(shownotches=True))\n\n\n@image_comparison(['bxp_nocaps.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_nocaps():\n 
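# showcaps=False should suppress only the horizontal caps at the whisker\n # ends; box, whiskers, and median are drawn as usual.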
_bxp_test_helper(bxp_kwargs=dict(showcaps=False))\n\n\n@image_comparison(['bxp_nobox.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_nobox():\n _bxp_test_helper(bxp_kwargs=dict(showbox=False))\n\n\n@image_comparison(['bxp_no_flier_stats.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_no_flier_stats():\n def transform(stats):\n for s in stats:\n s.pop('fliers', None)\n return stats\n\n _bxp_test_helper(transform_stats=transform,\n bxp_kwargs=dict(showfliers=False))\n\n\n@image_comparison(['bxp_withmean_point.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_showmean():\n _bxp_test_helper(bxp_kwargs=dict(showmeans=True, meanline=False))\n\n\n@image_comparison(['bxp_withmean_line.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_showmeanasline():\n _bxp_test_helper(bxp_kwargs=dict(showmeans=True, meanline=True))\n\n\n@image_comparison(['bxp_scalarwidth.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_scalarwidth():\n _bxp_test_helper(bxp_kwargs=dict(widths=.25))\n\n\n@image_comparison(['bxp_customwidths.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_customwidths():\n _bxp_test_helper(bxp_kwargs=dict(widths=[0.10, 0.25, 0.65, 0.85]))\n\n\n@image_comparison(['bxp_custompositions.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_bxp_custompositions():\n _bxp_test_helper(bxp_kwargs=dict(positions=[1, 5, 6, 7]))\n\n\ndef test_bxp_bad_widths():\n with pytest.raises(ValueError):\n _bxp_test_helper(bxp_kwargs=dict(widths=[1]))\n\n\ndef test_bxp_bad_positions():\n with pytest.raises(ValueError):\n _bxp_test_helper(bxp_kwargs=dict(positions=[2, 3]))\n\n\n@image_comparison(['boxplot', 'boxplot'], tol=1.28, style='default')\ndef test_boxplot():\n # Randomness used for bootstrapping.\n np.random.seed(937)\n\n x = np.linspace(-7, 7, 140)\n x = np.hstack([-25, x, 25])\n fig, ax = plt.subplots()\n\n ax.boxplot([x, x], bootstrap=10000, notch=1)\n ax.set_ylim((-30, 30))\n\n # Reuse testcase from above for a labeled data test\n data = {\"x\": [x, x]}\n fig, ax = plt.subplots()\n ax.boxplot(\"x\", bootstrap=10000, notch=1, data=data)\n ax.set_ylim((-30, 30))\n\n\n@image_comparison(['boxplot_sym2.png'], remove_text=True, style='default')\ndef test_boxplot_sym2():\n # Randomness used for bootstrapping.\n np.random.seed(937)\n\n x = np.linspace(-7, 7, 140)\n x = np.hstack([-25, x, 25])\n fig, [ax1, ax2] = plt.subplots(1, 2)\n\n ax1.boxplot([x, x], bootstrap=10000, sym='^')\n ax1.set_ylim((-30, 30))\n\n ax2.boxplot([x, x], bootstrap=10000, sym='g')\n ax2.set_ylim((-30, 30))\n\n\n@image_comparison(['boxplot_sym.png'],\n remove_text=True,\n savefig_kwarg={'dpi': 40},\n style='default')\ndef test_boxplot_sym():\n x = np.linspace(-7, 7, 140)\n x = np.hstack([-25, x, 25])\n fig, ax = plt.subplots()\n\n ax.boxplot([x, x], sym='gs')\n ax.set_ylim((-30, 30))\n\n\n@image_comparison(['boxplot_autorange_false_whiskers.png',\n 'boxplot_autorange_true_whiskers.png'],\n style='default')\ndef test_boxplot_autorange_whiskers():\n # Randomness used for bootstrapping.\n np.random.seed(937)\n\n x = np.ones(140)\n x = np.hstack([0, x, 2])\n\n fig1, ax1 = plt.subplots()\n ax1.boxplot([x, x], bootstrap=10000, notch=1)\n ax1.set_ylim((-5, 5))\n\n fig2, ax2 = plt.subplots()\n ax2.boxplot([x, x], bootstrap=10000, notch=1, autorange=True)\n ax2.set_ylim((-5, 
5))\n\n\ndef _rc_test_bxp_helper(ax, rc_dict):\n x = np.linspace(-7, 7, 140)\n x = np.hstack([-25, x, 25])\n with matplotlib.rc_context(rc_dict):\n ax.boxplot([x, x])\n return ax\n\n\n@image_comparison(['boxplot_rc_parameters'],\n savefig_kwarg={'dpi': 100}, remove_text=True,\n tol=1, style='default')\ndef test_boxplot_rc_parameters():\n # Randomness used for bootstrapping.\n np.random.seed(937)\n\n fig, ax = plt.subplots(3)\n\n rc_axis0 = {\n 'boxplot.notch': True,\n 'boxplot.whiskers': [5, 95],\n 'boxplot.bootstrap': 10000,\n\n 'boxplot.flierprops.color': 'b',\n 'boxplot.flierprops.marker': 'o',\n 'boxplot.flierprops.markerfacecolor': 'g',\n 'boxplot.flierprops.markeredgecolor': 'b',\n 'boxplot.flierprops.markersize': 5,\n 'boxplot.flierprops.linestyle': '--',\n 'boxplot.flierprops.linewidth': 2.0,\n\n 'boxplot.boxprops.color': 'r',\n 'boxplot.boxprops.linewidth': 2.0,\n 'boxplot.boxprops.linestyle': '--',\n\n 'boxplot.capprops.color': 'c',\n 'boxplot.capprops.linewidth': 2.0,\n 'boxplot.capprops.linestyle': '--',\n\n 'boxplot.medianprops.color': 'k',\n 'boxplot.medianprops.linewidth': 2.0,\n 'boxplot.medianprops.linestyle': '--',\n }\n\n rc_axis1 = {\n 'boxplot.vertical': False,\n 'boxplot.whiskers': [0, 100],\n 'boxplot.patchartist': True,\n }\n\n rc_axis2 = {\n 'boxplot.whiskers': 2.0,\n 'boxplot.showcaps': False,\n 'boxplot.showbox': False,\n 'boxplot.showfliers': False,\n 'boxplot.showmeans': True,\n 'boxplot.meanline': True,\n\n 'boxplot.meanprops.color': 'c',\n 'boxplot.meanprops.linewidth': 2.0,\n 'boxplot.meanprops.linestyle': '--',\n\n 'boxplot.whiskerprops.color': 'r',\n 'boxplot.whiskerprops.linewidth': 2.0,\n 'boxplot.whiskerprops.linestyle': '-.',\n }\n dict_list = [rc_axis0, rc_axis1, rc_axis2]\n for axis, rc_axis in zip(ax, dict_list):\n _rc_test_bxp_helper(axis, rc_axis)\n\n assert (matplotlib.patches.PathPatch in\n [type(t) for t in ax[1].get_children()])\n\n\n@image_comparison(['boxplot_with_CIarray.png'],\n remove_text=True, savefig_kwarg={'dpi': 40}, style='default')\ndef test_boxplot_with_CIarray():\n # Randomness used for bootstrapping.\n np.random.seed(937)\n\n x = np.linspace(-7, 7, 140)\n x = np.hstack([-25, x, 25])\n fig, ax = plt.subplots()\n CIs = np.array([[-1.5, 3.], [-1., 3.5]])\n\n # show a boxplot with Matplotlib medians and confidence intervals, and\n # another with manual values\n ax.boxplot([x, x], bootstrap=10000, usermedians=[None, 1.0],\n conf_intervals=CIs, notch=1)\n ax.set_ylim((-30, 30))\n\n\n@image_comparison(['boxplot_no_inverted_whisker.png'],\n remove_text=True, savefig_kwarg={'dpi': 40}, style='default')\ndef test_boxplot_no_weird_whisker():\n x = np.array([3, 9000, 150, 88, 350, 200000, 1400, 960],\n dtype=np.float64)\n ax1 = plt.axes()\n ax1.boxplot(x)\n ax1.set_yscale('log')\n ax1.yaxis.grid(False, which='minor')\n ax1.xaxis.grid(False)\n\n\ndef test_boxplot_bad_medians():\n x = np.linspace(-7, 7, 140)\n x = np.hstack([-25, x, 25])\n fig, ax = plt.subplots()\n with pytest.raises(ValueError):\n ax.boxplot(x, usermedians=[1, 2])\n with pytest.raises(ValueError):\n ax.boxplot([x, x], usermedians=[[1, 2], [1, 2]])\n\n\ndef test_boxplot_bad_ci():\n x = np.linspace(-7, 7, 140)\n x = np.hstack([-25, x, 25])\n fig, ax = plt.subplots()\n with pytest.raises(ValueError):\n ax.boxplot([x, x], conf_intervals=[[1, 2]])\n with pytest.raises(ValueError):\n ax.boxplot([x, x], conf_intervals=[[1, 2], [1]])\n\n\ndef test_boxplot_zorder():\n x = np.arange(10)\n fig, ax = plt.subplots()\n assert ax.boxplot(x)['boxes'][0].get_zorder() == 2\n assert 
ax.boxplot(x, zorder=10)['boxes'][0].get_zorder() == 10\n\n\ndef test_boxplot_marker_behavior():\n plt.rcParams['lines.marker'] = 's'\n plt.rcParams['boxplot.flierprops.marker'] = 'o'\n plt.rcParams['boxplot.meanprops.marker'] = '^'\n fig, ax = plt.subplots()\n test_data = np.arange(100)\n test_data[-1] = 150 # a flier point\n bxp_handle = ax.boxplot(test_data, showmeans=True)\n for bxp_lines in ['whiskers', 'caps', 'boxes', 'medians']:\n for each_line in bxp_handle[bxp_lines]:\n # Ensure that the rcParams['lines.marker'] is overridden by ''\n assert each_line.get_marker() == ''\n\n # Ensure that markers for fliers and means aren't overridden with ''\n assert bxp_handle['fliers'][0].get_marker() == 'o'\n assert bxp_handle['means'][0].get_marker() == '^'\n\n\n@image_comparison(['boxplot_mod_artists_after_plotting.png'],\n remove_text=True, savefig_kwarg={'dpi': 40}, style='default')\ndef test_boxplot_mod_artist_after_plotting():\n x = [0.15, 0.11, 0.06, 0.06, 0.12, 0.56, -0.56]\n fig, ax = plt.subplots()\n bp = ax.boxplot(x, sym=\"o\")\n for key in bp:\n for obj in bp[key]:\n obj.set_color('green')\n\n\n@image_comparison(['violinplot_vert_baseline.png',\n 'violinplot_vert_baseline.png'])\ndef test_vert_violinplot_baseline():\n # First 9 digits of frac(sqrt(2))\n np.random.seed(414213562)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax = plt.axes()\n ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,\n showmedians=0)\n\n # Reuse testcase from above for a labeled data test\n data = {\"d\": data}\n fig, ax = plt.subplots()\n ax.violinplot(\"d\", positions=range(4), showmeans=0, showextrema=0,\n showmedians=0, data=data)\n\n\n@image_comparison(['violinplot_vert_showmeans.png'])\ndef test_vert_violinplot_showmeans():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(3))\n np.random.seed(732050807)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), showmeans=1, showextrema=0,\n showmedians=0)\n\n\n@image_comparison(['violinplot_vert_showextrema.png'])\ndef test_vert_violinplot_showextrema():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(5))\n np.random.seed(236067977)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), showmeans=0, showextrema=1,\n showmedians=0)\n\n\n@image_comparison(['violinplot_vert_showmedians.png'])\ndef test_vert_violinplot_showmedians():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(7))\n np.random.seed(645751311)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,\n showmedians=1)\n\n\n@image_comparison(['violinplot_vert_showall.png'])\ndef test_vert_violinplot_showall():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(11))\n np.random.seed(316624790)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), showmeans=1, showextrema=1,\n showmedians=1,\n quantiles=[[0.1, 0.9], [0.2, 0.8], [0.3, 0.7], [0.4, 0.6]])\n\n\n@image_comparison(['violinplot_vert_custompoints_10.png'])\ndef test_vert_violinplot_custompoints_10():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(13))\n np.random.seed(605551275)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,\n showmedians=0, points=10)\n\n\n@image_comparison(['violinplot_vert_custompoints_200.png'])\ndef test_vert_violinplot_custompoints_200():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(17))\n 
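# (Any fixed seed would do; seeding from the fractional digits of\n # irrational square roots is just the convention these tests follow.)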
np.random.seed(123105625)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,\n showmedians=0, points=200)\n\n\n@image_comparison(['violinplot_horiz_baseline.png'])\ndef test_horiz_violinplot_baseline():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(19))\n np.random.seed(358898943)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), vert=False, showmeans=0,\n showextrema=0, showmedians=0)\n\n\n@image_comparison(['violinplot_horiz_showmedians.png'])\ndef test_horiz_violinplot_showmedians():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(23))\n np.random.seed(795831523)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), vert=False, showmeans=0,\n showextrema=0, showmedians=1)\n\n\n@image_comparison(['violinplot_horiz_showmeans.png'])\ndef test_horiz_violinplot_showmeans():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(29))\n np.random.seed(385164807)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), vert=False, showmeans=1,\n showextrema=0, showmedians=0)\n\n\n@image_comparison(['violinplot_horiz_showextrema.png'])\ndef test_horiz_violinplot_showextrema():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(31))\n np.random.seed(567764362)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), vert=False, showmeans=0,\n showextrema=1, showmedians=0)\n\n\n@image_comparison(['violinplot_horiz_showall.png'])\ndef test_horiz_violinplot_showall():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(37))\n np.random.seed(82762530)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), vert=False, showmeans=1,\n showextrema=1, showmedians=1,\n quantiles=[[0.1, 0.9], [0.2, 0.8], [0.3, 0.7], [0.4, 0.6]])\n\n\n@image_comparison(['violinplot_horiz_custompoints_10.png'])\ndef test_horiz_violinplot_custompoints_10():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(41))\n np.random.seed(403124237)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), vert=False, showmeans=0,\n showextrema=0, showmedians=0, points=10)\n\n\n@image_comparison(['violinplot_horiz_custompoints_200.png'])\ndef test_horiz_violinplot_custompoints_200():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(43))\n np.random.seed(557438524)\n data = [np.random.normal(size=100) for _ in range(4)]\n ax.violinplot(data, positions=range(4), vert=False, showmeans=0,\n showextrema=0, showmedians=0, points=200)\n\n\ndef test_violinplot_bad_positions():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(47))\n np.random.seed(855654600)\n data = [np.random.normal(size=100) for _ in range(4)]\n with pytest.raises(ValueError):\n ax.violinplot(data, positions=range(5))\n\n\ndef test_violinplot_bad_widths():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(53))\n np.random.seed(280109889)\n data = [np.random.normal(size=100) for _ in range(4)]\n with pytest.raises(ValueError):\n ax.violinplot(data, positions=range(4), widths=[1, 2, 3])\n\n\ndef test_violinplot_bad_quantiles():\n ax = plt.axes()\n # First 9 digits of frac(sqrt(73))\n np.random.seed(544003745)\n data = [np.random.normal(size=100)]\n\n # Different size quantile list and plots\n with pytest.raises(ValueError):\n ax.violinplot(data, quantiles=[[0.1, 0.2], [0.5, 0.7]])\n\n\ndef test_violinplot_outofrange_quantiles():\n ax = plt.axes()\n # 
First 9 digits of frac(sqrt(79))\n np.random.seed(888194417)\n data = [np.random.normal(size=100)]\n\n # Quantile value above 100\n with pytest.raises(ValueError):\n ax.violinplot(data, quantiles=[[0.1, 0.2, 0.3, 1.05]])\n\n # Quantile value below 0\n with pytest.raises(ValueError):\n ax.violinplot(data, quantiles=[[-0.05, 0.2, 0.3, 0.75]])\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_violinplot_single_list_quantiles(fig_test, fig_ref):\n # Ensures quantile list for 1D can be passed in as single list\n # First 9 digits of frac(sqrt(83))\n np.random.seed(110433579)\n data = [np.random.normal(size=100)]\n\n # Test image\n ax = fig_test.subplots()\n ax.violinplot(data, quantiles=[0.1, 0.3, 0.9])\n\n # Reference image\n ax = fig_ref.subplots()\n ax.violinplot(data, quantiles=[[0.1, 0.3, 0.9]])\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_violinplot_pandas_series(fig_test, fig_ref, pd):\n np.random.seed(110433579)\n s1 = pd.Series(np.random.normal(size=7), index=[9, 8, 7, 6, 5, 4, 3])\n s2 = pd.Series(np.random.normal(size=9), index=list('ABCDEFGHI'))\n s3 = pd.Series(np.random.normal(size=11))\n fig_test.subplots().violinplot([s1, s2, s3])\n fig_ref.subplots().violinplot([s1.values, s2.values, s3.values])\n\n\ndef test_manage_xticks():\n _, ax = plt.subplots()\n ax.set_xlim(0, 4)\n old_xlim = ax.get_xlim()\n np.random.seed(0)\n y1 = np.random.normal(10, 3, 20)\n y2 = np.random.normal(3, 1, 20)\n ax.boxplot([y1, y2], positions=[1, 2], manage_ticks=False)\n new_xlim = ax.get_xlim()\n assert_array_equal(old_xlim, new_xlim)\n\n\ndef test_boxplot_not_single():\n fig, ax = plt.subplots()\n ax.boxplot(np.random.rand(100), positions=[3])\n ax.boxplot(np.random.rand(100), positions=[5])\n fig.canvas.draw()\n assert ax.get_xlim() == (2.5, 5.5)\n assert list(ax.get_xticks()) == [3, 5]\n assert [t.get_text() for t in ax.get_xticklabels()] == [\"3\", \"5\"]\n\n\ndef test_tick_space_size_0():\n # allow font size to be zero, which affects ticks when there is\n # no other text in the figure.\n plt.plot([0, 1], [0, 1])\n matplotlib.rcParams.update({'font.size': 0})\n b = io.BytesIO()\n plt.savefig(b, dpi=80, format='raw')\n\n\n@image_comparison(['errorbar_basic', 'errorbar_mixed', 'errorbar_basic'])\ndef test_errorbar():\n x = np.arange(0.1, 4, 0.5)\n y = np.exp(-x)\n\n yerr = 0.1 + 0.2*np.sqrt(x)\n xerr = 0.1 + yerr\n\n # First illustrate basic pyplot interface, using defaults where possible.\n fig = plt.figure()\n ax = fig.gca()\n ax.errorbar(x, y, xerr=0.2, yerr=0.4)\n ax.set_title(\"Simplest errorbars, 0.2 in x, 0.4 in y\")\n\n # Now switch to a more OO interface to exercise more features.\n fig, axs = plt.subplots(nrows=2, ncols=2, sharex=True)\n ax = axs[0, 0]\n ax.errorbar(x, y, yerr=yerr, fmt='o')\n ax.set_title('Vert. symmetric')\n\n # With 4 subplots, reduce the number of axis ticks to avoid crowding.\n ax.locator_params(nbins=4)\n\n ax = axs[0, 1]\n ax.errorbar(x, y, xerr=xerr, fmt='o', alpha=0.4)\n ax.set_title('Hor. 
symmetric w/ alpha')\n\n ax = axs[1, 0]\n ax.errorbar(x, y, yerr=[yerr, 2*yerr], xerr=[xerr, 2*xerr], fmt='--o')\n ax.set_title('H, V asymmetric')\n\n ax = axs[1, 1]\n ax.set_yscale('log')\n # Here we have to be careful to keep all y values positive:\n ylower = np.maximum(1e-2, y - yerr)\n yerr_lower = y - ylower\n\n ax.errorbar(x, y, yerr=[yerr_lower, 2*yerr], xerr=xerr,\n fmt='o', ecolor='g', capthick=2)\n ax.set_title('Mixed sym., log y')\n\n fig.suptitle('Variable errorbars')\n\n # Reuse the first testcase from above for a labeled data test\n data = {\"x\": x, \"y\": y}\n fig = plt.figure()\n ax = fig.gca()\n ax.errorbar(\"x\", \"y\", xerr=0.2, yerr=0.4, data=data)\n ax.set_title(\"Simplest errorbars, 0.2 in x, 0.4 in y\")\n\n\ndef test_errorbar_colorcycle():\n\n f, ax = plt.subplots()\n x = np.arange(10)\n y = 2*x\n\n e1, _, _ = ax.errorbar(x, y, c=None)\n e2, _, _ = ax.errorbar(x, 2*y, c=None)\n ln1, = ax.plot(x, 4*y)\n\n assert mcolors.to_rgba(e1.get_color()) == mcolors.to_rgba('C0')\n assert mcolors.to_rgba(e2.get_color()) == mcolors.to_rgba('C1')\n assert mcolors.to_rgba(ln1.get_color()) == mcolors.to_rgba('C2')\n\n\n@check_figures_equal()\ndef test_errorbar_cycle_ecolor(fig_test, fig_ref):\n x = np.arange(0.1, 4, 0.5)\n y = [np.exp(-x+n) for n in range(4)]\n\n axt = fig_test.subplots()\n axr = fig_ref.subplots()\n\n for yi, color in zip(y, ['C0', 'C1', 'C2', 'C3']):\n axt.errorbar(x, yi, yerr=(yi * 0.25), linestyle='-',\n marker='o', ecolor='black')\n axr.errorbar(x, yi, yerr=(yi * 0.25), linestyle='-',\n marker='o', color=color, ecolor='black')\n\n\ndef test_errorbar_shape():\n fig = plt.figure()\n ax = fig.gca()\n\n x = np.arange(0.1, 4, 0.5)\n y = np.exp(-x)\n yerr1 = 0.1 + 0.2*np.sqrt(x)\n yerr = np.vstack((yerr1, 2*yerr1)).T\n xerr = 0.1 + yerr\n\n with pytest.raises(ValueError):\n ax.errorbar(x, y, yerr=yerr, fmt='o')\n with pytest.raises(ValueError):\n ax.errorbar(x, y, xerr=xerr, fmt='o')\n with pytest.raises(ValueError):\n ax.errorbar(x, y, yerr=yerr, xerr=xerr, fmt='o')\n\n\n@image_comparison(['errorbar_limits'])\ndef test_errorbar_limits():\n x = np.arange(0.5, 5.5, 0.5)\n y = np.exp(-x)\n xerr = 0.1\n yerr = 0.2\n ls = 'dotted'\n\n fig, ax = plt.subplots()\n\n # standard error bars\n ax.errorbar(x, y, xerr=xerr, yerr=yerr, ls=ls, color='blue')\n\n # including upper limits\n uplims = np.zeros_like(x)\n uplims[[1, 5, 9]] = True\n ax.errorbar(x, y+0.5, xerr=xerr, yerr=yerr, uplims=uplims, ls=ls,\n color='green')\n\n # including lower limits\n lolims = np.zeros_like(x)\n lolims[[2, 4, 8]] = True\n ax.errorbar(x, y+1.0, xerr=xerr, yerr=yerr, lolims=lolims, ls=ls,\n color='red')\n\n # including upper and lower limits\n ax.errorbar(x, y+1.5, marker='o', ms=8, xerr=xerr, yerr=yerr,\n lolims=lolims, uplims=uplims, ls=ls, color='magenta')\n\n # including xlower and xupper limits\n xerr = 0.2\n yerr = np.full_like(x, 0.2)\n yerr[[3, 6]] = 0.3\n xlolims = lolims\n xuplims = uplims\n lolims = np.zeros_like(x)\n uplims = np.zeros_like(x)\n lolims[[6]] = True\n uplims[[3]] = True\n ax.errorbar(x, y+2.1, marker='o', ms=8, xerr=xerr, yerr=yerr,\n xlolims=xlolims, xuplims=xuplims, uplims=uplims,\n lolims=lolims, ls='none', mec='blue', capsize=0,\n color='cyan')\n ax.set_xlim((0, 5.5))\n ax.set_title('Errorbar upper and lower limits')\n\n\ndef test_errobar_nonefmt():\n # Check that passing 'none' as a format still plots errorbars\n x = np.arange(5)\n y = np.arange(5)\n\n plotline, _, barlines = plt.errorbar(x, y, xerr=1, yerr=1, fmt='none')\n assert plotline is None\n for errbar in 
barlines:\n assert np.all(errbar.get_color() == mcolors.to_rgba('C0'))\n\n\ndef test_errorbar_line_specific_kwargs():\n # Check that passing line-specific keyword arguments will not result in\n # errors.\n x = np.arange(5)\n y = np.arange(5)\n\n plotline, _, _ = plt.errorbar(x, y, xerr=1, yerr=1, ls='None',\n marker='s', fillstyle='full',\n drawstyle='steps-mid',\n dash_capstyle='round',\n dash_joinstyle='miter',\n solid_capstyle='butt',\n solid_joinstyle='bevel')\n assert plotline.get_fillstyle() == 'full'\n assert plotline.get_drawstyle() == 'steps-mid'\n\n\n@check_figures_equal(extensions=['png'])\ndef test_errorbar_with_prop_cycle(fig_test, fig_ref):\n ax = fig_ref.subplots()\n ax.errorbar(x=[2, 4, 10], y=[0, 1, 2], yerr=0.5,\n ls='--', marker='s', mfc='k')\n ax.errorbar(x=[2, 4, 10], y=[2, 3, 4], yerr=0.5, color='tab:green',\n ls=':', marker='s', mfc='y')\n ax.errorbar(x=[2, 4, 10], y=[4, 5, 6], yerr=0.5, fmt='tab:blue',\n ls='-.', marker='o', mfc='c')\n ax.set_xlim(1, 11)\n\n _cycle = cycler(ls=['--', ':', '-.'], marker=['s', 's', 'o'],\n mfc=['k', 'y', 'c'], color=['b', 'g', 'r'])\n plt.rc(\"axes\", prop_cycle=_cycle)\n ax = fig_test.subplots()\n ax.errorbar(x=[2, 4, 10], y=[0, 1, 2], yerr=0.5)\n ax.errorbar(x=[2, 4, 10], y=[2, 3, 4], yerr=0.5, color='tab:green')\n ax.errorbar(x=[2, 4, 10], y=[4, 5, 6], yerr=0.5, fmt='tab:blue')\n ax.set_xlim(1, 11)\n\n\ndef test_errorbar_every_invalid():\n x = np.linspace(0, 1, 15)\n y = x * (1-x)\n yerr = y/6\n\n ax = plt.figure().subplots()\n\n with pytest.raises(ValueError, match='not a tuple of two integers'):\n ax.errorbar(x, y, yerr, errorevery=(1, 2, 3))\n with pytest.raises(ValueError, match='not a tuple of two integers'):\n ax.errorbar(x, y, yerr, errorevery=(1.3, 3))\n with pytest.raises(ValueError, match='not a valid NumPy fancy index'):\n ax.errorbar(x, y, yerr, errorevery=[False, True])\n with pytest.raises(ValueError, match='not a recognized value'):\n ax.errorbar(x, y, yerr, errorevery='foobar')\n\n\n@check_figures_equal()\ndef test_errorbar_every(fig_test, fig_ref):\n x = np.linspace(0, 1, 15)\n y = x * (1-x)\n yerr = y/6\n\n ax_ref = fig_ref.subplots()\n ax_test = fig_test.subplots()\n\n for color, shift in zip('rgbk', [0, 0, 2, 7]):\n y += .02\n\n # Check errorevery using an explicit offset and step.\n ax_test.errorbar(x, y, yerr, errorevery=(shift, 4),\n capsize=4, c=color)\n\n # Using manual errorbars\n # n.b. 
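# --- Editor's illustrative sketch (not part of this test suite; the
# _errorevery_sketch helper name is hypothetical). errorevery=(offset, step)
# draws error bars only on points offset, offset+step, offset+2*step, ...
# while the line still passes through every data point; the manual
# equivalent, mirrored by the reference figures in the test nearby, is
# slicing the arrays with x[offset::step].
import numpy as np
import matplotlib.pyplot as plt

def _errorevery_sketch(offset=1, step=4):
    x = np.linspace(0, 1, 15)
    y = x * (1 - x)
    yerr = y / 6
    fig, (ax1, ax2) = plt.subplots(2)
    ax1.errorbar(x, y, yerr, errorevery=(offset, step))  # built-in subsampling
    ax2.plot(x, y)                                       # hand-rolled equivalent
    ax2.errorbar(x[offset::step], y[offset::step], yerr[offset::step],
                 fmt='none')
    return fig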
errorbar draws the main plot at z=2.1 by default\n ax_ref.plot(x, y, c=color, zorder=2.1)\n ax_ref.errorbar(x[shift::4], y[shift::4], yerr[shift::4],\n capsize=4, c=color, fmt='none')\n\n # Check that markevery is propagated to line, without affecting errorbars.\n ax_test.errorbar(x, y + 0.1, yerr, markevery=(1, 4), capsize=4, fmt='o')\n ax_ref.plot(x[1::4], y[1::4] + 0.1, 'o', zorder=2.1)\n ax_ref.errorbar(x, y + 0.1, yerr, capsize=4, fmt='none')\n\n # Check that passing a slice to markevery/errorevery works.\n ax_test.errorbar(x, y + 0.2, yerr, errorevery=slice(2, None, 3),\n markevery=slice(2, None, 3),\n capsize=4, c='C0', fmt='o')\n ax_ref.plot(x[2::3], y[2::3] + 0.2, 'o', c='C0', zorder=2.1)\n ax_ref.errorbar(x[2::3], y[2::3] + 0.2, yerr[2::3],\n capsize=4, c='C0', fmt='none')\n\n # Check that passing an iterable to markevery/errorevery works.\n ax_test.errorbar(x, y + 0.2, yerr, errorevery=[False, True, False] * 5,\n markevery=[False, True, False] * 5,\n capsize=4, c='C1', fmt='o')\n ax_ref.plot(x[1::3], y[1::3] + 0.2, 'o', c='C1', zorder=2.1)\n ax_ref.errorbar(x[1::3], y[1::3] + 0.2, yerr[1::3],\n capsize=4, c='C1', fmt='none')\n\n\[email protected]('elinewidth', [[1, 2, 3],\n np.array([1, 2, 3]),\n 1])\ndef test_errorbar_linewidth_type(elinewidth):\n plt.errorbar([1, 2, 3], [1, 2, 3], yerr=[1, 2, 3], elinewidth=elinewidth)\n\n\n@image_comparison(['hist_stacked_stepfilled', 'hist_stacked_stepfilled'])\ndef test_hist_stacked_stepfilled():\n # make some data\n d1 = np.linspace(1, 3, 20)\n d2 = np.linspace(0, 10, 50)\n fig, ax = plt.subplots()\n ax.hist((d1, d2), histtype=\"stepfilled\", stacked=True)\n\n # Reuse testcase from above for a labeled data test\n data = {\"x\": (d1, d2)}\n fig, ax = plt.subplots()\n ax.hist(\"x\", histtype=\"stepfilled\", stacked=True, data=data)\n\n\n@image_comparison(['hist_offset'])\ndef test_hist_offset():\n # make some data\n d1 = np.linspace(0, 10, 50)\n d2 = np.linspace(1, 3, 20)\n fig, ax = plt.subplots()\n ax.hist(d1, bottom=5)\n ax.hist(d2, bottom=15)\n\n\n@image_comparison(['hist_step.png'], remove_text=True)\ndef test_hist_step():\n # make some data\n d1 = np.linspace(1, 3, 20)\n fig, ax = plt.subplots()\n ax.hist(d1, histtype=\"step\")\n ax.set_ylim(0, 10)\n ax.set_xlim(-1, 5)\n\n\n@image_comparison(['hist_step_horiz.png'])\ndef test_hist_step_horiz():\n # make some data\n d1 = np.linspace(0, 10, 50)\n d2 = np.linspace(1, 3, 20)\n fig, ax = plt.subplots()\n ax.hist((d1, d2), histtype=\"step\", orientation=\"horizontal\")\n\n\n@image_comparison(['hist_stacked_weights'])\ndef test_hist_stacked_weighted():\n # make some data\n d1 = np.linspace(0, 10, 50)\n d2 = np.linspace(1, 3, 20)\n w1 = np.linspace(0.01, 3.5, 50)\n w2 = np.linspace(0.05, 2., 20)\n fig, ax = plt.subplots()\n ax.hist((d1, d2), weights=(w1, w2), histtype=\"stepfilled\", stacked=True)\n\n\[email protected](\"use_line_collection\", [True, False],\n ids=['w/ line collection', 'w/o line collection'])\n@image_comparison(['stem.png'], style='mpl20', remove_text=True)\ndef test_stem(use_line_collection):\n x = np.linspace(0.1, 2 * np.pi, 100)\n\n fig, ax = plt.subplots()\n # Label is a single space to force a legend to be drawn, but to avoid any\n # text being drawn\n ax.stem(x, np.cos(x),\n linefmt='C2-.', markerfmt='k+', basefmt='C1-.', label=' ',\n use_line_collection=use_line_collection)\n ax.legend()\n\n\ndef test_stem_args():\n fig, ax = plt.subplots()\n\n x = list(range(10))\n y = list(range(10))\n\n # Test the call signatures\n ax.stem(y)\n ax.stem(x, y)\n ax.stem(x, y, 'r--')\n 
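# --- Editor's illustrative sketch (not part of this test suite; the
# _stem_signatures_sketch helper name is hypothetical). It collects the
# stem() call forms exercised by test_stem_args: with a single argument
# the x positions default to range(len(y)), and the optional third
# positional argument is the stem line format.
import matplotlib.pyplot as plt

def _stem_signatures_sketch():
    y = [2, 1, 3]
    fig, ax = plt.subplots()
    ax.stem(y)                    # x defaults to 0..len(y)-1
    ax.stem([0, 1, 2], y)         # explicit x, y
    ax.stem([0, 1, 2], y, 'g--')  # linefmt as third positional arg
    return fig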
ax.stem(x, y, 'r--', basefmt='b--')\n\n\ndef test_stem_dates():\n fig, ax = plt.subplots(1, 1)\n xs = [dateutil.parser.parse(\"2013-9-28 11:00:00\"),\n dateutil.parser.parse(\"2013-9-28 12:00:00\")]\n ys = [100, 200]\n ax.stem(xs, ys, \"*-\")\n\n\[email protected](\"use_line_collection\", [True, False],\n ids=['w/ line collection', 'w/o line collection'])\n@image_comparison(['stem_orientation.png'], style='mpl20', remove_text=True)\ndef test_stem_orientation(use_line_collection):\n x = np.linspace(0.1, 2*np.pi, 50)\n\n fig, ax = plt.subplots()\n ax.stem(x, np.cos(x),\n linefmt='C2-.', markerfmt='kx', basefmt='C1-.',\n use_line_collection=use_line_collection, orientation='horizontal')\n\n\n@image_comparison(['hist_stacked_stepfilled_alpha'])\ndef test_hist_stacked_stepfilled_alpha():\n # make some data\n d1 = np.linspace(1, 3, 20)\n d2 = np.linspace(0, 10, 50)\n fig, ax = plt.subplots()\n ax.hist((d1, d2), histtype=\"stepfilled\", stacked=True, alpha=0.5)\n\n\n@image_comparison(['hist_stacked_step'])\ndef test_hist_stacked_step():\n # make some data\n d1 = np.linspace(1, 3, 20)\n d2 = np.linspace(0, 10, 50)\n fig, ax = plt.subplots()\n ax.hist((d1, d2), histtype=\"step\", stacked=True)\n\n\n@image_comparison(['hist_stacked_normed'])\ndef test_hist_stacked_density():\n # make some data\n d1 = np.linspace(1, 3, 20)\n d2 = np.linspace(0, 10, 50)\n fig, ax = plt.subplots()\n ax.hist((d1, d2), stacked=True, density=True)\n\n\n@image_comparison(['hist_step_bottom.png'], remove_text=True)\ndef test_hist_step_bottom():\n # make some data\n d1 = np.linspace(1, 3, 20)\n fig, ax = plt.subplots()\n ax.hist(d1, bottom=np.arange(10), histtype=\"stepfilled\")\n\n\ndef test_hist_stepfilled_geometry():\n bins = [0, 1, 2, 3]\n data = [0, 0, 1, 1, 1, 2]\n _, _, (polygon, ) = plt.hist(data,\n bins=bins,\n histtype='stepfilled')\n xy = [[0, 0], [0, 2], [1, 2], [1, 3], [2, 3], [2, 1], [3, 1],\n [3, 0], [2, 0], [2, 0], [1, 0], [1, 0], [0, 0]]\n assert_array_equal(polygon.get_xy(), xy)\n\n\ndef test_hist_step_geometry():\n bins = [0, 1, 2, 3]\n data = [0, 0, 1, 1, 1, 2]\n _, _, (polygon, ) = plt.hist(data,\n bins=bins,\n histtype='step')\n xy = [[0, 0], [0, 2], [1, 2], [1, 3], [2, 3], [2, 1], [3, 1], [3, 0]]\n assert_array_equal(polygon.get_xy(), xy)\n\n\ndef test_hist_stepfilled_bottom_geometry():\n bins = [0, 1, 2, 3]\n data = [0, 0, 1, 1, 1, 2]\n _, _, (polygon, ) = plt.hist(data,\n bins=bins,\n bottom=[1, 2, 1.5],\n histtype='stepfilled')\n xy = [[0, 1], [0, 3], [1, 3], [1, 5], [2, 5], [2, 2.5], [3, 2.5],\n [3, 1.5], [2, 1.5], [2, 2], [1, 2], [1, 1], [0, 1]]\n assert_array_equal(polygon.get_xy(), xy)\n\n\ndef test_hist_step_bottom_geometry():\n bins = [0, 1, 2, 3]\n data = [0, 0, 1, 1, 1, 2]\n _, _, (polygon, ) = plt.hist(data,\n bins=bins,\n bottom=[1, 2, 1.5],\n histtype='step')\n xy = [[0, 1], [0, 3], [1, 3], [1, 5], [2, 5], [2, 2.5], [3, 2.5], [3, 1.5]]\n assert_array_equal(polygon.get_xy(), xy)\n\n\ndef test_hist_stacked_stepfilled_geometry():\n bins = [0, 1, 2, 3]\n data_1 = [0, 0, 1, 1, 1, 2]\n data_2 = [0, 1, 2]\n _, _, patches = plt.hist([data_1, data_2],\n bins=bins,\n stacked=True,\n histtype='stepfilled')\n\n assert len(patches) == 2\n\n polygon, = patches[0]\n xy = [[0, 0], [0, 2], [1, 2], [1, 3], [2, 3], [2, 1], [3, 1],\n [3, 0], [2, 0], [2, 0], [1, 0], [1, 0], [0, 0]]\n assert_array_equal(polygon.get_xy(), xy)\n\n polygon, = patches[1]\n xy = [[0, 2], [0, 3], [1, 3], [1, 4], [2, 4], [2, 2], [3, 2],\n [3, 1], [2, 1], [2, 3], [1, 3], [1, 2], [0, 2]]\n assert_array_equal(polygon.get_xy(), 
xy)\n\n\ndef test_hist_stacked_step_geometry():\n bins = [0, 1, 2, 3]\n data_1 = [0, 0, 1, 1, 1, 2]\n data_2 = [0, 1, 2]\n _, _, patches = plt.hist([data_1, data_2],\n bins=bins,\n stacked=True,\n histtype='step')\n\n assert len(patches) == 2\n\n polygon, = patches[0]\n xy = [[0, 0], [0, 2], [1, 2], [1, 3], [2, 3], [2, 1], [3, 1], [3, 0]]\n assert_array_equal(polygon.get_xy(), xy)\n\n polygon, = patches[1]\n xy = [[0, 2], [0, 3], [1, 3], [1, 4], [2, 4], [2, 2], [3, 2], [3, 1]]\n assert_array_equal(polygon.get_xy(), xy)\n\n\ndef test_hist_stacked_stepfilled_bottom_geometry():\n bins = [0, 1, 2, 3]\n data_1 = [0, 0, 1, 1, 1, 2]\n data_2 = [0, 1, 2]\n _, _, patches = plt.hist([data_1, data_2],\n bins=bins,\n stacked=True,\n bottom=[1, 2, 1.5],\n histtype='stepfilled')\n\n assert len(patches) == 2\n\n polygon, = patches[0]\n xy = [[0, 1], [0, 3], [1, 3], [1, 5], [2, 5], [2, 2.5], [3, 2.5],\n [3, 1.5], [2, 1.5], [2, 2], [1, 2], [1, 1], [0, 1]]\n assert_array_equal(polygon.get_xy(), xy)\n\n polygon, = patches[1]\n xy = [[0, 3], [0, 4], [1, 4], [1, 6], [2, 6], [2, 3.5], [3, 3.5],\n [3, 2.5], [2, 2.5], [2, 5], [1, 5], [1, 3], [0, 3]]\n assert_array_equal(polygon.get_xy(), xy)\n\n\ndef test_hist_stacked_step_bottom_geometry():\n bins = [0, 1, 2, 3]\n data_1 = [0, 0, 1, 1, 1, 2]\n data_2 = [0, 1, 2]\n _, _, patches = plt.hist([data_1, data_2],\n bins=bins,\n stacked=True,\n bottom=[1, 2, 1.5],\n histtype='step')\n\n assert len(patches) == 2\n\n polygon, = patches[0]\n xy = [[0, 1], [0, 3], [1, 3], [1, 5], [2, 5], [2, 2.5], [3, 2.5], [3, 1.5]]\n assert_array_equal(polygon.get_xy(), xy)\n\n polygon, = patches[1]\n xy = [[0, 3], [0, 4], [1, 4], [1, 6], [2, 6], [2, 3.5], [3, 3.5], [3, 2.5]]\n assert_array_equal(polygon.get_xy(), xy)\n\n\n@image_comparison(['hist_stacked_bar'])\ndef test_hist_stacked_bar():\n # make some data\n d = [[100, 100, 100, 100, 200, 320, 450, 80, 20, 600, 310, 800],\n [20, 23, 50, 11, 100, 420], [120, 120, 120, 140, 140, 150, 180],\n [60, 60, 60, 60, 300, 300, 5, 5, 5, 5, 10, 300],\n [555, 555, 555, 30, 30, 30, 30, 30, 100, 100, 100, 100, 30, 30],\n [30, 30, 30, 30, 400, 400, 400, 400, 400, 400, 400, 400]]\n colors = [(0.5759849696758961, 1.0, 0.0), (0.0, 1.0, 0.350624650815206),\n (0.0, 1.0, 0.6549834156005998), (0.0, 0.6569064625276622, 1.0),\n (0.28302699607823545, 0.0, 1.0), (0.6849123462299822, 0.0, 1.0)]\n labels = ['green', 'orange', ' yellow', 'magenta', 'black']\n fig, ax = plt.subplots()\n ax.hist(d, bins=10, histtype='barstacked', align='mid', color=colors,\n label=labels)\n ax.legend(loc='upper right', bbox_to_anchor=(1.0, 1.0), ncol=1)\n\n\ndef test_hist_barstacked_bottom_unchanged():\n b = np.array([10, 20])\n plt.hist([[0, 1], [0, 1]], 2, histtype=\"barstacked\", bottom=b)\n assert b.tolist() == [10, 20]\n\n\ndef test_hist_emptydata():\n fig, ax = plt.subplots()\n ax.hist([[], range(10), range(10)], histtype=\"step\")\n\n\ndef test_hist_labels():\n # test singleton labels OK\n fig, ax = plt.subplots()\n _, _, bars = ax.hist([0, 1], label=0)\n assert bars[0].get_label() == '0'\n _, _, bars = ax.hist([0, 1], label=[0])\n assert bars[0].get_label() == '0'\n _, _, bars = ax.hist([0, 1], label=None)\n assert bars[0].get_label() == '_nolegend_'\n _, _, bars = ax.hist([0, 1], label='0')\n assert bars[0].get_label() == '0'\n _, _, bars = ax.hist([0, 1], label='00')\n assert bars[0].get_label() == '00'\n\n\n@image_comparison(['transparent_markers'], remove_text=True)\ndef test_transparent_markers():\n np.random.seed(0)\n data = np.random.random(50)\n\n fig, ax = 
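# --- Editor's illustrative sketch (not part of this test suite; the
# _hist_step_outline_sketch helper name is hypothetical). For
# histtype='step' the single returned Polygon traces the outline
# left-to-right, so with bins [0, 1, 2, 3] and counts [2, 3, 1] the
# vertices walk up and down each bin edge:
# (0,0) -> (0,2) -> (1,2) -> (1,3) -> (2,3) -> (2,1) -> (3,1) -> (3,0),
# which is exactly what the geometry tests above assert.
import matplotlib.pyplot as plt

def _hist_step_outline_sketch():
    _, _, (polygon,) = plt.hist([0, 0, 1, 1, 1, 2], bins=[0, 1, 2, 3],
                                histtype='step')
    return polygon.get_xy()  # the vertex list described above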
plt.subplots()\n ax.plot(data, 'D', mfc='none', markersize=100)\n\n\n@image_comparison(['rgba_markers'], remove_text=True)\ndef test_rgba_markers():\n fig, axs = plt.subplots(ncols=2)\n rcolors = [(1, 0, 0, 1), (1, 0, 0, 0.5)]\n bcolors = [(0, 0, 1, 1), (0, 0, 1, 0.5)]\n alphas = [None, 0.2]\n kw = dict(ms=100, mew=20)\n for i, alpha in enumerate(alphas):\n for j, rcolor in enumerate(rcolors):\n for k, bcolor in enumerate(bcolors):\n axs[i].plot(j+1, k+1, 'o', mfc=bcolor, mec=rcolor,\n alpha=alpha, **kw)\n axs[i].plot(j+1, k+3, 'x', mec=rcolor, alpha=alpha, **kw)\n for ax in axs:\n ax.axis([-1, 4, 0, 5])\n\n\n@image_comparison(['mollweide_grid'], remove_text=True)\ndef test_mollweide_grid():\n # test that both horizontal and vertical gridlines appear on the Mollweide\n # projection\n fig = plt.figure()\n ax = fig.add_subplot(projection='mollweide')\n ax.grid()\n\n\ndef test_mollweide_forward_inverse_closure():\n # test that the round-trip Mollweide forward->inverse transformation is an\n # approximate identity\n fig = plt.figure()\n ax = fig.add_subplot(projection='mollweide')\n\n # set up 1-degree grid in longitude, latitude\n lon = np.linspace(-np.pi, np.pi, 360)\n lat = np.linspace(-np.pi / 2.0, np.pi / 2.0, 180)\n lon, lat = np.meshgrid(lon, lat)\n ll = np.vstack((lon.flatten(), lat.flatten())).T\n\n # perform forward transform\n xy = ax.transProjection.transform(ll)\n\n # perform inverse transform\n ll2 = ax.transProjection.inverted().transform(xy)\n\n # compare\n np.testing.assert_array_almost_equal(ll, ll2, 3)\n\n\ndef test_mollweide_inverse_forward_closure():\n # test that the round-trip Mollweide inverse->forward transformation is an\n # approximate identity\n fig = plt.figure()\n ax = fig.add_subplot(projection='mollweide')\n\n # set up grid in x, y\n x = np.linspace(0, 1, 500)\n x, y = np.meshgrid(x, x)\n xy = np.vstack((x.flatten(), y.flatten())).T\n\n # perform inverse transform\n ll = ax.transProjection.inverted().transform(xy)\n\n # perform forward transform\n xy2 = ax.transProjection.transform(ll)\n\n # compare\n np.testing.assert_array_almost_equal(xy, xy2, 3)\n\n\n@image_comparison(['test_alpha'], remove_text=True)\ndef test_alpha():\n np.random.seed(0)\n data = np.random.random(50)\n\n fig, ax = plt.subplots()\n\n # alpha=.5 markers, solid line\n ax.plot(data, '-D', color=[1, 0, 0], mfc=[1, 0, 0, .5],\n markersize=20, lw=10)\n\n # everything solid by kwarg\n ax.plot(data + 2, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0, .5],\n markersize=20, lw=10,\n alpha=1)\n\n # everything alpha=.5 by kwarg\n ax.plot(data + 4, '-D', color=[1, 0, 0], mfc=[1, 0, 0],\n markersize=20, lw=10,\n alpha=.5)\n\n # everything alpha=.5 by colors\n ax.plot(data + 6, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0, .5],\n markersize=20, lw=10)\n\n # alpha=.5 line, solid markers\n ax.plot(data + 8, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0],\n markersize=20, lw=10)\n\n\n@image_comparison(['eventplot', 'eventplot'], remove_text=True)\ndef test_eventplot():\n np.random.seed(0)\n\n data1 = np.random.random([32, 20]).tolist()\n data2 = np.random.random([6, 20]).tolist()\n data = data1 + data2\n num_datasets = len(data)\n\n colors1 = [[0, 1, .7]] * len(data1)\n colors2 = [[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [1, .75, 0],\n [1, 0, 1],\n [0, 1, 1]]\n colors = colors1 + colors2\n\n lineoffsets1 = 12 + np.arange(0, len(data1)) * .33\n lineoffsets2 = [-15, -3, 1, 1.5, 6, 10]\n lineoffsets = lineoffsets1.tolist() + lineoffsets2\n\n linelengths1 = [.33] * len(data1)\n linelengths2 = [5, 2, 1, 1, 3, 1.5]\n linelengths = 
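# --- Editor's illustrative sketch (not part of this test suite; the
# _roundtrip_sketch helper name is hypothetical). The Mollweide closure
# tests above check that transProjection and its inverse compose to
# (almost) the identity; the same pattern works for any invertible
# transform.
import numpy as np
import matplotlib.pyplot as plt

def _roundtrip_sketch():
    ax = plt.figure().add_subplot(projection='mollweide')
    ll = np.column_stack([np.linspace(-3, 3, 10), np.linspace(-1.5, 1.5, 10)])
    xy = ax.transProjection.transform(ll)            # forward
    ll2 = ax.transProjection.inverted().transform(xy)  # and back
    np.testing.assert_array_almost_equal(ll, ll2, 3)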
linelengths1 + linelengths2\n\n fig = plt.figure()\n axobj = fig.add_subplot()\n colls = axobj.eventplot(data, colors=colors, lineoffsets=lineoffsets,\n linelengths=linelengths)\n\n num_collections = len(colls)\n assert num_collections == num_datasets\n\n # Reuse testcase from above for a labeled data test\n data = {\"pos\": data, \"c\": colors, \"lo\": lineoffsets, \"ll\": linelengths}\n fig = plt.figure()\n axobj = fig.add_subplot()\n colls = axobj.eventplot(\"pos\", colors=\"c\", lineoffsets=\"lo\",\n linelengths=\"ll\", data=data)\n num_collections = len(colls)\n assert num_collections == num_datasets\n\n\n@image_comparison(['test_eventplot_defaults.png'], remove_text=True)\ndef test_eventplot_defaults():\n \"\"\"\n test that eventplot produces the correct output given the default params\n (see bug #3728)\n \"\"\"\n np.random.seed(0)\n\n data1 = np.random.random([32, 20]).tolist()\n data2 = np.random.random([6, 20]).tolist()\n data = data1 + data2\n\n fig = plt.figure()\n axobj = fig.add_subplot()\n axobj.eventplot(data)\n\n\[email protected](('colors'), [\n ('0.5',), # string color with multiple characters: not OK before #8193 fix\n ('tab:orange', 'tab:pink', 'tab:cyan', 'bLacK'), # case-insensitive\n ('red', (0, 1, 0), None, (1, 0, 1, 0.5)), # a tricky case mixing types\n])\ndef test_eventplot_colors(colors):\n \"\"\"Test the *colors* parameter of eventplot. Inspired by issue #8193.\"\"\"\n data = [[0], [1], [2], [3]] # 4 successive events of different nature\n\n # Build the list of the expected colors\n expected = [c if c is not None else 'C0' for c in colors]\n # Convert the list into an array of RGBA values\n # NB: ['rgbk'] is not a valid argument for to_rgba_array, while 'rgbk' is.\n if len(expected) == 1:\n expected = expected[0]\n expected = np.broadcast_to(mcolors.to_rgba_array(expected), (len(data), 4))\n\n fig, ax = plt.subplots()\n if len(colors) == 1: # tuple with a single string (like '0.5' or 'rgbk')\n colors = colors[0]\n collections = ax.eventplot(data, colors=colors)\n\n for coll, color in zip(collections, expected):\n assert_allclose(coll.get_color(), color)\n\n\n@image_comparison(['test_eventplot_problem_kwargs.png'], remove_text=True)\ndef test_eventplot_problem_kwargs(recwarn):\n \"\"\"\n test that 'singular' versions of LineCollection props raise an\n IgnoredKeywordWarning rather than overriding the 'plural' versions (e.g.\n to prevent 'color' from overriding 'colors', see issue #4297)\n \"\"\"\n np.random.seed(0)\n\n data1 = np.random.random([20]).tolist()\n data2 = np.random.random([10]).tolist()\n data = [data1, data2]\n\n fig = plt.figure()\n axobj = fig.add_subplot()\n\n axobj.eventplot(data,\n colors=['r', 'b'],\n color=['c', 'm'],\n linewidths=[2, 1],\n linewidth=[1, 2],\n linestyles=['solid', 'dashed'],\n linestyle=['dashdot', 'dotted'])\n\n # check that three IgnoredKeywordWarnings were raised\n assert len(recwarn) == 3\n assert all(issubclass(wi.category, MatplotlibDeprecationWarning)\n for wi in recwarn)\n\n\ndef test_empty_eventplot():\n fig, ax = plt.subplots(1, 1)\n ax.eventplot([[]], colors=[(0.0, 0.0, 0.0, 0.0)])\n plt.draw()\n\n\[email protected]('data', [[[]], [[], [0, 1]], [[0, 1], []]])\[email protected](\n 'orientation', ['_empty', 'vertical', 'horizontal', None, 'none'])\ndef test_eventplot_orientation(data, orientation):\n \"\"\"Introduced when fixing issue #6412.\"\"\"\n opts = {} if orientation == \"_empty\" else {'orientation': orientation}\n fig, ax = plt.subplots(1, 1)\n with (pytest.warns(MatplotlibDeprecationWarning)\n if orientation 
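# --- Editor's illustrative sketch (not part of this test suite; the
# _eventplot_per_dataset_sketch helper name is hypothetical). eventplot
# takes one entry of colors/lineoffsets/linelengths per dataset and
# returns one EventCollection per dataset, which is what the
# num_collections assertions above rely on.
import matplotlib.pyplot as plt

def _eventplot_per_dataset_sketch():
    data = [[1, 2, 3], [1.5, 2.5]]
    fig, ax = plt.subplots()
    colls = ax.eventplot(data, colors=['C0', 'C1'],
                         lineoffsets=[0, 1], linelengths=[0.5, 0.8])
    assert len(colls) == len(data)  # one collection per dataset
    return fig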
in [None, 'none'] else nullcontext()):\n ax.eventplot(data, **opts)\n plt.draw()\n\n\n@image_comparison(['marker_styles.png'], remove_text=True)\ndef test_marker_styles():\n fig, ax = plt.subplots()\n for y, marker in enumerate(sorted(matplotlib.markers.MarkerStyle.markers,\n key=lambda x: str(type(x))+str(x))):\n ax.plot((y % 2)*5 + np.arange(10)*10, np.ones(10)*10*y, linestyle='',\n marker=marker, markersize=10+y/5, label=marker)\n\n\n@image_comparison(['rc_markerfill.png'])\ndef test_markers_fillstyle_rcparams():\n fig, ax = plt.subplots()\n x = np.arange(7)\n for idx, (style, marker) in enumerate(\n [('top', 's'), ('bottom', 'o'), ('none', '^')]):\n matplotlib.rcParams['markers.fillstyle'] = style\n ax.plot(x+idx, marker=marker)\n\n\n@image_comparison(['vertex_markers.png'], remove_text=True)\ndef test_vertex_markers():\n data = list(range(10))\n marker_as_tuple = ((-1, -1), (1, -1), (1, 1), (-1, 1))\n marker_as_list = [(-1, -1), (1, -1), (1, 1), (-1, 1)]\n fig, ax = plt.subplots()\n ax.plot(data, linestyle='', marker=marker_as_tuple, mfc='k')\n ax.plot(data[::-1], linestyle='', marker=marker_as_list, mfc='b')\n ax.set_xlim([-1, 10])\n ax.set_ylim([-1, 10])\n\n\n@image_comparison(['vline_hline_zorder', 'errorbar_zorder'],\n tol=0 if platform.machine() == 'x86_64' else 0.02)\ndef test_eb_line_zorder():\n x = list(range(10))\n\n # First illustrate basic pyplot interface, using defaults where possible.\n fig = plt.figure()\n ax = fig.gca()\n ax.plot(x, lw=10, zorder=5)\n ax.axhline(1, color='red', lw=10, zorder=1)\n ax.axhline(5, color='green', lw=10, zorder=10)\n ax.axvline(7, color='m', lw=10, zorder=7)\n ax.axvline(2, color='k', lw=10, zorder=3)\n\n ax.set_title(\"axvline and axhline zorder test\")\n\n # Now switch to a more OO interface to exercise more features.\n fig = plt.figure()\n ax = fig.gca()\n x = list(range(10))\n y = np.zeros(10)\n yerr = list(range(10))\n ax.errorbar(x, y, yerr=yerr, zorder=5, lw=5, color='r')\n for j in range(10):\n ax.axhline(j, lw=5, color='k', zorder=j)\n ax.axhline(-j, lw=5, color='k', zorder=j)\n\n ax.set_title(\"errorbar zorder test\")\n\n\n@check_figures_equal()\ndef test_axline_loglog(fig_test, fig_ref):\n ax = fig_test.subplots()\n ax.set(xlim=(0.1, 10), ylim=(1e-3, 1))\n ax.loglog([.3, .6], [.3, .6], \".-\")\n ax.axline((1, 1e-3), (10, 1e-2), c=\"k\")\n\n ax = fig_ref.subplots()\n ax.set(xlim=(0.1, 10), ylim=(1e-3, 1))\n ax.loglog([.3, .6], [.3, .6], \".-\")\n ax.loglog([1, 10], [1e-3, 1e-2], c=\"k\")\n\n\n@check_figures_equal()\ndef test_axline(fig_test, fig_ref):\n ax = fig_test.subplots()\n ax.set(xlim=(-1, 1), ylim=(-1, 1))\n ax.axline((0, 0), (1, 1))\n ax.axline((0, 0), (1, 0), color='C1')\n ax.axline((0, 0.5), (1, 0.5), color='C2')\n # slopes\n ax.axline((-0.7, -0.5), slope=0, color='C3')\n ax.axline((1, -0.5), slope=-0.5, color='C4')\n ax.axline((-0.5, 1), slope=float('inf'), color='C5')\n\n ax = fig_ref.subplots()\n ax.set(xlim=(-1, 1), ylim=(-1, 1))\n ax.plot([-1, 1], [-1, 1])\n ax.axhline(0, color='C1')\n ax.axhline(0.5, color='C2')\n # slopes\n ax.axhline(-0.5, color='C3')\n ax.plot([-1, 1], [0.5, -0.5], color='C4')\n ax.axvline(-0.5, color='C5')\n\n\n@check_figures_equal()\ndef test_axline_transaxes(fig_test, fig_ref):\n ax = fig_test.subplots()\n ax.set(xlim=(-1, 1), ylim=(-1, 1))\n ax.axline((0, 0), slope=1, transform=ax.transAxes)\n ax.axline((1, 0.5), slope=1, color='C1', transform=ax.transAxes)\n ax.axline((0.5, 0.5), slope=0, color='C2', transform=ax.transAxes)\n ax.axline((0.5, 0), (0.5, 1), color='C3', 
transform=ax.transAxes)\n\n ax = fig_ref.subplots()\n ax.set(xlim=(-1, 1), ylim=(-1, 1))\n ax.plot([-1, 1], [-1, 1])\n ax.plot([0, 1], [-1, 0], color='C1')\n ax.plot([-1, 1], [0, 0], color='C2')\n ax.plot([0, 0], [-1, 1], color='C3')\n\n\n@check_figures_equal()\ndef test_axline_transaxes_panzoom(fig_test, fig_ref):\n # test that it is robust against pan/zoom and\n # figure resize after plotting\n ax = fig_test.subplots()\n ax.set(xlim=(-1, 1), ylim=(-1, 1))\n ax.axline((0, 0), slope=1, transform=ax.transAxes)\n ax.axline((0.5, 0.5), slope=2, color='C1', transform=ax.transAxes)\n ax.axline((0.5, 0.5), slope=0, color='C2', transform=ax.transAxes)\n ax.set(xlim=(0, 5), ylim=(0, 10))\n fig_test.set_size_inches(3, 3)\n\n ax = fig_ref.subplots()\n ax.set(xlim=(0, 5), ylim=(0, 10))\n fig_ref.set_size_inches(3, 3)\n ax.plot([0, 5], [0, 5])\n ax.plot([0, 5], [0, 10], color='C1')\n ax.plot([0, 5], [5, 5], color='C2')\n\n\ndef test_axline_args():\n \"\"\"Exactly one of *xy2* and *slope* must be specified.\"\"\"\n fig, ax = plt.subplots()\n with pytest.raises(TypeError):\n ax.axline((0, 0)) # missing second parameter\n with pytest.raises(TypeError):\n ax.axline((0, 0), (1, 1), slope=1) # redundant parameters\n ax.set_xscale('log')\n with pytest.raises(TypeError):\n ax.axline((0, 0), slope=1)\n ax.set_xscale('linear')\n ax.set_yscale('log')\n with pytest.raises(TypeError):\n ax.axline((0, 0), slope=1)\n ax.set_yscale('linear')\n with pytest.raises(ValueError):\n ax.axline((0, 0), (0, 0)) # two identical points are not allowed\n plt.draw()\n\n\n@image_comparison(['vlines_basic', 'vlines_with_nan', 'vlines_masked'],\n extensions=['png'])\ndef test_vlines():\n # normal\n x1 = [2, 3, 4, 5, 7]\n y1 = [2, -6, 3, 8, 2]\n fig1, ax1 = plt.subplots()\n ax1.vlines(x1, 0, y1, colors='g', linewidth=5)\n\n # GH #7406\n x2 = [2, 3, 4, 5, 6, 7]\n y2 = [2, -6, 3, 8, np.nan, 2]\n fig2, (ax2, ax3, ax4) = plt.subplots(nrows=3, figsize=(4, 8))\n ax2.vlines(x2, 0, y2, colors='g', linewidth=5)\n\n x3 = [2, 3, 4, 5, 6, 7]\n y3 = [np.nan, 2, -6, 3, 8, 2]\n ax3.vlines(x3, 0, y3, colors='r', linewidth=3, linestyle='--')\n\n x4 = [2, 3, 4, 5, 6, 7]\n y4 = [np.nan, 2, -6, 3, 8, np.nan]\n ax4.vlines(x4, 0, y4, colors='k', linewidth=2)\n\n # tweak the x-axis so we can see the lines better\n for ax in [ax1, ax2, ax3, ax4]:\n ax.set_xlim(0, 10)\n\n # check that the y-lims are all automatically the same\n assert ax1.get_ylim() == ax2.get_ylim()\n assert ax1.get_ylim() == ax3.get_ylim()\n assert ax1.get_ylim() == ax4.get_ylim()\n\n fig3, ax5 = plt.subplots()\n x5 = np.ma.masked_equal([2, 4, 6, 8, 10, 12], 8)\n ymin5 = np.ma.masked_equal([0, 1, -1, 0, 2, 1], 2)\n ymax5 = np.ma.masked_equal([13, 14, 15, 16, 17, 18], 18)\n ax5.vlines(x5, ymin5, ymax5, colors='k', linewidth=2)\n ax5.set_xlim(0, 15)\n\n\ndef test_vlines_default():\n fig, ax = plt.subplots()\n with mpl.rc_context({'lines.color': 'red'}):\n lines = ax.vlines(0.5, 0, 1)\n assert mpl.colors.same_color(lines.get_color(), 'red')\n\n\n@image_comparison(['hlines_basic', 'hlines_with_nan', 'hlines_masked'],\n extensions=['png'])\ndef test_hlines():\n # normal\n y1 = [2, 3, 4, 5, 7]\n x1 = [2, -6, 3, 8, 2]\n fig1, ax1 = plt.subplots()\n ax1.hlines(y1, 0, x1, colors='g', linewidth=5)\n\n # GH #7406\n y2 = [2, 3, 4, 5, 6, 7]\n x2 = [2, -6, 3, 8, np.nan, 2]\n fig2, (ax2, ax3, ax4) = plt.subplots(nrows=3, figsize=(4, 8))\n ax2.hlines(y2, 0, x2, colors='g', linewidth=5)\n\n y3 = [2, 3, 4, 5, 6, 7]\n x3 = [np.nan, 2, -6, 3, 8, 2]\n ax3.hlines(y3, 0, x3, colors='r', linewidth=3, 
linestyle='--')\n\n y4 = [2, 3, 4, 5, 6, 7]\n x4 = [np.nan, 2, -6, 3, 8, np.nan]\n ax4.hlines(y4, 0, x4, colors='k', linewidth=2)\n\n # tweak the y-axis so we can see the lines better\n for ax in [ax1, ax2, ax3, ax4]:\n ax.set_ylim(0, 10)\n\n # check that the x-lims are all automatically the same\n assert ax1.get_xlim() == ax2.get_xlim()\n assert ax1.get_xlim() == ax3.get_xlim()\n assert ax1.get_xlim() == ax4.get_xlim()\n\n fig3, ax5 = plt.subplots()\n y5 = np.ma.masked_equal([2, 4, 6, 8, 10, 12], 8)\n xmin5 = np.ma.masked_equal([0, 1, -1, 0, 2, 1], 2)\n xmax5 = np.ma.masked_equal([13, 14, 15, 16, 17, 18], 18)\n ax5.hlines(y5, xmin5, xmax5, colors='k', linewidth=2)\n ax5.set_ylim(0, 15)\n\n\ndef test_hlines_default():\n fig, ax = plt.subplots()\n with mpl.rc_context({'lines.color': 'red'}):\n lines = ax.hlines(0.5, 0, 1)\n assert mpl.colors.same_color(lines.get_color(), 'red')\n\n\[email protected]('data', [[1, 2, 3, np.nan, 5],\n np.ma.masked_equal([1, 2, 3, 4, 5], 4)])\n@check_figures_equal(extensions=[\"png\"])\ndef test_lines_with_colors(fig_test, fig_ref, data):\n test_colors = ['red', 'green', 'blue', 'purple', 'orange']\n fig_test.add_subplot(2, 1, 1).vlines(data, 0, 1,\n colors=test_colors, linewidth=5)\n fig_test.add_subplot(2, 1, 2).hlines(data, 0, 1,\n colors=test_colors, linewidth=5)\n\n expect_xy = [1, 2, 3, 5]\n expect_color = ['red', 'green', 'blue', 'orange']\n fig_ref.add_subplot(2, 1, 1).vlines(expect_xy, 0, 1,\n colors=expect_color, linewidth=5)\n fig_ref.add_subplot(2, 1, 2).hlines(expect_xy, 0, 1,\n colors=expect_color, linewidth=5)\n\n\n@image_comparison(['step_linestyle', 'step_linestyle'], remove_text=True)\ndef test_step_linestyle():\n x = y = np.arange(10)\n\n # First illustrate basic pyplot interface, using defaults where possible.\n fig, ax_lst = plt.subplots(2, 2)\n ax_lst = ax_lst.flatten()\n\n ln_styles = ['-', '--', '-.', ':']\n\n for ax, ls in zip(ax_lst, ln_styles):\n ax.step(x, y, lw=5, linestyle=ls, where='pre')\n ax.step(x, y + 1, lw=5, linestyle=ls, where='mid')\n ax.step(x, y + 2, lw=5, linestyle=ls, where='post')\n ax.set_xlim([-1, 5])\n ax.set_ylim([-1, 7])\n\n # Reuse testcase from above for a labeled data test\n data = {\"X\": x, \"Y0\": y, \"Y1\": y+1, \"Y2\": y+2}\n fig, ax_lst = plt.subplots(2, 2)\n ax_lst = ax_lst.flatten()\n ln_styles = ['-', '--', '-.', ':']\n for ax, ls in zip(ax_lst, ln_styles):\n ax.step(\"X\", \"Y0\", lw=5, linestyle=ls, where='pre', data=data)\n ax.step(\"X\", \"Y1\", lw=5, linestyle=ls, where='mid', data=data)\n ax.step(\"X\", \"Y2\", lw=5, linestyle=ls, where='post', data=data)\n ax.set_xlim([-1, 5])\n ax.set_ylim([-1, 7])\n\n\n@image_comparison(['mixed_collection'], remove_text=True)\ndef test_mixed_collection():\n # First illustrate basic pyplot interface, using defaults where possible.\n fig, ax = plt.subplots()\n\n c = mpatches.Circle((8, 8), radius=4, facecolor='none', edgecolor='green')\n\n # PDF can optimize this one\n p1 = mpl.collections.PatchCollection([c], match_original=True)\n p1.set_offsets([[0, 0], [24, 24]])\n p1.set_linewidths([1, 5])\n\n # PDF can't optimize this one, because the alpha of the edge changes\n p2 = mpl.collections.PatchCollection([c], match_original=True)\n p2.set_offsets([[48, 0], [-32, -16]])\n p2.set_linewidths([1, 5])\n p2.set_edgecolors([[0, 0, 0.1, 1.0], [0, 0, 0.1, 0.5]])\n\n ax.patch.set_color('0.5')\n ax.add_collection(p1)\n ax.add_collection(p2)\n\n ax.set_xlim(0, 16)\n ax.set_ylim(0, 16)\n\n\ndef test_subplot_key_hash():\n ax = plt.subplot(np.int32(5), np.int64(1), 1)\n 
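# --- Editor's illustrative sketch (not part of this test suite; the
# _lines_skip_invalid_sketch helper name is hypothetical). The vlines and
# hlines tests above expect identical autoscaled limits with or without
# NaN/masked entries, i.e. invalid segments are dropped rather than
# propagated into the limits.
import numpy as np
import matplotlib.pyplot as plt

def _lines_skip_invalid_sketch():
    fig, ax = plt.subplots()
    ax.vlines([1, 2, 3], 0, [1, np.nan, 2])            # NaN endpoint dropped
    ax.hlines(np.ma.masked_equal([1, 2, 3], 2), 0, 1)  # masked position dropped
    return fig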
ax.twinx()\n assert ax.get_subplotspec().get_geometry() == (5, 1, 0, 0)\n\n\n@image_comparison(\n [\"specgram_freqs.png\", \"specgram_freqs_linear.png\",\n \"specgram_noise.png\", \"specgram_noise_linear.png\"],\n remove_text=True, tol=0.07, style=\"default\")\ndef test_specgram():\n \"\"\"Test axes.specgram in default (psd) mode.\"\"\"\n\n # use former defaults to match existing baseline image\n matplotlib.rcParams['image.interpolation'] = 'nearest'\n\n n = 1000\n Fs = 10.\n\n fstims = [[Fs/4, Fs/5, Fs/11], [Fs/4.7, Fs/5.6, Fs/11.9]]\n NFFT_freqs = int(10 * Fs / np.min(fstims))\n x = np.arange(0, n, 1/Fs)\n y_freqs = np.concatenate(\n np.sin(2 * np.pi * np.multiply.outer(fstims, x)).sum(axis=1))\n\n NFFT_noise = int(10 * Fs / 11)\n np.random.seed(0)\n y_noise = np.concatenate([np.random.standard_normal(n), np.random.rand(n)])\n\n all_sides = [\"default\", \"onesided\", \"twosided\"]\n for y, NFFT in [(y_freqs, NFFT_freqs), (y_noise, NFFT_noise)]:\n noverlap = NFFT // 2\n pad_to = int(2 ** np.ceil(np.log2(NFFT)))\n for ax, sides in zip(plt.figure().subplots(3), all_sides):\n ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,\n pad_to=pad_to, sides=sides)\n for ax, sides in zip(plt.figure().subplots(3), all_sides):\n ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,\n pad_to=pad_to, sides=sides,\n scale=\"linear\", norm=matplotlib.colors.LogNorm())\n\n\n@image_comparison(\n [\"specgram_magnitude_freqs.png\", \"specgram_magnitude_freqs_linear.png\",\n \"specgram_magnitude_noise.png\", \"specgram_magnitude_noise_linear.png\"],\n remove_text=True, tol=0.07, style=\"default\")\ndef test_specgram_magnitude():\n \"\"\"Test axes.specgram in magnitude mode.\"\"\"\n\n # use former defaults to match existing baseline image\n matplotlib.rcParams['image.interpolation'] = 'nearest'\n\n n = 1000\n Fs = 10.\n\n fstims = [[Fs/4, Fs/5, Fs/11], [Fs/4.7, Fs/5.6, Fs/11.9]]\n NFFT_freqs = int(100 * Fs / np.min(fstims))\n x = np.arange(0, n, 1/Fs)\n y = np.sin(2 * np.pi * np.multiply.outer(fstims, x)).sum(axis=1)\n y[:, -1] = 1\n y_freqs = np.hstack(y)\n\n NFFT_noise = int(10 * Fs / 11)\n np.random.seed(0)\n y_noise = np.concatenate([np.random.standard_normal(n), np.random.rand(n)])\n\n all_sides = [\"default\", \"onesided\", \"twosided\"]\n for y, NFFT in [(y_freqs, NFFT_freqs), (y_noise, NFFT_noise)]:\n noverlap = NFFT // 2\n pad_to = int(2 ** np.ceil(np.log2(NFFT)))\n for ax, sides in zip(plt.figure().subplots(3), all_sides):\n ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,\n pad_to=pad_to, sides=sides, mode=\"magnitude\")\n for ax, sides in zip(plt.figure().subplots(3), all_sides):\n ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,\n pad_to=pad_to, sides=sides, mode=\"magnitude\",\n scale=\"linear\", norm=matplotlib.colors.LogNorm())\n\n\n@image_comparison(\n [\"specgram_angle_freqs.png\", \"specgram_phase_freqs.png\",\n \"specgram_angle_noise.png\", \"specgram_phase_noise.png\"],\n remove_text=True, tol=0.07, style=\"default\")\ndef test_specgram_angle():\n \"\"\"Test axes.specgram in angle and phase modes.\"\"\"\n\n # use former defaults to match existing baseline image\n matplotlib.rcParams['image.interpolation'] = 'nearest'\n\n n = 1000\n Fs = 10.\n\n fstims = [[Fs/4, Fs/5, Fs/11], [Fs/4.7, Fs/5.6, Fs/11.9]]\n NFFT_freqs = int(10 * Fs / np.min(fstims))\n x = np.arange(0, n, 1/Fs)\n y = np.sin(2 * np.pi * np.multiply.outer(fstims, x)).sum(axis=1)\n y[:, -1] = 1\n y_freqs = np.hstack(y)\n\n NFFT_noise = int(10 * Fs / 11)\n np.random.seed(0)\n y_noise = 
np.concatenate([np.random.standard_normal(n), np.random.rand(n)])\n\n all_sides = [\"default\", \"onesided\", \"twosided\"]\n for y, NFFT in [(y_freqs, NFFT_freqs), (y_noise, NFFT_noise)]:\n noverlap = NFFT // 2\n pad_to = int(2 ** np.ceil(np.log2(NFFT)))\n for mode in [\"angle\", \"phase\"]:\n for ax, sides in zip(plt.figure().subplots(3), all_sides):\n ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,\n pad_to=pad_to, sides=sides, mode=mode)\n with pytest.raises(ValueError):\n ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,\n pad_to=pad_to, sides=sides, mode=mode,\n scale=\"dB\")\n\n\ndef test_specgram_fs_none():\n \"\"\"Test axes.specgram when Fs is None, should not throw error.\"\"\"\n spec, freqs, t, im = plt.specgram(np.ones(300), Fs=None, scale='linear')\n xmin, xmax, freq0, freq1 = im.get_extent()\n assert xmin == 32 and xmax == 96\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_specgram_origin_rcparam(fig_test, fig_ref):\n \"\"\"Test specgram ignores image.origin rcParam and uses origin 'upper'.\"\"\"\n t = np.arange(500)\n signal = np.sin(t)\n\n plt.rcParams[\"image.origin\"] = 'upper'\n\n # Reference: First graph using default origin in imshow (upper),\n fig_ref.subplots().specgram(signal)\n\n # Try to overwrite the setting trying to flip the specgram\n plt.rcParams[\"image.origin\"] = 'lower'\n\n # Test: origin='lower' should be ignored\n fig_test.subplots().specgram(signal)\n\n\ndef test_specgram_origin_kwarg():\n \"\"\"Ensure passing origin as a kwarg raises a TypeError.\"\"\"\n t = np.arange(500)\n signal = np.sin(t)\n\n with pytest.raises(TypeError):\n plt.specgram(signal, origin='lower')\n\n\n@image_comparison(\n [\"psd_freqs.png\", \"csd_freqs.png\", \"psd_noise.png\", \"csd_noise.png\"],\n remove_text=True, tol=0.002)\ndef test_psd_csd():\n n = 10000\n Fs = 100.\n\n fstims = [[Fs/4, Fs/5, Fs/11], [Fs/4.7, Fs/5.6, Fs/11.9]]\n NFFT_freqs = int(1000 * Fs / np.min(fstims))\n x = np.arange(0, n, 1/Fs)\n ys_freqs = np.sin(2 * np.pi * np.multiply.outer(fstims, x)).sum(axis=1)\n\n NFFT_noise = int(1000 * Fs / 11)\n np.random.seed(0)\n ys_noise = [np.random.standard_normal(n), np.random.rand(n)]\n\n all_kwargs = [{\"sides\": \"default\"},\n {\"sides\": \"onesided\", \"return_line\": False},\n {\"sides\": \"twosided\", \"return_line\": True}]\n for ys, NFFT in [(ys_freqs, NFFT_freqs), (ys_noise, NFFT_noise)]:\n noverlap = NFFT // 2\n pad_to = int(2 ** np.ceil(np.log2(NFFT)))\n for ax, kwargs in zip(plt.figure().subplots(3), all_kwargs):\n ret = ax.psd(np.concatenate(ys), NFFT=NFFT, Fs=Fs,\n noverlap=noverlap, pad_to=pad_to, **kwargs)\n assert len(ret) == 2 + kwargs.get(\"return_line\", False)\n ax.set(xlabel=\"\", ylabel=\"\")\n for ax, kwargs in zip(plt.figure().subplots(3), all_kwargs):\n ret = ax.csd(*ys, NFFT=NFFT, Fs=Fs,\n noverlap=noverlap, pad_to=pad_to, **kwargs)\n assert len(ret) == 2 + kwargs.get(\"return_line\", False)\n ax.set(xlabel=\"\", ylabel=\"\")\n\n\n@image_comparison(\n [\"magnitude_spectrum_freqs_linear.png\",\n \"magnitude_spectrum_freqs_dB.png\",\n \"angle_spectrum_freqs.png\",\n \"phase_spectrum_freqs.png\",\n \"magnitude_spectrum_noise_linear.png\",\n \"magnitude_spectrum_noise_dB.png\",\n \"angle_spectrum_noise.png\",\n \"phase_spectrum_noise.png\"],\n remove_text=True)\ndef test_spectrum():\n n = 10000\n Fs = 100.\n\n fstims1 = [Fs/4, Fs/5, Fs/11]\n NFFT = int(1000 * Fs / min(fstims1))\n pad_to = int(2 ** np.ceil(np.log2(NFFT)))\n\n x = np.arange(0, n, 1/Fs)\n y_freqs = ((np.sin(2 * np.pi * np.outer(x, fstims1)) * 10**np.arange(3))\n 
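# --- Editor's illustrative sketch (not part of this test suite; the
# _spectral_params_sketch helper name is hypothetical). The spectral tests
# above all derive their parameters the same way: half-overlapping windows
# and a power-of-two FFT length.
import numpy as np

def _spectral_params_sketch(NFFT):
    noverlap = NFFT // 2                       # 50% window overlap
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))  # next power of two for the FFT
    return noverlap, pad_to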
.sum(axis=1))\n np.random.seed(0)\n y_noise = np.hstack([np.random.standard_normal(n), np.random.rand(n)]) - .5\n\n all_sides = [\"default\", \"onesided\", \"twosided\"]\n kwargs = {\"Fs\": Fs, \"pad_to\": pad_to}\n for y in [y_freqs, y_noise]:\n for ax, sides in zip(plt.figure().subplots(3), all_sides):\n spec, freqs, line = ax.magnitude_spectrum(y, sides=sides, **kwargs)\n ax.set(xlabel=\"\", ylabel=\"\")\n for ax, sides in zip(plt.figure().subplots(3), all_sides):\n spec, freqs, line = ax.magnitude_spectrum(y, sides=sides, **kwargs,\n scale=\"dB\")\n ax.set(xlabel=\"\", ylabel=\"\")\n for ax, sides in zip(plt.figure().subplots(3), all_sides):\n spec, freqs, line = ax.angle_spectrum(y, sides=sides, **kwargs)\n ax.set(xlabel=\"\", ylabel=\"\")\n for ax, sides in zip(plt.figure().subplots(3), all_sides):\n spec, freqs, line = ax.phase_spectrum(y, sides=sides, **kwargs)\n ax.set(xlabel=\"\", ylabel=\"\")\n\n\n@image_comparison(['twin_spines.png'], remove_text=True)\ndef test_twin_spines():\n\n def make_patch_spines_invisible(ax):\n ax.set_frame_on(True)\n ax.patch.set_visible(False)\n ax.spines[:].set_visible(False)\n\n fig = plt.figure(figsize=(4, 3))\n fig.subplots_adjust(right=0.75)\n\n host = fig.add_subplot()\n par1 = host.twinx()\n par2 = host.twinx()\n\n # Offset the right spine of par2. The ticks and label have already been\n # placed on the right by twinx above.\n par2.spines.right.set_position((\"axes\", 1.2))\n # Having been created by twinx, par2 has its frame off, so the line of\n # its detached spine is invisible. First, activate the frame but make\n # the patch and spines invisible.\n make_patch_spines_invisible(par2)\n # Second, show the right spine.\n par2.spines.right.set_visible(True)\n\n p1, = host.plot([0, 1, 2], [0, 1, 2], \"b-\")\n p2, = par1.plot([0, 1, 2], [0, 3, 2], \"r-\")\n p3, = par2.plot([0, 1, 2], [50, 30, 15], \"g-\")\n\n host.set_xlim(0, 2)\n host.set_ylim(0, 2)\n par1.set_ylim(0, 4)\n par2.set_ylim(1, 65)\n\n host.yaxis.label.set_color(p1.get_color())\n par1.yaxis.label.set_color(p2.get_color())\n par2.yaxis.label.set_color(p3.get_color())\n\n tkw = dict(size=4, width=1.5)\n host.tick_params(axis='y', colors=p1.get_color(), **tkw)\n par1.tick_params(axis='y', colors=p2.get_color(), **tkw)\n par2.tick_params(axis='y', colors=p3.get_color(), **tkw)\n host.tick_params(axis='x', **tkw)\n\n\n@image_comparison(['twin_spines_on_top.png', 'twin_spines_on_top.png'],\n remove_text=True)\ndef test_twin_spines_on_top():\n matplotlib.rcParams['axes.linewidth'] = 48.0\n matplotlib.rcParams['lines.linewidth'] = 48.0\n\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n\n data = np.array([[1000, 1100, 1200, 1250],\n [310, 301, 360, 400]])\n\n ax2 = ax1.twinx()\n\n ax1.plot(data[0], data[1]/1E3, color='#BEAED4')\n ax1.fill_between(data[0], data[1]/1E3, color='#BEAED4', alpha=.8)\n\n ax2.plot(data[0], data[1]/1E3, color='#7FC97F')\n ax2.fill_between(data[0], data[1]/1E3, color='#7FC97F', alpha=.5)\n\n # Reuse testcase from above for a labeled data test\n data = {\"i\": data[0], \"j\": data[1]/1E3}\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n ax2 = ax1.twinx()\n ax1.plot(\"i\", \"j\", color='#BEAED4', data=data)\n ax1.fill_between(\"i\", \"j\", color='#BEAED4', alpha=.8, data=data)\n ax2.plot(\"i\", \"j\", color='#7FC97F', data=data)\n ax2.fill_between(\"i\", \"j\", color='#7FC97F', alpha=.5, data=data)\n\n\[email protected](\"grid_which, major_visible, minor_visible\", [\n (\"both\", True, True),\n (\"major\", True, False),\n (\"minor\", False, True),\n])\ndef 
test_rcparam_grid_minor(grid_which, major_visible, minor_visible):\n mpl.rcParams.update({\"axes.grid\": True, \"axes.grid.which\": grid_which})\n fig, ax = plt.subplots()\n fig.canvas.draw()\n assert all(tick.gridline.get_visible() == major_visible\n for tick in ax.xaxis.majorTicks)\n assert all(tick.gridline.get_visible() == minor_visible\n for tick in ax.xaxis.minorTicks)\n\n\ndef test_grid():\n fig, ax = plt.subplots()\n ax.grid()\n fig.canvas.draw()\n assert ax.xaxis.majorTicks[0].gridline.get_visible()\n ax.grid(visible=False)\n fig.canvas.draw()\n assert not ax.xaxis.majorTicks[0].gridline.get_visible()\n ax.grid(visible=True)\n fig.canvas.draw()\n assert ax.xaxis.majorTicks[0].gridline.get_visible()\n ax.grid()\n fig.canvas.draw()\n assert not ax.xaxis.majorTicks[0].gridline.get_visible()\n\n\ndef test_reset_grid():\n fig, ax = plt.subplots()\n ax.tick_params(reset=True, which='major', labelsize=10)\n assert not ax.xaxis.majorTicks[0].gridline.get_visible()\n ax.grid(color='red') # enables grid\n assert ax.xaxis.majorTicks[0].gridline.get_visible()\n\n with plt.rc_context({'axes.grid': True}):\n ax.clear()\n ax.tick_params(reset=True, which='major', labelsize=10)\n assert ax.xaxis.majorTicks[0].gridline.get_visible()\n\n\ndef test_vline_limit():\n fig = plt.figure()\n ax = fig.gca()\n ax.axvline(0.5)\n ax.plot([-0.1, 0, 0.2, 0.1])\n assert_allclose(ax.get_ylim(), (-.1, .2))\n\n\[email protected]('fv, fh, args', [[plt.axvline, plt.axhline, (1,)],\n [plt.axvspan, plt.axhspan, (1, 1)]])\ndef test_axline_minmax(fv, fh, args):\n bad_lim = matplotlib.dates.num2date(1)\n # Check vertical functions\n with pytest.raises(ValueError, match='ymin must be a single scalar value'):\n fv(*args, ymin=bad_lim, ymax=1)\n with pytest.raises(ValueError, match='ymax must be a single scalar value'):\n fv(*args, ymin=1, ymax=bad_lim)\n # Check horizontal functions\n with pytest.raises(ValueError, match='xmin must be a single scalar value'):\n fh(*args, xmin=bad_lim, xmax=1)\n with pytest.raises(ValueError, match='xmax must be a single scalar value'):\n fh(*args, xmin=1, xmax=bad_lim)\n\n\ndef test_empty_shared_subplots():\n # empty plots with shared axes inherit limits from populated plots\n fig, axs = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)\n axs[0].plot([1, 2, 3], [2, 4, 6])\n x0, x1 = axs[1].get_xlim()\n y0, y1 = axs[1].get_ylim()\n assert x0 <= 1\n assert x1 >= 3\n assert y0 <= 2\n assert y1 >= 6\n\n\ndef test_shared_with_aspect_1():\n # allow sharing one axis\n for adjustable in ['box', 'datalim']:\n fig, axs = plt.subplots(nrows=2, sharex=True)\n axs[0].set_aspect(2, adjustable=adjustable, share=True)\n assert axs[1].get_aspect() == 2\n assert axs[1].get_adjustable() == adjustable\n\n fig, axs = plt.subplots(nrows=2, sharex=True)\n axs[0].set_aspect(2, adjustable=adjustable)\n assert axs[1].get_aspect() == 'auto'\n\n\ndef test_shared_with_aspect_2():\n # Share 2 axes only with 'box':\n fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True)\n axs[0].set_aspect(2, share=True)\n axs[0].plot([1, 2], [3, 4])\n axs[1].plot([3, 4], [1, 2])\n plt.draw() # Trigger apply_aspect().\n assert axs[0].get_xlim() == axs[1].get_xlim()\n assert axs[0].get_ylim() == axs[1].get_ylim()\n\n\ndef test_shared_with_aspect_3():\n # Different aspect ratios:\n for adjustable in ['box', 'datalim']:\n fig, axs = plt.subplots(nrows=2, sharey=True)\n axs[0].set_aspect(2, adjustable=adjustable)\n axs[1].set_aspect(0.5, adjustable=adjustable)\n axs[0].plot([1, 2], [3, 4])\n axs[1].plot([3, 4], [1, 2])\n 
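# --- Editor's illustrative sketch (not part of this test suite; the
# _shared_limits_sketch helper name is hypothetical). With sharex/sharey,
# limits propagate across the whole share group, so an empty axes inherits
# the autoscaled range of its populated sibling, as test_empty_shared_subplots
# above asserts.
import matplotlib.pyplot as plt

def _shared_limits_sketch():
    fig, (ax0, ax1) = plt.subplots(1, 2, sharex=True, sharey=True)
    ax0.plot([1, 2, 3], [2, 4, 6])
    return ax1.get_xlim(), ax1.get_ylim()  # covers (1, 3) x (2, 6)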
plt.draw() # Trigger apply_aspect().\n assert axs[0].get_xlim() != axs[1].get_xlim()\n assert axs[0].get_ylim() == axs[1].get_ylim()\n fig_aspect = fig.bbox_inches.height / fig.bbox_inches.width\n for ax in axs:\n p = ax.get_position()\n box_aspect = p.height / p.width\n lim_aspect = ax.viewLim.height / ax.viewLim.width\n expected = fig_aspect * box_aspect / lim_aspect\n assert round(expected, 4) == round(ax.get_aspect(), 4)\n\n\[email protected]('twin', ('x', 'y'))\ndef test_twin_with_aspect(twin):\n fig, ax = plt.subplots()\n # test twinx or twiny\n ax_twin = getattr(ax, 'twin{}'.format(twin))()\n ax.set_aspect(5)\n ax_twin.set_aspect(2)\n assert_array_equal(ax.bbox.extents,\n ax_twin.bbox.extents)\n\n\ndef test_relim_visible_only():\n x1 = (0., 10.)\n y1 = (0., 10.)\n x2 = (-10., 20.)\n y2 = (-10., 30.)\n\n fig = matplotlib.figure.Figure()\n ax = fig.add_subplot()\n ax.plot(x1, y1)\n assert ax.get_xlim() == x1\n assert ax.get_ylim() == y1\n line, = ax.plot(x2, y2)\n assert ax.get_xlim() == x2\n assert ax.get_ylim() == y2\n line.set_visible(False)\n assert ax.get_xlim() == x2\n assert ax.get_ylim() == y2\n\n ax.relim(visible_only=True)\n ax.autoscale_view()\n\n assert ax.get_xlim() == x1\n assert ax.get_ylim() == y1\n\n\ndef test_text_labelsize():\n \"\"\"\n tests for issue #1172\n \"\"\"\n fig = plt.figure()\n ax = fig.gca()\n ax.tick_params(labelsize='large')\n ax.tick_params(direction='out')\n\n\n@image_comparison(['pie_default.png'])\ndef test_pie_default():\n # The slices will be ordered and plotted counter-clockwise.\n labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'\n sizes = [15, 30, 45, 10]\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']\n explode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n fig1, ax1 = plt.subplots(figsize=(8, 6))\n ax1.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90)\n\n\n@image_comparison(['pie_linewidth_0', 'pie_linewidth_0', 'pie_linewidth_0'],\n extensions=['png'])\ndef test_pie_linewidth_0():\n # The slices will be ordered and plotted counter-clockwise.\n labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'\n sizes = [15, 30, 45, 10]\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']\n explode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\n plt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90,\n wedgeprops={'linewidth': 0})\n # Set aspect ratio to be equal so that pie is drawn as a circle.\n plt.axis('equal')\n\n # Reuse testcase from above for a labeled data test\n data = {\"l\": labels, \"s\": sizes, \"c\": colors, \"ex\": explode}\n fig = plt.figure()\n ax = fig.gca()\n ax.pie(\"s\", explode=\"ex\", labels=\"l\", colors=\"c\",\n autopct='%1.1f%%', shadow=True, startangle=90,\n wedgeprops={'linewidth': 0}, data=data)\n ax.axis('equal')\n\n # And again to test the pyplot functions which should also be able to be\n # called with a data kwarg\n plt.figure()\n plt.pie(\"s\", explode=\"ex\", labels=\"l\", colors=\"c\",\n autopct='%1.1f%%', shadow=True, startangle=90,\n wedgeprops={'linewidth': 0}, data=data)\n plt.axis('equal')\n\n\n@image_comparison(['pie_center_radius.png'])\ndef test_pie_center_radius():\n # The slices will be ordered and plotted counter-clockwise.\n labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'\n sizes = [15, 30, 45, 10]\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']\n explode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 
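# --- Editor's illustrative sketch (not part of this test suite; the
# _relim_visible_only_sketch helper name is hypothetical). relim()
# recomputes the data limits from the artists; with visible_only=True a
# hidden line no longer contributes, so autoscale_view() snaps back to the
# visible data, as test_relim_visible_only above checks.
import matplotlib.pyplot as plt

def _relim_visible_only_sketch():
    fig, ax = plt.subplots()
    ax.plot([0, 10], [0, 10])
    wide, = ax.plot([-10, 20], [-10, 30])
    wide.set_visible(False)
    ax.relim(visible_only=True)  # drop the hidden line from the data limits
    ax.autoscale_view()
    return ax.get_xlim()  # back to (0, 10)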
'Hogs')\n\n plt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90,\n wedgeprops={'linewidth': 0}, center=(1, 2), radius=1.5)\n\n plt.annotate(\"Center point\", xy=(1, 2), xytext=(1, 1.3),\n arrowprops=dict(arrowstyle=\"->\",\n connectionstyle=\"arc3\"),\n bbox=dict(boxstyle=\"square\", facecolor=\"lightgrey\"))\n # Set aspect ratio to be equal so that pie is drawn as a circle.\n plt.axis('equal')\n\n\n@image_comparison(['pie_linewidth_2.png'])\ndef test_pie_linewidth_2():\n # The slices will be ordered and plotted counter-clockwise.\n labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'\n sizes = [15, 30, 45, 10]\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']\n explode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\n plt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90,\n wedgeprops={'linewidth': 2})\n # Set aspect ratio to be equal so that pie is drawn as a circle.\n plt.axis('equal')\n\n\n@image_comparison(['pie_ccw_true.png'])\ndef test_pie_ccw_true():\n # The slices will be ordered and plotted counter-clockwise.\n labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'\n sizes = [15, 30, 45, 10]\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']\n explode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\n plt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90,\n counterclock=True)\n # Set aspect ratio to be equal so that pie is drawn as a circle.\n plt.axis('equal')\n\n\n@image_comparison(['pie_frame_grid.png'])\ndef test_pie_frame_grid():\n # The slices will be ordered and plotted counter-clockwise.\n labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'\n sizes = [15, 30, 45, 10]\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']\n # only \"explode\" the 2nd slice (i.e. 'Hogs')\n explode = (0, 0.1, 0, 0)\n\n plt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90,\n wedgeprops={'linewidth': 0},\n frame=True, center=(2, 2))\n\n plt.pie(sizes[::-1], explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90,\n wedgeprops={'linewidth': 0},\n frame=True, center=(5, 2))\n\n plt.pie(sizes, explode=explode[::-1], labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90,\n wedgeprops={'linewidth': 0},\n frame=True, center=(3, 5))\n # Set aspect ratio to be equal so that pie is drawn as a circle.\n plt.axis('equal')\n\n\n@image_comparison(['pie_rotatelabels_true.png'])\ndef test_pie_rotatelabels_true():\n # The slices will be ordered and plotted counter-clockwise.\n labels = 'Hogwarts', 'Frogs', 'Dogs', 'Logs'\n sizes = [15, 30, 45, 10]\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']\n explode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\n plt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90,\n rotatelabels=True)\n # Set aspect ratio to be equal so that pie is drawn as a circle.\n plt.axis('equal')\n\n\n@image_comparison(['pie_no_label.png'])\ndef test_pie_nolabel_but_legend():\n labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'\n sizes = [15, 30, 45, 10]\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']\n explode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 
'Hogs')\n plt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90, labeldistance=None,\n rotatelabels=True)\n plt.axis('equal')\n plt.ylim(-1.2, 1.2)\n plt.legend()\n\n\ndef test_pie_textprops():\n data = [23, 34, 45]\n labels = [\"Long name 1\", \"Long name 2\", \"Long name 3\"]\n\n textprops = dict(horizontalalignment=\"center\",\n verticalalignment=\"top\",\n rotation=90,\n rotation_mode=\"anchor\",\n size=12, color=\"red\")\n\n _, texts, autopct = plt.gca().pie(data, labels=labels, autopct='%.2f',\n textprops=textprops)\n for labels in [texts, autopct]:\n for tx in labels:\n assert tx.get_ha() == textprops[\"horizontalalignment\"]\n assert tx.get_va() == textprops[\"verticalalignment\"]\n assert tx.get_rotation() == textprops[\"rotation\"]\n assert tx.get_rotation_mode() == textprops[\"rotation_mode\"]\n assert tx.get_size() == textprops[\"size\"]\n assert tx.get_color() == textprops[\"color\"]\n\n\ndef test_pie_get_negative_values():\n # Test the ValueError raised when feeding negative values into axes.pie\n fig, ax = plt.subplots()\n with pytest.raises(ValueError):\n ax.pie([5, 5, -3], explode=[0, .1, .2])\n\n\ndef test_normalize_kwarg_warn_pie():\n fig, ax = plt.subplots()\n with pytest.warns(MatplotlibDeprecationWarning):\n ax.pie(x=[0], normalize=None)\n\n\ndef test_normalize_kwarg_pie():\n fig, ax = plt.subplots()\n x = [0.3, 0.3, 0.1]\n t1 = ax.pie(x=x, normalize=True)\n assert abs(t1[0][-1].theta2 - 360.) < 1e-3\n t2 = ax.pie(x=x, normalize=False)\n assert abs(t2[0][-1].theta2 - 360.) > 1e-3\n\n\n@image_comparison(['set_get_ticklabels.png'])\ndef test_set_get_ticklabels():\n # test issue 2246\n fig, ax = plt.subplots(2)\n ha = ['normal', 'set_x/yticklabels']\n\n ax[0].plot(np.arange(10))\n ax[0].set_title(ha[0])\n\n ax[1].plot(np.arange(10))\n ax[1].set_title(ha[1])\n\n # set ticklabel to 1 plot in normal way\n ax[0].set_xticks(range(10))\n ax[0].set_yticks(range(10))\n ax[0].set_xticklabels(['a', 'b', 'c', 'd'] + 6 * [''])\n ax[0].set_yticklabels(['11', '12', '13', '14'] + 6 * [''])\n\n # set ticklabel to the other plot, expect the 2 plots have same label\n # setting pass get_ticklabels return value as ticklabels argument\n ax[1].set_xticks(ax[0].get_xticks())\n ax[1].set_yticks(ax[0].get_yticks())\n ax[1].set_xticklabels(ax[0].get_xticklabels())\n ax[1].set_yticklabels(ax[0].get_yticklabels())\n\n\ndef test_subsampled_ticklabels():\n # test issue 11937\n fig, ax = plt.subplots()\n ax.plot(np.arange(10))\n ax.xaxis.set_ticks(np.arange(10) + 0.1)\n ax.locator_params(nbins=5)\n ax.xaxis.set_ticklabels([c for c in \"bcdefghijk\"])\n plt.draw()\n labels = [t.get_text() for t in ax.xaxis.get_ticklabels()]\n assert labels == ['b', 'd', 'f', 'h', 'j']\n\n\ndef test_mismatched_ticklabels():\n fig, ax = plt.subplots()\n ax.plot(np.arange(10))\n ax.xaxis.set_ticks([1.5, 2.5])\n with pytest.raises(ValueError):\n ax.xaxis.set_ticklabels(['a', 'b', 'c'])\n\n\ndef test_empty_ticks_fixed_loc():\n # Smoke test that [] can be used to unset all tick labels\n fig, ax = plt.subplots()\n ax.bar([1, 2], [1, 2])\n ax.set_xticks([1, 2])\n ax.set_xticklabels([])\n\n\n@image_comparison(['retain_tick_visibility.png'])\ndef test_retain_tick_visibility():\n fig, ax = plt.subplots()\n plt.plot([0, 1, 2], [0, -1, 4])\n plt.setp(ax.get_yticklabels(), visible=False)\n ax.tick_params(axis=\"y\", which=\"both\", length=0)\n\n\ndef test_tick_label_update():\n # test issue 9397\n\n fig, ax = plt.subplots()\n\n # Set up a dummy formatter\n def formatter_func(x, 
pos):\n return \"unit value\" if x == 1 else \"\"\n ax.xaxis.set_major_formatter(plt.FuncFormatter(formatter_func))\n\n # Force some of the x-axis ticks to be outside of the drawn range\n ax.set_xticks([-1, 0, 1, 2, 3])\n ax.set_xlim(-0.5, 2.5)\n\n ax.figure.canvas.draw()\n tick_texts = [tick.get_text() for tick in ax.xaxis.get_ticklabels()]\n assert tick_texts == [\"\", \"\", \"unit value\", \"\", \"\"]\n\n\n@image_comparison(['o_marker_path_snap.png'], savefig_kwarg={'dpi': 72})\ndef test_o_marker_path_snap():\n fig, ax = plt.subplots()\n ax.margins(.1)\n for ms in range(1, 15):\n ax.plot([1, 2, ], np.ones(2) + ms, 'o', ms=ms)\n\n for ms in np.linspace(1, 10, 25):\n ax.plot([3, 4, ], np.ones(2) + ms, 'o', ms=ms)\n\n\ndef test_margins():\n # test all ways margins can be called\n data = [1, 10]\n xmin = 0.0\n xmax = len(data) - 1.0\n ymin = min(data)\n ymax = max(data)\n\n fig1, ax1 = plt.subplots(1, 1)\n ax1.plot(data)\n ax1.margins(1)\n assert ax1.margins() == (1, 1)\n assert ax1.get_xlim() == (xmin - (xmax - xmin) * 1,\n xmax + (xmax - xmin) * 1)\n assert ax1.get_ylim() == (ymin - (ymax - ymin) * 1,\n ymax + (ymax - ymin) * 1)\n\n fig2, ax2 = plt.subplots(1, 1)\n ax2.plot(data)\n ax2.margins(0.5, 2)\n assert ax2.margins() == (0.5, 2)\n assert ax2.get_xlim() == (xmin - (xmax - xmin) * 0.5,\n xmax + (xmax - xmin) * 0.5)\n assert ax2.get_ylim() == (ymin - (ymax - ymin) * 2,\n ymax + (ymax - ymin) * 2)\n\n fig3, ax3 = plt.subplots(1, 1)\n ax3.plot(data)\n ax3.margins(x=-0.2, y=0.5)\n assert ax3.margins() == (-0.2, 0.5)\n assert ax3.get_xlim() == (xmin - (xmax - xmin) * -0.2,\n xmax + (xmax - xmin) * -0.2)\n assert ax3.get_ylim() == (ymin - (ymax - ymin) * 0.5,\n ymax + (ymax - ymin) * 0.5)\n\n\ndef test_set_margin_updates_limits():\n mpl.style.use(\"default\")\n fig, ax = plt.subplots()\n ax.plot([1, 2], [1, 2])\n ax.set(xscale=\"log\", xmargin=0)\n assert ax.get_xlim() == (1, 2)\n\n\ndef test_length_one_hist():\n fig, ax = plt.subplots()\n ax.hist(1)\n ax.hist([1])\n\n\ndef test_pathological_hexbin():\n # issue #2863\n mylist = [10] * 100\n fig, ax = plt.subplots(1, 1)\n ax.hexbin(mylist, mylist)\n fig.savefig(io.BytesIO()) # Check that no warning is emitted.\n\n\ndef test_color_None():\n # issue 3855\n fig, ax = plt.subplots()\n ax.plot([1, 2], [1, 2], color=None)\n\n\ndef test_color_alias():\n # issues 4157 and 4162\n fig, ax = plt.subplots()\n line = ax.plot([0, 1], c='lime')[0]\n assert 'lime' == line.get_color()\n\n\ndef test_numerical_hist_label():\n fig, ax = plt.subplots()\n ax.hist([range(15)] * 5, label=range(5))\n ax.legend()\n\n\ndef test_unicode_hist_label():\n fig, ax = plt.subplots()\n a = (b'\\xe5\\xbe\\x88\\xe6\\xbc\\x82\\xe4\\xba\\xae, ' +\n b'r\\xc3\\xb6m\\xc3\\xa4n ch\\xc3\\xa4r\\xc3\\xa1ct\\xc3\\xa8rs')\n b = b'\\xd7\\xa9\\xd7\\x9c\\xd7\\x95\\xd7\\x9d'\n labels = [a.decode('utf-8'),\n 'hi aardvark',\n b.decode('utf-8'),\n ]\n\n ax.hist([range(15)] * 3, label=labels)\n ax.legend()\n\n\ndef test_move_offsetlabel():\n data = np.random.random(10) * 1e-22\n\n fig, ax = plt.subplots()\n ax.plot(data)\n fig.canvas.draw()\n before = ax.yaxis.offsetText.get_position()\n assert ax.yaxis.offsetText.get_horizontalalignment() == 'left'\n ax.yaxis.tick_right()\n fig.canvas.draw()\n after = ax.yaxis.offsetText.get_position()\n assert after[0] > before[0] and after[1] == before[1]\n assert ax.yaxis.offsetText.get_horizontalalignment() == 'right'\n\n fig, ax = plt.subplots()\n ax.plot(data)\n fig.canvas.draw()\n before = ax.xaxis.offsetText.get_position()\n assert 
ax.xaxis.offsetText.get_verticalalignment() == 'top'\n ax.xaxis.tick_top()\n fig.canvas.draw()\n after = ax.xaxis.offsetText.get_position()\n assert after[0] == before[0] and after[1] > before[1]\n assert ax.xaxis.offsetText.get_verticalalignment() == 'bottom'\n\n\n@image_comparison(['rc_spines.png'], savefig_kwarg={'dpi': 40})\ndef test_rc_spines():\n rc_dict = {\n 'axes.spines.left': False,\n 'axes.spines.right': False,\n 'axes.spines.top': False,\n 'axes.spines.bottom': False}\n with matplotlib.rc_context(rc_dict):\n plt.subplots() # create a figure and axes with the spine properties\n\n\n@image_comparison(['rc_grid.png'], savefig_kwarg={'dpi': 40})\ndef test_rc_grid():\n fig = plt.figure()\n rc_dict0 = {\n 'axes.grid': True,\n 'axes.grid.axis': 'both'\n }\n rc_dict1 = {\n 'axes.grid': True,\n 'axes.grid.axis': 'x'\n }\n rc_dict2 = {\n 'axes.grid': True,\n 'axes.grid.axis': 'y'\n }\n dict_list = [rc_dict0, rc_dict1, rc_dict2]\n\n for i, rc_dict in enumerate(dict_list, 1):\n with matplotlib.rc_context(rc_dict):\n fig.add_subplot(3, 1, i)\n\n\ndef test_rc_tick():\n d = {'xtick.bottom': False, 'xtick.top': True,\n 'ytick.left': True, 'ytick.right': False}\n with plt.rc_context(rc=d):\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n xax = ax1.xaxis\n yax = ax1.yaxis\n # tick1On bottom/left\n assert not xax._major_tick_kw['tick1On']\n assert xax._major_tick_kw['tick2On']\n assert not xax._minor_tick_kw['tick1On']\n assert xax._minor_tick_kw['tick2On']\n\n assert yax._major_tick_kw['tick1On']\n assert not yax._major_tick_kw['tick2On']\n assert yax._minor_tick_kw['tick1On']\n assert not yax._minor_tick_kw['tick2On']\n\n\ndef test_rc_major_minor_tick():\n d = {'xtick.top': True, 'ytick.right': True, # Enable all ticks\n 'xtick.bottom': True, 'ytick.left': True,\n # Selectively disable\n 'xtick.minor.bottom': False, 'xtick.major.bottom': False,\n 'ytick.major.left': False, 'ytick.minor.left': False}\n with plt.rc_context(rc=d):\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n xax = ax1.xaxis\n yax = ax1.yaxis\n # tick1On bottom/left\n assert not xax._major_tick_kw['tick1On']\n assert xax._major_tick_kw['tick2On']\n assert not xax._minor_tick_kw['tick1On']\n assert xax._minor_tick_kw['tick2On']\n\n assert not yax._major_tick_kw['tick1On']\n assert yax._major_tick_kw['tick2On']\n assert not yax._minor_tick_kw['tick1On']\n assert yax._minor_tick_kw['tick2On']\n\n\ndef test_square_plot():\n x = np.arange(4)\n y = np.array([1., 3., 5., 7.])\n fig, ax = plt.subplots()\n ax.plot(x, y, 'mo')\n ax.axis('square')\n xlim, ylim = ax.get_xlim(), ax.get_ylim()\n assert np.diff(xlim) == np.diff(ylim)\n assert ax.get_aspect() == 1\n assert_array_almost_equal(\n ax.get_position(original=True).extents, (0.125, 0.1, 0.9, 0.9))\n assert_array_almost_equal(\n ax.get_position(original=False).extents, (0.2125, 0.1, 0.8125, 0.9))\n\n\ndef test_bad_plot_args():\n with pytest.raises(ValueError):\n plt.plot(None)\n with pytest.raises(ValueError):\n plt.plot(None, None)\n with pytest.raises(ValueError):\n plt.plot(np.zeros((2, 2)), np.zeros((2, 3)))\n with pytest.raises(ValueError):\n plt.plot((np.arange(5).reshape((1, -1)), np.arange(5).reshape(-1, 1)))\n\n\[email protected](\n \"xy, cls\", [\n ((), mpl.image.AxesImage), # (0, N)\n (((3, 7), (2, 6)), mpl.image.AxesImage), # (xmin, xmax)\n ((range(5), range(4)), mpl.image.AxesImage), # regular grid\n (([1, 2, 4, 8, 16], [0, 1, 2, 3]), # irregular grid\n mpl.image.PcolorImage),\n ((np.random.random((4, 5)), np.random.random((4, 5))), # 2D coords\n 
mpl.collections.QuadMesh),\n ]\n)\[email protected](\n \"data\", [np.arange(12).reshape((3, 4)), np.random.rand(3, 4, 3)]\n)\ndef test_pcolorfast(xy, data, cls):\n fig, ax = plt.subplots()\n assert type(ax.pcolorfast(*xy, data)) == cls\n\n\ndef test_shared_scale():\n fig, axs = plt.subplots(2, 2, sharex=True, sharey=True)\n\n axs[0, 0].set_xscale(\"log\")\n axs[0, 0].set_yscale(\"log\")\n\n for ax in axs.flat:\n assert ax.get_yscale() == 'log'\n assert ax.get_xscale() == 'log'\n\n axs[1, 1].set_xscale(\"linear\")\n axs[1, 1].set_yscale(\"linear\")\n\n for ax in axs.flat:\n assert ax.get_yscale() == 'linear'\n assert ax.get_xscale() == 'linear'\n\n\ndef test_shared_bool():\n with pytest.raises(TypeError):\n plt.subplot(sharex=True)\n with pytest.raises(TypeError):\n plt.subplot(sharey=True)\n\n\ndef test_violin_point_mass():\n \"\"\"Violin plot should handle point mass pdf gracefully.\"\"\"\n plt.violinplot(np.array([0, 0]))\n\n\ndef generate_errorbar_inputs():\n base_xy = cycler('x', [np.arange(5)]) + cycler('y', [np.ones(5)])\n err_cycler = cycler('err', [1,\n [1, 1, 1, 1, 1],\n [[1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1]],\n np.ones(5),\n np.ones((2, 5)),\n None\n ])\n xerr_cy = cycler('xerr', err_cycler)\n yerr_cy = cycler('yerr', err_cycler)\n\n empty = ((cycler('x', [[]]) + cycler('y', [[]])) *\n cycler('xerr', [[], None]) * cycler('yerr', [[], None]))\n xerr_only = base_xy * xerr_cy\n yerr_only = base_xy * yerr_cy\n both_err = base_xy * yerr_cy * xerr_cy\n\n return [*xerr_only, *yerr_only, *both_err, *empty]\n\n\[email protected]('kwargs', generate_errorbar_inputs())\ndef test_errorbar_inputs_shotgun(kwargs):\n ax = plt.gca()\n eb = ax.errorbar(**kwargs)\n eb.remove()\n\n\n@image_comparison([\"dash_offset\"], remove_text=True)\ndef test_dash_offset():\n fig, ax = plt.subplots()\n x = np.linspace(0, 10)\n y = np.ones_like(x)\n for j in range(0, 100, 2):\n ax.plot(x, j*y, ls=(j, (10, 10)), lw=5, color='k')\n\n\ndef test_title_pad():\n # check that title padding puts the title in the right\n # place...\n fig, ax = plt.subplots()\n ax.set_title('aardvark', pad=30.)\n m = ax.titleOffsetTrans.get_matrix()\n assert m[1, -1] == (30. / 72. * fig.dpi)\n ax.set_title('aardvark', pad=0.)\n m = ax.titleOffsetTrans.get_matrix()\n assert m[1, -1] == 0.\n # check that it is reverted...\n ax.set_title('aardvark', pad=None)\n m = ax.titleOffsetTrans.get_matrix()\n assert m[1, -1] == (matplotlib.rcParams['axes.titlepad'] / 72. 
* fig.dpi)\n\n\ndef test_title_location_roundtrip():\n fig, ax = plt.subplots()\n # set default title location\n plt.rcParams['axes.titlelocation'] = 'center'\n ax.set_title('aardvark')\n ax.set_title('left', loc='left')\n ax.set_title('right', loc='right')\n\n assert 'left' == ax.get_title(loc='left')\n assert 'right' == ax.get_title(loc='right')\n assert 'aardvark' == ax.get_title(loc='center')\n\n with pytest.raises(ValueError):\n ax.get_title(loc='foo')\n with pytest.raises(ValueError):\n ax.set_title('fail', loc='foo')\n\n\n@image_comparison([\"loglog.png\"], remove_text=True, tol=0.02)\ndef test_loglog():\n fig, ax = plt.subplots()\n x = np.arange(1, 11)\n ax.loglog(x, x**3, lw=5)\n ax.tick_params(length=25, width=2)\n ax.tick_params(length=15, width=2, which='minor')\n\n\[email protected](\"new_api\", [False, True])\n@image_comparison([\"test_loglog_nonpos.png\"], remove_text=True, style='mpl20')\ndef test_loglog_nonpos(new_api):\n fig, axs = plt.subplots(3, 3)\n x = np.arange(1, 11)\n y = x**3\n y[7] = -3.\n x[4] = -10\n for (i, j), ax in np.ndenumerate(axs):\n mcx = ['mask', 'clip', ''][j]\n mcy = ['mask', 'clip', ''][i]\n if new_api:\n if mcx == mcy:\n if mcx:\n ax.loglog(x, y**3, lw=2, nonpositive=mcx)\n else:\n ax.loglog(x, y**3, lw=2)\n else:\n ax.loglog(x, y**3, lw=2)\n if mcx:\n ax.set_xscale(\"log\", nonpositive=mcx)\n if mcy:\n ax.set_yscale(\"log\", nonpositive=mcy)\n else:\n kws = {}\n if mcx:\n kws['nonposx'] = mcx\n if mcy:\n kws['nonposy'] = mcy\n with (pytest.warns(MatplotlibDeprecationWarning) if kws\n else nullcontext()):\n ax.loglog(x, y**3, lw=2, **kws)\n\n\[email protected]('default')\ndef test_axes_margins():\n fig, ax = plt.subplots()\n ax.plot([0, 1, 2, 3])\n assert ax.get_ybound()[0] != 0\n\n fig, ax = plt.subplots()\n ax.bar([0, 1, 2, 3], [1, 1, 1, 1])\n assert ax.get_ybound()[0] == 0\n\n fig, ax = plt.subplots()\n ax.barh([0, 1, 2, 3], [1, 1, 1, 1])\n assert ax.get_xbound()[0] == 0\n\n fig, ax = plt.subplots()\n ax.pcolor(np.zeros((10, 10)))\n assert ax.get_xbound() == (0, 10)\n assert ax.get_ybound() == (0, 10)\n\n fig, ax = plt.subplots()\n ax.pcolorfast(np.zeros((10, 10)))\n assert ax.get_xbound() == (0, 10)\n assert ax.get_ybound() == (0, 10)\n\n fig, ax = plt.subplots()\n ax.hist(np.arange(10))\n assert ax.get_ybound()[0] == 0\n\n fig, ax = plt.subplots()\n ax.imshow(np.zeros((10, 10)))\n assert ax.get_xbound() == (-0.5, 9.5)\n assert ax.get_ybound() == (-0.5, 9.5)\n\n\[email protected](params=['x', 'y'])\ndef shared_axis_remover(request):\n def _helper_x(ax):\n ax2 = ax.twinx()\n ax2.remove()\n ax.set_xlim(0, 15)\n r = ax.xaxis.get_major_locator()()\n assert r[-1] > 14\n\n def _helper_y(ax):\n ax2 = ax.twiny()\n ax2.remove()\n ax.set_ylim(0, 15)\n r = ax.yaxis.get_major_locator()()\n assert r[-1] > 14\n\n return {\"x\": _helper_x, \"y\": _helper_y}[request.param]\n\n\[email protected](params=['gca', 'subplots', 'subplots_shared', 'add_axes'])\ndef shared_axes_generator(request):\n # test all of the ways to get fig/ax sets\n if request.param == 'gca':\n fig = plt.figure()\n ax = fig.gca()\n elif request.param == 'subplots':\n fig, ax = plt.subplots()\n elif request.param == 'subplots_shared':\n fig, ax_lst = plt.subplots(2, 2, sharex='all', sharey='all')\n ax = ax_lst[0][0]\n elif request.param == 'add_axes':\n fig = plt.figure()\n ax = fig.add_axes([.1, .1, .8, .8])\n return fig, ax\n\n\ndef test_remove_shared_axes(shared_axes_generator, shared_axis_remover):\n # test all of the ways to get fig/ax sets\n fig, ax = shared_axes_generator\n 
shared_axis_remover(ax)\n\n\ndef test_remove_shared_axes_relim():\n fig, ax_lst = plt.subplots(2, 2, sharex='all', sharey='all')\n ax = ax_lst[0][0]\n orig_xlim = ax_lst[0][1].get_xlim()\n ax.remove()\n ax.set_xlim(0, 5)\n assert_array_equal(ax_lst[0][1].get_xlim(), orig_xlim)\n\n\ndef test_shared_axes_autoscale():\n l = np.arange(-80, 90, 40)\n t = np.random.random_sample((l.size, l.size))\n\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True)\n\n ax1.set_xlim(-1000, 1000)\n ax1.set_ylim(-1000, 1000)\n ax1.contour(l, l, t)\n\n ax2.contour(l, l, t)\n assert not ax1.get_autoscalex_on() and not ax2.get_autoscalex_on()\n assert not ax1.get_autoscaley_on() and not ax2.get_autoscaley_on()\n assert ax1.get_xlim() == ax2.get_xlim() == (-1000, 1000)\n assert ax1.get_ylim() == ax2.get_ylim() == (-1000, 1000)\n\n\ndef test_adjust_numtick_aspect():\n fig, ax = plt.subplots()\n ax.yaxis.get_major_locator().set_params(nbins='auto')\n ax.set_xlim(0, 1000)\n ax.set_aspect('equal')\n fig.canvas.draw()\n assert len(ax.yaxis.get_major_locator()()) == 2\n ax.set_ylim(0, 1000)\n fig.canvas.draw()\n assert len(ax.yaxis.get_major_locator()()) > 2\n\n\n@image_comparison([\"auto_numticks.png\"], style='default')\ndef test_auto_numticks():\n # Make tiny, empty subplots, verify that there are only 3 ticks.\n plt.subplots(4, 4)\n\n\n@image_comparison([\"auto_numticks_log.png\"], style='default')\ndef test_auto_numticks_log():\n # Verify that there are not too many ticks with a large log range.\n fig, ax = plt.subplots()\n matplotlib.rcParams['axes.autolimit_mode'] = 'round_numbers'\n ax.loglog([1e-20, 1e5], [1e-16, 10])\n\n\ndef test_broken_barh_empty():\n fig, ax = plt.subplots()\n ax.broken_barh([], (.1, .5))\n\n\ndef test_broken_barh_timedelta():\n \"\"\"Check that timedelta works as x, dx pair for this method.\"\"\"\n fig, ax = plt.subplots()\n d0 = datetime.datetime(2018, 11, 9, 0, 0, 0)\n pp = ax.broken_barh([(d0, datetime.timedelta(hours=1))], [1, 2])\n assert pp.get_paths()[0].vertices[0, 0] == mdates.date2num(d0)\n assert pp.get_paths()[0].vertices[2, 0] == mdates.date2num(d0) + 1 / 24\n\n\ndef test_pandas_pcolormesh(pd):\n time = pd.date_range('2000-01-01', periods=10)\n depth = np.arange(20)\n data = np.random.rand(19, 9)\n\n fig, ax = plt.subplots()\n ax.pcolormesh(time, depth, data)\n\n\ndef test_pandas_indexing_dates(pd):\n dates = np.arange('2005-02', '2005-03', dtype='datetime64[D]')\n values = np.sin(np.array(range(len(dates))))\n df = pd.DataFrame({'dates': dates, 'values': values})\n\n ax = plt.gca()\n\n without_zero_index = df[np.array(df.index) % 2 == 1].copy()\n ax.plot('dates', 'values', data=without_zero_index)\n\n\ndef test_pandas_errorbar_indexing(pd):\n df = pd.DataFrame(np.random.uniform(size=(5, 4)),\n columns=['x', 'y', 'xe', 'ye'],\n index=[1, 2, 3, 4, 5])\n fig, ax = plt.subplots()\n ax.errorbar('x', 'y', xerr='xe', yerr='ye', data=df)\n\n\ndef test_pandas_index_shape(pd):\n df = pd.DataFrame({\"XX\": [4, 5, 6], \"YY\": [7, 1, 2]})\n fig, ax = plt.subplots()\n ax.plot(df.index, df['YY'])\n\n\ndef test_pandas_indexing_hist(pd):\n ser_1 = pd.Series(data=[1, 2, 2, 3, 3, 4, 4, 4, 4, 5])\n ser_2 = ser_1.iloc[1:]\n fig, ax = plt.subplots()\n ax.hist(ser_2)\n\n\ndef test_pandas_bar_align_center(pd):\n # Tests fix for issue 8767\n df = pd.DataFrame({'a': range(2), 'b': range(2)})\n\n fig, ax = plt.subplots(1)\n\n ax.bar(df.loc[df['a'] == 1, 'b'],\n df.loc[df['a'] == 1, 'b'],\n align='center')\n\n fig.canvas.draw()\n\n\ndef test_axis_set_tick_params_labelsize_labelcolor():\n # 
Tests fix for issue 4346\n axis_1 = plt.subplot()\n axis_1.yaxis.set_tick_params(labelsize=30, labelcolor='red',\n direction='out')\n\n # Expected values after setting the ticks\n assert axis_1.yaxis.majorTicks[0]._size == 4.0\n assert axis_1.yaxis.majorTicks[0].tick1line.get_color() == 'k'\n assert axis_1.yaxis.majorTicks[0].label1.get_size() == 30.0\n assert axis_1.yaxis.majorTicks[0].label1.get_color() == 'red'\n\n\ndef test_axes_tick_params_gridlines():\n # Now treating grid params like other Tick params\n ax = plt.subplot()\n ax.tick_params(grid_color='b', grid_linewidth=5, grid_alpha=0.5,\n grid_linestyle='dashdot')\n for axis in ax.xaxis, ax.yaxis:\n assert axis.majorTicks[0].gridline.get_color() == 'b'\n assert axis.majorTicks[0].gridline.get_linewidth() == 5\n assert axis.majorTicks[0].gridline.get_alpha() == 0.5\n assert axis.majorTicks[0].gridline.get_linestyle() == '-.'\n\n\ndef test_axes_tick_params_ylabelside():\n # Tests fix for issue 10267\n ax = plt.subplot()\n ax.tick_params(labelleft=False, labelright=True,\n which='major')\n ax.tick_params(labelleft=False, labelright=True,\n which='minor')\n # expects left false, right true\n assert ax.yaxis.majorTicks[0].label1.get_visible() is False\n assert ax.yaxis.majorTicks[0].label2.get_visible() is True\n assert ax.yaxis.minorTicks[0].label1.get_visible() is False\n assert ax.yaxis.minorTicks[0].label2.get_visible() is True\n\n\ndef test_axes_tick_params_xlabelside():\n # Tests fix for issue 10267\n ax = plt.subplot()\n ax.tick_params(labeltop=True, labelbottom=False,\n which='major')\n ax.tick_params(labeltop=True, labelbottom=False,\n which='minor')\n # expects top True, bottom False\n # label1.get_visible() mapped to labelbottom\n # label2.get_visible() mapped to labeltop\n assert ax.xaxis.majorTicks[0].label1.get_visible() is False\n assert ax.xaxis.majorTicks[0].label2.get_visible() is True\n assert ax.xaxis.minorTicks[0].label1.get_visible() is False\n assert ax.xaxis.minorTicks[0].label2.get_visible() is True\n\n\ndef test_none_kwargs():\n ax = plt.figure().subplots()\n ln, = ax.plot(range(32), linestyle=None)\n assert ln.get_linestyle() == '-'\n\n\ndef test_bar_uint8():\n xs = [0, 1, 2, 3]\n b = plt.bar(np.array(xs, dtype=np.uint8), [2, 3, 4, 5], align=\"edge\")\n for (patch, x) in zip(b.patches, xs):\n assert patch.xy[0] == x\n\n\n@image_comparison(['date_timezone_x.png'], tol=1.0)\ndef test_date_timezone_x():\n # Tests issue 5575\n time_index = [datetime.datetime(2016, 2, 22, hour=x,\n tzinfo=dateutil.tz.gettz('Canada/Eastern'))\n for x in range(3)]\n\n # Same Timezone\n plt.figure(figsize=(20, 12))\n plt.subplot(2, 1, 1)\n plt.plot_date(time_index, [3] * 3, tz='Canada/Eastern')\n\n # Different Timezone\n plt.subplot(2, 1, 2)\n plt.plot_date(time_index, [3] * 3, tz='UTC')\n\n\n@image_comparison(['date_timezone_y.png'])\ndef test_date_timezone_y():\n # Tests issue 5575\n time_index = [datetime.datetime(2016, 2, 22, hour=x,\n tzinfo=dateutil.tz.gettz('Canada/Eastern'))\n for x in range(3)]\n\n # Same Timezone\n plt.figure(figsize=(20, 12))\n plt.subplot(2, 1, 1)\n plt.plot_date([3] * 3,\n time_index, tz='Canada/Eastern', xdate=False, ydate=True)\n\n # Different Timezone\n plt.subplot(2, 1, 2)\n plt.plot_date([3] * 3, time_index, tz='UTC', xdate=False, ydate=True)\n\n\n@image_comparison(['date_timezone_x_and_y.png'], tol=1.0)\ndef test_date_timezone_x_and_y():\n # Tests issue 5575\n UTC = datetime.timezone.utc\n time_index = [datetime.datetime(2016, 2, 22, hour=x, tzinfo=UTC)\n for x in range(3)]\n\n # Same Timezone\n 
plt.figure(figsize=(20, 12))\n plt.subplot(2, 1, 1)\n plt.plot_date(time_index, time_index, tz='UTC', ydate=True)\n\n # Different Timezone\n plt.subplot(2, 1, 2)\n plt.plot_date(time_index, time_index, tz='US/Eastern', ydate=True)\n\n\n@image_comparison(['axisbelow.png'], remove_text=True)\ndef test_axisbelow():\n # Test 'line' setting added in 6287.\n # Show only grids, not frame or ticks, to make this test\n # independent of future change to drawing order of those elements.\n axs = plt.figure().subplots(ncols=3, sharex=True, sharey=True)\n settings = (False, 'line', True)\n\n for ax, setting in zip(axs, settings):\n ax.plot((0, 10), (0, 10), lw=10, color='m')\n circ = mpatches.Circle((3, 3), color='r')\n ax.add_patch(circ)\n ax.grid(color='c', linestyle='-', linewidth=3)\n ax.tick_params(top=False, bottom=False,\n left=False, right=False)\n ax.spines[:].set_visible(False)\n ax.set_axisbelow(setting)\n\n\ndef test_titletwiny():\n plt.style.use('mpl20')\n fig, ax = plt.subplots(dpi=72)\n ax2 = ax.twiny()\n xlabel2 = ax2.set_xlabel('Xlabel2')\n title = ax.set_title('Title')\n fig.canvas.draw()\n renderer = fig.canvas.get_renderer()\n # ------- Test that title is put above Xlabel2 (Xlabel2 at top) ----------\n bbox_y0_title = title.get_window_extent(renderer).y0 # bottom of title\n bbox_y1_xlabel2 = xlabel2.get_window_extent(renderer).y1 # top of xlabel2\n y_diff = bbox_y0_title - bbox_y1_xlabel2\n assert np.isclose(y_diff, 3)\n\n\ndef test_titlesetpos():\n # Test that title stays put if we set it manually\n fig, ax = plt.subplots()\n fig.subplots_adjust(top=0.8)\n ax2 = ax.twiny()\n ax.set_xlabel('Xlabel')\n ax2.set_xlabel('Xlabel2')\n ax.set_title('Title')\n pos = (0.5, 1.11)\n ax.title.set_position(pos)\n renderer = fig.canvas.get_renderer()\n ax._update_title_position(renderer)\n assert ax.title.get_position() == pos\n\n\ndef test_title_xticks_top():\n # Test that title moves if xticks on top of axes.\n mpl.rcParams['axes.titley'] = None\n fig, ax = plt.subplots()\n ax.xaxis.set_ticks_position('top')\n ax.set_title('xlabel top')\n fig.canvas.draw()\n assert ax.title.get_position()[1] > 1.04\n\n\ndef test_title_xticks_top_both():\n # Test that title moves if xticks on top of axes.\n mpl.rcParams['axes.titley'] = None\n fig, ax = plt.subplots()\n ax.tick_params(axis=\"x\",\n bottom=True, top=True, labelbottom=True, labeltop=True)\n ax.set_title('xlabel top')\n fig.canvas.draw()\n assert ax.title.get_position()[1] > 1.04\n\n\ndef test_title_no_move_off_page():\n # If an axes is off the figure (ie. 
if it is cropped during a save)\n # make sure that the automatic title repositioning does not get done.\n mpl.rcParams['axes.titley'] = None\n fig = plt.figure()\n ax = fig.add_axes([0.1, -0.5, 0.8, 0.2])\n ax.tick_params(axis=\"x\",\n bottom=True, top=True, labelbottom=True, labeltop=True)\n tt = ax.set_title('Boo')\n fig.canvas.draw()\n assert tt.get_position()[1] == 1.0\n\n\ndef test_offset_label_color():\n # Tests issue 6440\n fig, ax = plt.subplots()\n ax.plot([1.01e9, 1.02e9, 1.03e9])\n ax.yaxis.set_tick_params(labelcolor='red')\n assert ax.yaxis.get_offset_text().get_color() == 'red'\n\n\ndef test_offset_text_visible():\n fig, ax = plt.subplots()\n ax.plot([1.01e9, 1.02e9, 1.03e9])\n ax.yaxis.set_tick_params(label1On=False, label2On=True)\n assert ax.yaxis.get_offset_text().get_visible()\n ax.yaxis.set_tick_params(label2On=False)\n assert not ax.yaxis.get_offset_text().get_visible()\n\n\ndef test_large_offset():\n fig, ax = plt.subplots()\n ax.plot((1 + np.array([0, 1.e-12])) * 1.e27)\n fig.canvas.draw()\n\n\ndef test_barb_units():\n fig, ax = plt.subplots()\n dates = [datetime.datetime(2017, 7, 15, 18, i) for i in range(0, 60, 10)]\n y = np.linspace(0, 5, len(dates))\n u = v = np.linspace(0, 50, len(dates))\n ax.barbs(dates, y, u, v)\n\n\ndef test_quiver_units():\n fig, ax = plt.subplots()\n dates = [datetime.datetime(2017, 7, 15, 18, i) for i in range(0, 60, 10)]\n y = np.linspace(0, 5, len(dates))\n u = v = np.linspace(0, 50, len(dates))\n ax.quiver(dates, y, u, v)\n\n\ndef test_bar_color_cycle():\n to_rgb = mcolors.to_rgb\n fig, ax = plt.subplots()\n for j in range(5):\n ln, = ax.plot(range(3))\n brs = ax.bar(range(3), range(3))\n for br in brs:\n assert to_rgb(ln.get_color()) == to_rgb(br.get_facecolor())\n\n\ndef test_tick_param_label_rotation():\n fix, (ax, ax2) = plt.subplots(1, 2)\n ax.plot([0, 1], [0, 1])\n ax2.plot([0, 1], [0, 1])\n ax.xaxis.set_tick_params(which='both', rotation=75)\n ax.yaxis.set_tick_params(which='both', rotation=90)\n for text in ax.get_xticklabels(which='both'):\n assert text.get_rotation() == 75\n for text in ax.get_yticklabels(which='both'):\n assert text.get_rotation() == 90\n\n ax2.tick_params(axis='x', labelrotation=53)\n ax2.tick_params(axis='y', rotation=35)\n for text in ax2.get_xticklabels(which='major'):\n assert text.get_rotation() == 53\n for text in ax2.get_yticklabels(which='major'):\n assert text.get_rotation() == 35\n\n\[email protected]('default')\ndef test_fillbetween_cycle():\n fig, ax = plt.subplots()\n\n for j in range(3):\n cc = ax.fill_between(range(3), range(3))\n target = mcolors.to_rgba('C{}'.format(j))\n assert tuple(cc.get_facecolors().squeeze()) == tuple(target)\n\n for j in range(3, 6):\n cc = ax.fill_betweenx(range(3), range(3))\n target = mcolors.to_rgba('C{}'.format(j))\n assert tuple(cc.get_facecolors().squeeze()) == tuple(target)\n\n target = mcolors.to_rgba('k')\n\n for al in ['facecolor', 'facecolors', 'color']:\n cc = ax.fill_between(range(3), range(3), **{al: 'k'})\n assert tuple(cc.get_facecolors().squeeze()) == tuple(target)\n\n edge_target = mcolors.to_rgba('k')\n for j, el in enumerate(['edgecolor', 'edgecolors'], start=6):\n cc = ax.fill_between(range(3), range(3), **{el: 'k'})\n face_target = mcolors.to_rgba('C{}'.format(j))\n assert tuple(cc.get_facecolors().squeeze()) == tuple(face_target)\n assert tuple(cc.get_edgecolors().squeeze()) == tuple(edge_target)\n\n\ndef test_log_margins():\n plt.rcParams['axes.autolimit_mode'] = 'data'\n fig, ax = plt.subplots()\n margin = 0.05\n ax.set_xmargin(margin)\n 
ax.semilogx([10, 100], [10, 100])\n xlim0, xlim1 = ax.get_xlim()\n transform = ax.xaxis.get_transform()\n xlim0t, xlim1t = transform.transform([xlim0, xlim1])\n x0t, x1t = transform.transform([10, 100])\n delta = (x1t - x0t) * margin\n assert_allclose([xlim0t + delta, xlim1t - delta], [x0t, x1t])\n\n\ndef test_color_length_mismatch():\n N = 5\n x, y = np.arange(N), np.arange(N)\n colors = np.arange(N+1)\n fig, ax = plt.subplots()\n with pytest.raises(ValueError):\n ax.scatter(x, y, c=colors)\n c_rgb = (0.5, 0.5, 0.5)\n ax.scatter(x, y, c=c_rgb)\n ax.scatter(x, y, c=[c_rgb] * N)\n\n\ndef test_eventplot_legend():\n plt.eventplot([1.0], label='Label')\n plt.legend()\n\n\ndef test_bar_broadcast_args():\n fig, ax = plt.subplots()\n # Check that a bar chart with a single height for all bars works.\n ax.bar(range(4), 1)\n # Check that a horizontal chart with one width works.\n ax.barh(0, 1, left=range(4), height=1)\n # Check that edgecolor gets broadcast.\n rect1, rect2 = ax.bar([0, 1], [0, 1], edgecolor=(.1, .2, .3, .4))\n assert rect1.get_edgecolor() == rect2.get_edgecolor() == (.1, .2, .3, .4)\n\n\ndef test_invalid_axis_limits():\n plt.plot([0, 1], [0, 1])\n with pytest.raises(ValueError):\n plt.xlim(np.nan)\n with pytest.raises(ValueError):\n plt.xlim(np.inf)\n with pytest.raises(ValueError):\n plt.ylim(np.nan)\n with pytest.raises(ValueError):\n plt.ylim(np.inf)\n\n\n# Test all 4 combinations of logs/symlogs for minorticks_on()\[email protected]('xscale', ['symlog', 'log'])\[email protected]('yscale', ['symlog', 'log'])\ndef test_minorticks_on(xscale, yscale):\n ax = plt.subplot()\n ax.plot([1, 2, 3, 4])\n ax.set_xscale(xscale)\n ax.set_yscale(yscale)\n ax.minorticks_on()\n\n\ndef test_twinx_knows_limits():\n fig, ax = plt.subplots()\n\n ax.axvspan(1, 2)\n xtwin = ax.twinx()\n xtwin.plot([0, 0.5], [1, 2])\n # control axis\n fig2, ax2 = plt.subplots()\n\n ax2.axvspan(1, 2)\n ax2.plot([0, 0.5], [1, 2])\n\n assert_array_equal(xtwin.viewLim.intervalx, ax2.viewLim.intervalx)\n\n\ndef test_zero_linewidth():\n # Check that setting a zero linewidth doesn't error\n plt.plot([0, 1], [0, 1], ls='--', lw=0)\n\n\ndef test_empty_errorbar_legend():\n fig, ax = plt.subplots()\n ax.errorbar([], [], xerr=[], label='empty y')\n ax.errorbar([], [], yerr=[], label='empty x')\n ax.legend()\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_plot_decimal(fig_test, fig_ref):\n x0 = np.arange(-10, 10, 0.3)\n y0 = [5.2 * x ** 3 - 2.1 * x ** 2 + 7.34 * x + 4.5 for x in x0]\n x = [Decimal(i) for i in x0]\n y = [Decimal(i) for i in y0]\n # Test image - line plot with Decimal input\n fig_test.subplots().plot(x, y)\n # Reference image\n fig_ref.subplots().plot(x0, y0)\n\n\n# pdf and svg tests fail using travis' old versions of gs and inkscape.\n@check_figures_equal(extensions=[\"png\"])\ndef test_markerfacecolor_none_alpha(fig_test, fig_ref):\n fig_test.subplots().plot(0, \"o\", mfc=\"none\", alpha=.5)\n fig_ref.subplots().plot(0, \"o\", mfc=\"w\", alpha=.5)\n\n\ndef test_tick_padding_tightbbox():\n \"\"\"Test that tick padding gets turned off if axis is off\"\"\"\n plt.rcParams[\"xtick.direction\"] = \"out\"\n plt.rcParams[\"ytick.direction\"] = \"out\"\n fig, ax = plt.subplots()\n bb = ax.get_tightbbox(fig.canvas.get_renderer())\n ax.axis('off')\n bb2 = ax.get_tightbbox(fig.canvas.get_renderer())\n assert bb.x0 < bb2.x0\n assert bb.y0 < bb2.y0\n\n\ndef test_inset():\n \"\"\"\n Ensure that inset_ax argument is indeed optional\n \"\"\"\n dx, dy = 0.05, 0.05\n # generate 2 2d grids for the x & y bounds\n y, x = 
np.mgrid[slice(1, 5 + dy, dy),\n slice(1, 5 + dx, dx)]\n z = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)\n\n fig, ax = plt.subplots()\n ax.pcolormesh(x, y, z[:-1, :-1])\n ax.set_aspect(1.)\n ax.apply_aspect()\n # we need to apply_aspect to make the drawing below work.\n\n xlim = [1.5, 2.15]\n ylim = [2, 2.5]\n\n rect = [xlim[0], ylim[0], xlim[1] - xlim[0], ylim[1] - ylim[0]]\n\n rec, connectors = ax.indicate_inset(bounds=rect)\n assert connectors is None\n fig.canvas.draw()\n xx = np.array([[1.5, 2.],\n [2.15, 2.5]])\n assert np.all(rec.get_bbox().get_points() == xx)\n\n\ndef test_zoom_inset():\n dx, dy = 0.05, 0.05\n # generate 2 2d grids for the x & y bounds\n y, x = np.mgrid[slice(1, 5 + dy, dy),\n slice(1, 5 + dx, dx)]\n z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x)\n\n fig, ax = plt.subplots()\n ax.pcolormesh(x, y, z[:-1, :-1])\n ax.set_aspect(1.)\n ax.apply_aspect()\n # we need to apply_aspect to make the drawing below work.\n\n # Make the inset_axes... Position axes coordinates...\n axin1 = ax.inset_axes([0.7, 0.7, 0.35, 0.35])\n # redraw the data in the inset axes...\n axin1.pcolormesh(x, y, z[:-1, :-1])\n axin1.set_xlim([1.5, 2.15])\n axin1.set_ylim([2, 2.5])\n axin1.set_aspect(ax.get_aspect())\n\n rec, connectors = ax.indicate_inset_zoom(axin1)\n assert len(connectors) == 4\n fig.canvas.draw()\n xx = np.array([[1.5, 2.],\n [2.15, 2.5]])\n assert(np.all(rec.get_bbox().get_points() == xx))\n xx = np.array([[0.6325, 0.692308],\n [0.8425, 0.907692]])\n np.testing.assert_allclose(\n axin1.get_position().get_points(), xx, rtol=1e-4)\n\n\[email protected]('x_inverted', [False, True])\[email protected]('y_inverted', [False, True])\ndef test_indicate_inset_inverted(x_inverted, y_inverted):\n \"\"\"\n Test that the inset lines are correctly located with inverted data axes.\n \"\"\"\n fig, (ax1, ax2) = plt.subplots(1, 2)\n\n x = np.arange(10)\n ax1.plot(x, x, 'o')\n if x_inverted:\n ax1.invert_xaxis()\n if y_inverted:\n ax1.invert_yaxis()\n\n rect, bounds = ax1.indicate_inset([2, 2, 5, 4], ax2)\n lower_left, upper_left, lower_right, upper_right = bounds\n\n sign_x = -1 if x_inverted else 1\n sign_y = -1 if y_inverted else 1\n assert sign_x * (lower_right.xy2[0] - lower_left.xy2[0]) > 0\n assert sign_x * (upper_right.xy2[0] - upper_left.xy2[0]) > 0\n assert sign_y * (upper_left.xy2[1] - lower_left.xy2[1]) > 0\n assert sign_y * (upper_right.xy2[1] - lower_right.xy2[1]) > 0\n\n\ndef test_set_position():\n fig, ax = plt.subplots()\n ax.set_aspect(3.)\n ax.set_position([0.1, 0.1, 0.4, 0.4], which='both')\n assert np.allclose(ax.get_position().width, 0.1)\n ax.set_aspect(2.)\n ax.set_position([0.1, 0.1, 0.4, 0.4], which='original')\n assert np.allclose(ax.get_position().width, 0.15)\n ax.set_aspect(3.)\n ax.set_position([0.1, 0.1, 0.4, 0.4], which='active')\n assert np.allclose(ax.get_position().width, 0.1)\n\n\ndef test_spines_properbbox_after_zoom():\n fig, ax = plt.subplots()\n bb = ax.spines.bottom.get_window_extent(fig.canvas.get_renderer())\n # this is what zoom calls:\n ax._set_view_from_bbox((320, 320, 500, 500), 'in',\n None, False, False)\n bb2 = ax.spines.bottom.get_window_extent(fig.canvas.get_renderer())\n np.testing.assert_allclose(bb.get_points(), bb2.get_points(), rtol=1e-6)\n\n\ndef test_cartopy_backcompat():\n\n class Dummy(matplotlib.axes.Axes):\n ...\n\n class DummySubplot(matplotlib.axes.SubplotBase, Dummy):\n _axes_class = Dummy\n\n matplotlib.axes._subplots._subplot_classes[Dummy] = DummySubplot\n\n FactoryDummySubplot = 
matplotlib.axes.subplot_class_factory(Dummy)\n\n assert DummySubplot is FactoryDummySubplot\n\n\ndef test_gettightbbox_ignore_nan():\n fig, ax = plt.subplots()\n remove_ticks_and_titles(fig)\n ax.text(np.NaN, 1, 'Boo')\n renderer = fig.canvas.get_renderer()\n np.testing.assert_allclose(ax.get_tightbbox(renderer).width, 496)\n\n\ndef test_scatter_series_non_zero_index(pd):\n # create non-zero index\n ids = range(10, 18)\n x = pd.Series(np.random.uniform(size=8), index=ids)\n y = pd.Series(np.random.uniform(size=8), index=ids)\n c = pd.Series([1, 1, 1, 1, 1, 0, 0, 0], index=ids)\n plt.scatter(x, y, c)\n\n\ndef test_scatter_empty_data():\n # making sure this does not raise an exception\n plt.scatter([], [])\n plt.scatter([], [], s=[], c=[])\n\n\n@image_comparison(['annotate_across_transforms.png'],\n style='mpl20', remove_text=True)\ndef test_annotate_across_transforms():\n x = np.linspace(0, 10, 200)\n y = np.exp(-x) * np.sin(x)\n\n fig, ax = plt.subplots(figsize=(3.39, 3))\n ax.plot(x, y)\n axins = ax.inset_axes([0.4, 0.5, 0.3, 0.3])\n axins.set_aspect(0.2)\n axins.xaxis.set_visible(False)\n axins.yaxis.set_visible(False)\n ax.annotate(\"\", xy=(x[150], y[150]), xycoords=ax.transData,\n xytext=(1, 0), textcoords=axins.transAxes,\n arrowprops=dict(arrowstyle=\"->\"))\n\n\n@image_comparison(['secondary_xy.png'], style='mpl20')\ndef test_secondary_xy():\n fig, axs = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True)\n\n def invert(x):\n with np.errstate(divide='ignore'):\n return 1 / x\n\n for nn, ax in enumerate(axs):\n ax.plot(np.arange(2, 11), np.arange(2, 11))\n if nn == 0:\n secax = ax.secondary_xaxis\n else:\n secax = ax.secondary_yaxis\n\n secax(0.2, functions=(invert, invert))\n secax(0.4, functions=(lambda x: 2 * x, lambda x: x / 2))\n secax(0.6, functions=(lambda x: x**2, lambda x: x**(1/2)))\n secax(0.8)\n\n\ndef test_secondary_fail():\n fig, ax = plt.subplots()\n ax.plot(np.arange(2, 11), np.arange(2, 11))\n with pytest.raises(ValueError):\n ax.secondary_xaxis(0.2, functions=(lambda x: 1 / x))\n with pytest.raises(ValueError):\n ax.secondary_xaxis('right')\n with pytest.raises(ValueError):\n ax.secondary_yaxis('bottom')\n\n\ndef test_secondary_resize():\n fig, ax = plt.subplots(figsize=(10, 5))\n ax.plot(np.arange(2, 11), np.arange(2, 11))\n\n def invert(x):\n with np.errstate(divide='ignore'):\n return 1 / x\n\n ax.secondary_xaxis('top', functions=(invert, invert))\n fig.canvas.draw()\n fig.set_size_inches((7, 4))\n assert_allclose(ax.get_position().extents, [0.125, 0.1, 0.9, 0.9])\n\n\ndef test_secondary_minorloc():\n fig, ax = plt.subplots(figsize=(10, 5))\n ax.plot(np.arange(2, 11), np.arange(2, 11))\n\n def invert(x):\n with np.errstate(divide='ignore'):\n return 1 / x\n\n secax = ax.secondary_xaxis('top', functions=(invert, invert))\n assert isinstance(secax._axis.get_minor_locator(),\n mticker.NullLocator)\n secax.minorticks_on()\n assert isinstance(secax._axis.get_minor_locator(),\n mticker.AutoMinorLocator)\n ax.set_xscale('log')\n plt.draw()\n assert isinstance(secax._axis.get_minor_locator(),\n mticker.LogLocator)\n ax.set_xscale('linear')\n plt.draw()\n assert isinstance(secax._axis.get_minor_locator(),\n mticker.NullLocator)\n\n\ndef test_secondary_formatter():\n fig, ax = plt.subplots()\n ax.set_xscale(\"log\")\n secax = ax.secondary_xaxis(\"top\")\n secax.xaxis.set_major_formatter(mticker.ScalarFormatter())\n fig.canvas.draw()\n assert isinstance(\n secax.xaxis.get_major_formatter(), mticker.ScalarFormatter)\n\n\ndef color_boxes(fig, axs):\n \"\"\"\n Helper 
for the tests below that test the extents of various axes elements\n \"\"\"\n fig.canvas.draw()\n\n renderer = fig.canvas.get_renderer()\n bbaxis = []\n for nn, axx in enumerate([axs.xaxis, axs.yaxis]):\n bb = axx.get_tightbbox(renderer)\n if bb:\n axisr = plt.Rectangle(\n (bb.x0, bb.y0), width=bb.width, height=bb.height,\n linewidth=0.7, edgecolor='y', facecolor=\"none\", transform=None,\n zorder=3)\n fig.add_artist(axisr)\n bbaxis += [bb]\n\n bbspines = []\n for nn, a in enumerate(['bottom', 'top', 'left', 'right']):\n bb = axs.spines[a].get_window_extent(renderer)\n spiner = plt.Rectangle(\n (bb.x0, bb.y0), width=bb.width, height=bb.height,\n linewidth=0.7, edgecolor=\"green\", facecolor=\"none\", transform=None,\n zorder=3)\n fig.add_artist(spiner)\n bbspines += [bb]\n\n bb = axs.get_window_extent()\n rect2 = plt.Rectangle(\n (bb.x0, bb.y0), width=bb.width, height=bb.height,\n linewidth=1.5, edgecolor=\"magenta\", facecolor=\"none\", transform=None,\n zorder=2)\n fig.add_artist(rect2)\n bbax = bb\n\n bb2 = axs.get_tightbbox(renderer)\n rect2 = plt.Rectangle(\n (bb2.x0, bb2.y0), width=bb2.width, height=bb2.height,\n linewidth=3, edgecolor=\"red\", facecolor=\"none\", transform=None,\n zorder=1)\n fig.add_artist(rect2)\n bbtb = bb2\n return bbaxis, bbspines, bbax, bbtb\n\n\ndef test_normal_axes():\n with rc_context({'_internal.classic_mode': False}):\n fig, ax = plt.subplots(dpi=200, figsize=(6, 6))\n fig.canvas.draw()\n plt.close(fig)\n bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)\n\n # test the axis bboxes\n target = [\n [123.375, 75.88888888888886, 983.25, 33.0],\n [85.51388888888889, 99.99999999999997, 53.375, 993.0]\n ]\n for nn, b in enumerate(bbaxis):\n targetbb = mtransforms.Bbox.from_bounds(*target[nn])\n assert_array_almost_equal(b.bounds, targetbb.bounds, decimal=2)\n\n target = [\n [150.0, 119.999, 930.0, 11.111],\n [150.0, 1080.0, 930.0, 0.0],\n [150.0, 119.9999, 11.111, 960.0],\n [1068.8888, 119.9999, 11.111, 960.0]\n ]\n for nn, b in enumerate(bbspines):\n targetbb = mtransforms.Bbox.from_bounds(*target[nn])\n assert_array_almost_equal(b.bounds, targetbb.bounds, decimal=2)\n\n target = [150.0, 119.99999999999997, 930.0, 960.0]\n targetbb = mtransforms.Bbox.from_bounds(*target)\n assert_array_almost_equal(bbax.bounds, targetbb.bounds, decimal=2)\n\n target = [85.5138, 75.88888, 1021.11, 1017.11]\n targetbb = mtransforms.Bbox.from_bounds(*target)\n assert_array_almost_equal(bbtb.bounds, targetbb.bounds, decimal=2)\n\n # test that get_position roundtrips to get_window_extent\n axbb = ax.get_position().transformed(fig.transFigure).bounds\n assert_array_almost_equal(axbb, ax.get_window_extent().bounds, decimal=2)\n\n\ndef test_nodecorator():\n with rc_context({'_internal.classic_mode': False}):\n fig, ax = plt.subplots(dpi=200, figsize=(6, 6))\n fig.canvas.draw()\n ax.set(xticklabels=[], yticklabels=[])\n bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)\n\n # test the axis bboxes\n for nn, b in enumerate(bbaxis):\n assert b is None\n\n target = [\n [150.0, 119.999, 930.0, 11.111],\n [150.0, 1080.0, 930.0, 0.0],\n [150.0, 119.9999, 11.111, 960.0],\n [1068.8888, 119.9999, 11.111, 960.0]\n ]\n for nn, b in enumerate(bbspines):\n targetbb = mtransforms.Bbox.from_bounds(*target[nn])\n assert_allclose(b.bounds, targetbb.bounds, atol=1e-2)\n\n target = [150.0, 119.99999999999997, 930.0, 960.0]\n targetbb = mtransforms.Bbox.from_bounds(*target)\n assert_allclose(bbax.bounds, targetbb.bounds, atol=1e-2)\n\n target = [150., 120., 930., 960.]\n targetbb = 
mtransforms.Bbox.from_bounds(*target)\n assert_allclose(bbtb.bounds, targetbb.bounds, atol=1e-2)\n\n\ndef test_displaced_spine():\n with rc_context({'_internal.classic_mode': False}):\n fig, ax = plt.subplots(dpi=200, figsize=(6, 6))\n ax.set(xticklabels=[], yticklabels=[])\n ax.spines.bottom.set_position(('axes', -0.1))\n fig.canvas.draw()\n bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)\n\n targets = [\n [150., 24., 930., 11.111111],\n [150.0, 1080.0, 930.0, 0.0],\n [150.0, 119.9999, 11.111, 960.0],\n [1068.8888, 119.9999, 11.111, 960.0]\n ]\n for target, bbspine in zip(targets, bbspines):\n targetbb = mtransforms.Bbox.from_bounds(*target)\n assert_allclose(bbspine.bounds, targetbb.bounds, atol=1e-2)\n\n target = [150.0, 119.99999999999997, 930.0, 960.0]\n targetbb = mtransforms.Bbox.from_bounds(*target)\n assert_allclose(bbax.bounds, targetbb.bounds, atol=1e-2)\n\n target = [150., 24., 930., 1056.]\n targetbb = mtransforms.Bbox.from_bounds(*target)\n assert_allclose(bbtb.bounds, targetbb.bounds, atol=1e-2)\n\n\ndef test_tickdirs():\n \"\"\"\n Switch the tickdirs and make sure the bboxes switch with them\n \"\"\"\n targets = [[[150.0, 120.0, 930.0, 11.1111],\n [150.0, 120.0, 11.111, 960.0]],\n [[150.0, 108.8889, 930.0, 11.111111111111114],\n [138.889, 120, 11.111, 960.0]],\n [[150.0, 114.44444444444441, 930.0, 11.111111111111114],\n [144.44444444444446, 119.999, 11.111, 960.0]]]\n for dnum, dirs in enumerate(['in', 'out', 'inout']):\n with rc_context({'_internal.classic_mode': False}):\n fig, ax = plt.subplots(dpi=200, figsize=(6, 6))\n ax.tick_params(direction=dirs)\n fig.canvas.draw()\n bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)\n for nn, num in enumerate([0, 2]):\n targetbb = mtransforms.Bbox.from_bounds(*targets[dnum][nn])\n assert_allclose(\n bbspines[num].bounds, targetbb.bounds, atol=1e-2)\n\n\ndef test_minor_accountedfor():\n with rc_context({'_internal.classic_mode': False}):\n fig, ax = plt.subplots(dpi=200, figsize=(6, 6))\n fig.canvas.draw()\n ax.tick_params(which='both', direction='out')\n\n bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)\n bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)\n targets = [[150.0, 108.88888888888886, 930.0, 11.111111111111114],\n [138.8889, 119.9999, 11.1111, 960.0]]\n for n in range(2):\n targetbb = mtransforms.Bbox.from_bounds(*targets[n])\n assert_allclose(\n bbspines[n * 2].bounds, targetbb.bounds, atol=1e-2)\n\n fig, ax = plt.subplots(dpi=200, figsize=(6, 6))\n fig.canvas.draw()\n ax.tick_params(which='both', direction='out')\n ax.minorticks_on()\n ax.tick_params(axis='both', which='minor', length=30)\n fig.canvas.draw()\n bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)\n targets = [[150.0, 36.66666666666663, 930.0, 83.33333333333334],\n [66.6667, 120.0, 83.3333, 960.0]]\n\n for n in range(2):\n targetbb = mtransforms.Bbox.from_bounds(*targets[n])\n assert_allclose(\n bbspines[n * 2].bounds, targetbb.bounds, atol=1e-2)\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_axis_bool_arguments(fig_test, fig_ref):\n # Test if False and \"off\" give the same\n fig_test.add_subplot(211).axis(False)\n fig_ref.add_subplot(211).axis(\"off\")\n # Test if True after False gives the same as \"on\"\n ax = fig_test.add_subplot(212)\n ax.axis(False)\n ax.axis(True)\n fig_ref.add_subplot(212).axis(\"on\")\n\n\ndef test_axis_extent_arg():\n fig, ax = plt.subplots()\n xmin = 5\n xmax = 10\n ymin = 15\n ymax = 20\n extent = ax.axis([xmin, xmax, ymin, ymax])\n\n # test that the docstring is correct\n assert tuple(extent) == (xmin, 
xmax, ymin, ymax)\n\n # test that limits were set per the docstring\n assert (xmin, xmax) == ax.get_xlim()\n assert (ymin, ymax) == ax.get_ylim()\n\n\ndef test_datetime_masked():\n # make sure that all-masked data falls back to the viewlim\n # set in convert.axisinfo....\n x = np.array([datetime.datetime(2017, 1, n) for n in range(1, 6)])\n y = np.array([1, 2, 3, 4, 5])\n m = np.ma.masked_greater(y, 0)\n\n fig, ax = plt.subplots()\n ax.plot(x, m)\n dt = mdates.date2num(np.datetime64('0000-12-31'))\n assert ax.get_xlim() == (730120.0 + dt, 733773.0 + dt)\n\n\ndef test_hist_auto_bins():\n _, bins, _ = plt.hist([[1, 2, 3], [3, 4, 5, 6]], bins='auto')\n assert bins[0] <= 1\n assert bins[-1] >= 6\n\n\ndef test_hist_nan_data():\n fig, (ax1, ax2) = plt.subplots(2)\n\n data = [1, 2, 3]\n nan_data = data + [np.nan]\n\n bins, edges, _ = ax1.hist(data)\n with np.errstate(invalid='ignore'):\n nanbins, nanedges, _ = ax2.hist(nan_data)\n\n np.testing.assert_allclose(bins, nanbins)\n np.testing.assert_allclose(edges, nanedges)\n\n\ndef test_hist_range_and_density():\n _, bins, _ = plt.hist(np.random.rand(10), \"auto\",\n range=(0, 1), density=True)\n assert bins[0] == 0\n assert bins[-1] == 1\n\n\ndef test_bar_errbar_zorder():\n # Check that the zorder of errorbars is always greater than the bar they\n # are plotted on\n fig, ax = plt.subplots()\n x = [1, 2, 3]\n barcont = ax.bar(x=x, height=x, yerr=x, capsize=5, zorder=3)\n\n data_line, caplines, barlinecols = barcont.errorbar.lines\n for bar in barcont.patches:\n for capline in caplines:\n assert capline.zorder > bar.zorder\n for barlinecol in barlinecols:\n assert barlinecol.zorder > bar.zorder\n\n\ndef test_set_ticks_inverted():\n fig, ax = plt.subplots()\n ax.invert_xaxis()\n ax.set_xticks([.3, .7])\n assert ax.get_xlim() == (1, 0)\n\n\ndef test_aspect_nonlinear_adjustable_box():\n fig = plt.figure(figsize=(10, 10)) # Square.\n\n ax = fig.add_subplot()\n ax.plot([.4, .6], [.4, .6]) # Set minpos to keep logit happy.\n ax.set(xscale=\"log\", xlim=(1, 10),\n yscale=\"logit\", ylim=(1/11, 1/1001),\n aspect=1, adjustable=\"box\")\n ax.margins(0)\n pos = fig.transFigure.transform_bbox(ax.get_position())\n assert pos.height / pos.width == pytest.approx(2)\n\n\ndef test_aspect_nonlinear_adjustable_datalim():\n fig = plt.figure(figsize=(10, 10)) # Square.\n\n ax = fig.add_axes([.1, .1, .8, .8]) # Square.\n ax.plot([.4, .6], [.4, .6]) # Set minpos to keep logit happy.\n ax.set(xscale=\"log\", xlim=(1, 100),\n yscale=\"logit\", ylim=(1 / 101, 1 / 11),\n aspect=1, adjustable=\"datalim\")\n ax.margins(0)\n ax.apply_aspect()\n\n assert ax.get_xlim() == pytest.approx([1*10**(1/2), 100/10**(1/2)])\n assert ax.get_ylim() == (1 / 101, 1 / 11)\n\n\ndef test_box_aspect():\n # Test if axes with box_aspect=1 has same dimensions\n # as axes with aspect equal and adjustable=\"box\"\n\n fig1, ax1 = plt.subplots()\n axtwin = ax1.twinx()\n axtwin.plot([12, 344])\n\n ax1.set_box_aspect(1)\n\n fig2, ax2 = plt.subplots()\n ax2.margins(0)\n ax2.plot([0, 2], [6, 8])\n ax2.set_aspect(\"equal\", adjustable=\"box\")\n\n fig1.canvas.draw()\n fig2.canvas.draw()\n\n bb1 = ax1.get_position()\n bbt = axtwin.get_position()\n bb2 = ax2.get_position()\n\n assert_array_equal(bb1.extents, bb2.extents)\n assert_array_equal(bbt.extents, bb2.extents)\n\n\ndef test_box_aspect_custom_position():\n # Test if axes with custom position and box_aspect\n # behaves the same independent of the order of setting those.\n\n fig1, ax1 = plt.subplots()\n ax1.set_position([0.1, 0.1, 0.9, 0.2])\n 
fig1.canvas.draw()\n ax1.set_box_aspect(1.)\n\n fig2, ax2 = plt.subplots()\n ax2.set_box_aspect(1.)\n fig2.canvas.draw()\n ax2.set_position([0.1, 0.1, 0.9, 0.2])\n\n fig1.canvas.draw()\n fig2.canvas.draw()\n\n bb1 = ax1.get_position()\n bb2 = ax2.get_position()\n\n assert_array_equal(bb1.extents, bb2.extents)\n\n\ndef test_bbox_aspect_axes_init():\n # Test that box_aspect can be given to axes init and produces\n # all equal square axes.\n fig, axs = plt.subplots(2, 3, subplot_kw=dict(box_aspect=1),\n constrained_layout=True)\n fig.canvas.draw()\n renderer = fig.canvas.get_renderer()\n sizes = []\n for ax in axs.flat:\n bb = ax.get_window_extent(renderer)\n sizes.extend([bb.width, bb.height])\n\n assert_allclose(sizes, sizes[0])\n\n\ndef test_redraw_in_frame():\n fig, ax = plt.subplots(1, 1)\n ax.plot([1, 2, 3])\n fig.canvas.draw()\n ax.redraw_in_frame()\n\n\ndef test_invisible_axes():\n # invisible axes should not respond to events...\n fig, ax = plt.subplots()\n assert fig.canvas.inaxes((200, 200)) is not None\n ax.set_visible(False)\n assert fig.canvas.inaxes((200, 200)) is None\n\n\ndef test_xtickcolor_is_not_markercolor():\n plt.rcParams['lines.markeredgecolor'] = 'white'\n ax = plt.axes()\n ticks = ax.xaxis.get_major_ticks()\n for tick in ticks:\n assert tick.tick1line.get_markeredgecolor() != 'white'\n\n\ndef test_ytickcolor_is_not_markercolor():\n plt.rcParams['lines.markeredgecolor'] = 'white'\n ax = plt.axes()\n ticks = ax.yaxis.get_major_ticks()\n for tick in ticks:\n assert tick.tick1line.get_markeredgecolor() != 'white'\n\n\[email protected]('axis', ('x', 'y'))\[email protected]('auto', (True, False, None))\ndef test_unautoscale(axis, auto):\n fig, ax = plt.subplots()\n x = np.arange(100)\n y = np.linspace(-.1, .1, 100)\n ax.scatter(y, x)\n\n get_autoscale_on = getattr(ax, f'get_autoscale{axis}_on')\n set_lim = getattr(ax, f'set_{axis}lim')\n get_lim = getattr(ax, f'get_{axis}lim')\n\n post_auto = get_autoscale_on() if auto is None else auto\n\n set_lim((-0.5, 0.5), auto=auto)\n assert post_auto == get_autoscale_on()\n fig.canvas.draw()\n assert_array_equal(get_lim(), (-0.5, 0.5))\n\n\n@check_figures_equal(extensions=[\"png\"])\ndef test_polar_interpolation_steps_variable_r(fig_test, fig_ref):\n l, = fig_test.add_subplot(projection=\"polar\").plot([0, np.pi/2], [1, 2])\n l.get_path()._interpolation_steps = 100\n fig_ref.add_subplot(projection=\"polar\").plot(\n np.linspace(0, np.pi/2, 101), np.linspace(1, 2, 101))\n\n\[email protected]('default')\ndef test_autoscale_tiny_sticky():\n fig, ax = plt.subplots()\n ax.bar(0, 1e-9)\n fig.canvas.draw()\n assert ax.get_ylim() == (0, 1.05e-9)\n\n\ndef test_xtickcolor_is_not_xticklabelcolor():\n plt.rcParams['xtick.color'] = 'yellow'\n plt.rcParams['xtick.labelcolor'] = 'blue'\n ax = plt.axes()\n ticks = ax.xaxis.get_major_ticks()\n for tick in ticks:\n assert tick.tick1line.get_color() == 'yellow'\n assert tick.label1.get_color() == 'blue'\n\n\ndef test_ytickcolor_is_not_yticklabelcolor():\n plt.rcParams['ytick.color'] = 'yellow'\n plt.rcParams['ytick.labelcolor'] = 'blue'\n ax = plt.axes()\n ticks = ax.yaxis.get_major_ticks()\n for tick in ticks:\n assert tick.tick1line.get_color() == 'yellow'\n assert tick.label1.get_color() == 'blue'\n\n\[email protected]('size', [size for size in mfont_manager.font_scalings\n if size is not None] + [8, 10, 12])\[email protected]('default')\ndef test_relative_ticklabel_sizes(size):\n mpl.rcParams['xtick.labelsize'] = size\n mpl.rcParams['ytick.labelsize'] = size\n fig, ax = plt.subplots()\n 
fig.canvas.draw()\n\n for name, axis in zip(['x', 'y'], [ax.xaxis, ax.yaxis]):\n for tick in axis.get_major_ticks():\n assert tick.label1.get_size() == axis._get_tick_label_size(name)\n\n\ndef test_multiplot_autoscale():\n fig = plt.figure()\n ax1, ax2 = fig.subplots(2, 1, sharex='all')\n ax1.scatter([1, 2, 3, 4], [2, 3, 2, 3])\n ax2.axhspan(-5, 5)\n xlim = ax1.get_xlim()\n assert np.allclose(xlim, [0.5, 4.5])\n\n\ndef test_sharing_does_not_link_positions():\n fig = plt.figure()\n ax0 = fig.add_subplot(221)\n ax1 = fig.add_axes([.6, .6, .3, .3], sharex=ax0)\n init_pos = ax1.get_position()\n fig.subplots_adjust(left=0)\n assert (ax1.get_position().get_points() == init_pos.get_points()).all()\n\n\n@check_figures_equal(extensions=[\"pdf\"])\ndef test_2dcolor_plot(fig_test, fig_ref):\n color = np.array([0.1, 0.2, 0.3])\n # plot with 1D-color:\n axs = fig_test.subplots(5)\n axs[0].plot([1, 2], [1, 2], c=color.reshape(-1))\n axs[1].scatter([1, 2], [1, 2], c=color.reshape(-1))\n axs[2].step([1, 2], [1, 2], c=color.reshape(-1))\n axs[3].hist(np.arange(10), color=color.reshape(-1))\n axs[4].bar(np.arange(10), np.arange(10), color=color.reshape(-1))\n # plot with 2D-color:\n axs = fig_ref.subplots(5)\n axs[0].plot([1, 2], [1, 2], c=color.reshape((1, -1)))\n axs[1].scatter([1, 2], [1, 2], c=color.reshape((1, -1)))\n axs[2].step([1, 2], [1, 2], c=color.reshape((1, -1)))\n axs[3].hist(np.arange(10), color=color.reshape((1, -1)))\n axs[4].bar(np.arange(10), np.arange(10), color=color.reshape((1, -1)))\n\n\n@check_figures_equal(extensions=['png'])\ndef test_shared_axes_clear(fig_test, fig_ref):\n x = np.arange(0.0, 2*np.pi, 0.01)\n y = np.sin(x)\n\n axs = fig_ref.subplots(2, 2, sharex=True, sharey=True)\n for ax in axs.flat:\n ax.plot(x, y)\n\n axs = fig_test.subplots(2, 2, sharex=True, sharey=True)\n for ax in axs.flat:\n ax.clear()\n ax.plot(x, y)\n\n\ndef test_shared_axes_retick():\n fig, axs = plt.subplots(2, 2, sharex='all', sharey='all')\n\n for ax in axs.flat:\n ax.plot([0, 2], 'o-')\n\n axs[0, 0].set_xticks([-0.5, 0, 1, 1.5]) # should affect all axes xlims\n for ax in axs.flat:\n assert ax.get_xlim() == axs[0, 0].get_xlim()\n\n axs[0, 0].set_yticks([-0.5, 0, 2, 2.5]) # should affect all axes ylims\n for ax in axs.flat:\n assert ax.get_ylim() == axs[0, 0].get_ylim()\n\n\[email protected]('ha', ['left', 'center', 'right'])\ndef test_ylabel_ha_with_position(ha):\n fig = Figure()\n ax = fig.subplots()\n ax.set_ylabel(\"test\", y=1, ha=ha)\n ax.yaxis.set_label_position(\"right\")\n assert ax.yaxis.get_label().get_ha() == ha\n\n\ndef test_bar_label_location_vertical():\n ax = plt.gca()\n xs, heights = [1, 2], [3, -4]\n rects = ax.bar(xs, heights)\n labels = ax.bar_label(rects)\n assert labels[0].xy == (xs[0], heights[0])\n assert labels[0].get_ha() == 'center'\n assert labels[0].get_va() == 'bottom'\n assert labels[1].xy == (xs[1], heights[1])\n assert labels[1].get_ha() == 'center'\n assert labels[1].get_va() == 'top'\n\n\ndef test_bar_label_location_horizontal():\n ax = plt.gca()\n ys, widths = [1, 2], [3, -4]\n rects = ax.barh(ys, widths)\n labels = ax.bar_label(rects)\n assert labels[0].xy == (widths[0], ys[0])\n assert labels[0].get_ha() == 'left'\n assert labels[0].get_va() == 'center'\n assert labels[1].xy == (widths[1], ys[1])\n assert labels[1].get_ha() == 'right'\n assert labels[1].get_va() == 'center'\n\n\ndef test_bar_label_location_center():\n ax = plt.gca()\n ys, widths = [1, 2], [3, -4]\n rects = ax.barh(ys, widths)\n labels = ax.bar_label(rects, label_type='center')\n assert 
labels[0].xy == (widths[0] / 2, ys[0])\n assert labels[0].get_ha() == 'center'\n assert labels[0].get_va() == 'center'\n assert labels[1].xy == (widths[1] / 2, ys[1])\n assert labels[1].get_ha() == 'center'\n assert labels[1].get_va() == 'center'\n\n\ndef test_bar_label_location_errorbars():\n ax = plt.gca()\n xs, heights = [1, 2], [3, -4]\n rects = ax.bar(xs, heights, yerr=1)\n labels = ax.bar_label(rects)\n assert labels[0].xy == (xs[0], heights[0] + 1)\n assert labels[0].get_ha() == 'center'\n assert labels[0].get_va() == 'bottom'\n assert labels[1].xy == (xs[1], heights[1] - 1)\n assert labels[1].get_ha() == 'center'\n assert labels[1].get_va() == 'top'\n\n\ndef test_bar_label_fmt():\n ax = plt.gca()\n rects = ax.bar([1, 2], [3, -4])\n labels = ax.bar_label(rects, fmt='%.2f')\n assert labels[0].get_text() == '3.00'\n assert labels[1].get_text() == '-4.00'\n\n\ndef test_bar_label_labels():\n ax = plt.gca()\n rects = ax.bar([1, 2], [3, -4])\n labels = ax.bar_label(rects, labels=['A', 'B'])\n assert labels[0].get_text() == 'A'\n assert labels[1].get_text() == 'B'\n\n\ndef test_bar_label_nan_ydata():\n ax = plt.gca()\n bars = ax.bar([2, 3], [np.nan, 1])\n labels = ax.bar_label(bars)\n assert [l.get_text() for l in labels] == ['', '1']\n assert labels[0].xy == (2, 0)\n assert labels[0].get_va() == 'bottom'\n\n\ndef test_patch_bounds(): # PR 19078\n fig, ax = plt.subplots()\n ax.add_patch(mpatches.Wedge((0, -1), 1.05, 60, 120, 0.1))\n bot = 1.9*np.sin(15*np.pi/180)**2\n np.testing.assert_array_almost_equal_nulp(\n np.array((-0.525, -(bot+0.05), 1.05, bot+0.1)), ax.dataLim.bounds, 16)\n\n\[email protected]('default')\ndef test_warn_ignored_scatter_kwargs():\n with pytest.warns(UserWarning,\n match=r\"You passed a edgecolor/edgecolors\"):\n\n c = plt.scatter(\n [0], [0], marker=\"+\", s=500, facecolor=\"r\", edgecolor=\"b\"\n )\n",
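A minimal standalone sketch (editorial addition, not part of the dataset entry above): several of the preceding tests (test_secondary_xy, test_secondary_resize, test_secondary_minorloc) exercise Axes.secondary_xaxis with a (forward, inverse) pair of functions, and the two callables must be mutual inverses so tick positions round-trip between the parent and secondary scales. The linear pair below mirrors the one used in test_secondary_xy; the axis label text is an illustrative assumption.

import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
ax.plot(np.arange(2, 11), np.arange(2, 11))
# forward maps parent-axis data coordinates onto the secondary scale;
# inverse maps them back, so the secondary ticks stay aligned.
secax = ax.secondary_xaxis('top', functions=(lambda x: 2 * x, lambda x: x / 2))
secax.set_xlabel('2x (secondary scale)')  # illustrative label, not from the tests
fig.canvas.draw()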
"\"\"\"\nThese tests are originally part of CellProfiler, code licensed under both GPL and BSD licenses.\n\nWebsite: http://www.cellprofiler.org\nCopyright (c) 2003-2009 Massachusetts Institute of Technology\nCopyright (c) 2009-2011 Broad Institute\nAll rights reserved.\nOriginal author: Lee Kamentsky\n\"\"\"\nimport numpy as np\n\nfrom skimage.morphology.greyreconstruct import reconstruction\nfrom skimage._shared import testing\nfrom skimage._shared.testing import assert_array_almost_equal\n\n\ndef test_zeros():\n \"\"\"Test reconstruction with image and mask of zeros\"\"\"\n assert_array_almost_equal(\n reconstruction(np.zeros((5, 7)), np.zeros((5, 7))), 0)\n\n\ndef test_image_equals_mask():\n \"\"\"Test reconstruction where the image and mask are the same\"\"\"\n assert_array_almost_equal(\n reconstruction(np.ones((7, 5)), np.ones((7, 5))), 1)\n\n\ndef test_image_less_than_mask():\n \"\"\"Test reconstruction where the image is uniform and less than mask\"\"\"\n image = np.ones((5, 5))\n mask = np.ones((5, 5)) * 2\n assert_array_almost_equal(reconstruction(image, mask), 1)\n\n\ndef test_one_image_peak():\n \"\"\"Test reconstruction with one peak pixel\"\"\"\n image = np.ones((5, 5))\n image[2, 2] = 2\n mask = np.ones((5, 5)) * 3\n assert_array_almost_equal(reconstruction(image, mask), 2)\n\n\ndef test_two_image_peaks():\n \"\"\"Test reconstruction with two peak pixels isolated by the mask\"\"\"\n image = np.array([[1, 1, 1, 1, 1, 1, 1, 1],\n [1, 2, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 3, 1],\n [1, 1, 1, 1, 1, 1, 1, 1]])\n\n mask = np.array([[4, 4, 4, 1, 1, 1, 1, 1],\n [4, 4, 4, 1, 1, 1, 1, 1],\n [4, 4, 4, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 4, 4, 4],\n [1, 1, 1, 1, 1, 4, 4, 4],\n [1, 1, 1, 1, 1, 4, 4, 4]])\n\n expected = np.array([[2, 2, 2, 1, 1, 1, 1, 1],\n [2, 2, 2, 1, 1, 1, 1, 1],\n [2, 2, 2, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 3, 3, 3],\n [1, 1, 1, 1, 1, 3, 3, 3],\n [1, 1, 1, 1, 1, 3, 3, 3]])\n assert_array_almost_equal(reconstruction(image, mask), expected)\n\n\ndef test_zero_image_one_mask():\n \"\"\"Test reconstruction with an image of all zeros and a mask that's not\"\"\"\n result = reconstruction(np.zeros((10, 10)), np.ones((10, 10)))\n assert_array_almost_equal(result, 0)\n\n\ndef test_fill_hole():\n \"\"\"Test reconstruction by erosion, which should fill holes in mask.\"\"\"\n seed = np.array([0, 8, 8, 8, 8, 8, 8, 8, 8, 0])\n mask = np.array([0, 3, 6, 2, 1, 1, 1, 4, 2, 0])\n result = reconstruction(seed, mask, method='erosion')\n assert_array_almost_equal(result, np.array([0, 3, 6, 4, 4, 4, 4, 4, 2, 0]))\n\n\ndef test_invalid_seed():\n seed = np.ones((5, 5))\n mask = np.ones((5, 5))\n with testing.raises(ValueError):\n reconstruction(seed * 2, mask,\n method='dilation')\n with testing.raises(ValueError):\n reconstruction(seed * 0.5, mask,\n method='erosion')\n\n\ndef test_invalid_selem():\n seed = np.ones((5, 5))\n mask = np.ones((5, 5))\n with testing.raises(ValueError):\n reconstruction(seed, mask,\n selem=np.ones((4, 4)))\n with testing.raises(ValueError):\n reconstruction(seed, mask,\n selem=np.ones((3, 4)))\n reconstruction(seed, mask, selem=np.ones((3, 3)))\n\n\ndef test_invalid_method():\n seed = np.array([0, 8, 8, 8, 8, 8, 8, 8, 8, 0])\n mask = np.array([0, 3, 6, 2, 1, 1, 1, 4, 2, 0])\n with testing.raises(ValueError):\n reconstruction(seed, mask, method='foo')\n\n\ndef test_invalid_offset_not_none():\n \"\"\"Test reconstruction with invalid not None offset parameter\"\"\"\n image = np.array([[1, 1, 1, 1, 1, 1, 1, 
1],\n [1, 2, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 3, 1],\n [1, 1, 1, 1, 1, 1, 1, 1]])\n\n mask = np.array([[4, 4, 4, 1, 1, 1, 1, 1],\n [4, 4, 4, 1, 1, 1, 1, 1],\n [4, 4, 4, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 4, 4, 4],\n [1, 1, 1, 1, 1, 4, 4, 4],\n [1, 1, 1, 1, 1, 4, 4, 4]])\n with testing.raises(ValueError):\n reconstruction(image, mask, method='dilation',\n selem=np.ones((3, 3)), offset=np.array([3, 0]))\n\n\ndef test_offset_not_none():\n \"\"\"Test reconstruction with valid offset parameter\"\"\"\n seed = np.array([0, 3, 6, 2, 1, 1, 1, 4, 2, 0])\n mask = np.array([0, 8, 6, 8, 8, 8, 8, 4, 4, 0])\n expected = np.array([0, 3, 6, 6, 6, 6, 6, 4, 4, 0])\n\n assert_array_almost_equal(\n reconstruction(seed, mask, method='dilation',\n selem=np.ones(3), offset=np.array([0])), expected)\n",
"import numpy as np\nfrom skimage.morphology import remove_small_objects, remove_small_holes\n\nfrom skimage._shared import testing\nfrom skimage._shared.testing import assert_array_equal, assert_equal\nfrom skimage._shared._warnings import expected_warnings\n\n\ntest_image = np.array([[0, 0, 0, 1, 0],\n [1, 1, 1, 0, 0],\n [1, 1, 1, 0, 1]], bool)\n\n\ndef test_one_connectivity():\n expected = np.array([[0, 0, 0, 0, 0],\n [1, 1, 1, 0, 0],\n [1, 1, 1, 0, 0]], bool)\n observed = remove_small_objects(test_image, min_size=6)\n assert_array_equal(observed, expected)\n\n\ndef test_two_connectivity():\n expected = np.array([[0, 0, 0, 1, 0],\n [1, 1, 1, 0, 0],\n [1, 1, 1, 0, 0]], bool)\n observed = remove_small_objects(test_image, min_size=7, connectivity=2)\n assert_array_equal(observed, expected)\n\n\ndef test_in_place():\n image = test_image.copy()\n observed = remove_small_objects(image, min_size=6, in_place=True)\n assert_equal(observed is image, True,\n \"remove_small_objects in_place argument failed.\")\n\n\ndef test_labeled_image():\n labeled_image = np.array([[2, 2, 2, 0, 1],\n [2, 2, 2, 0, 1],\n [2, 0, 0, 0, 0],\n [0, 0, 3, 3, 3]], dtype=int)\n expected = np.array([[2, 2, 2, 0, 0],\n [2, 2, 2, 0, 0],\n [2, 0, 0, 0, 0],\n [0, 0, 3, 3, 3]], dtype=int)\n observed = remove_small_objects(labeled_image, min_size=3)\n assert_array_equal(observed, expected)\n\n\ndef test_uint_image():\n labeled_image = np.array([[2, 2, 2, 0, 1],\n [2, 2, 2, 0, 1],\n [2, 0, 0, 0, 0],\n [0, 0, 3, 3, 3]], dtype=np.uint8)\n expected = np.array([[2, 2, 2, 0, 0],\n [2, 2, 2, 0, 0],\n [2, 0, 0, 0, 0],\n [0, 0, 3, 3, 3]], dtype=np.uint8)\n observed = remove_small_objects(labeled_image, min_size=3)\n assert_array_equal(observed, expected)\n\n\ndef test_single_label_warning():\n image = np.array([[0, 0, 0, 1, 0],\n [1, 1, 1, 0, 0],\n [1, 1, 1, 0, 0]], int)\n with expected_warnings(['use a boolean array?']):\n remove_small_objects(image, min_size=6)\n\n\ndef test_float_input():\n float_test = np.random.rand(5, 5)\n with testing.raises(TypeError):\n remove_small_objects(float_test)\n\n\ndef test_negative_input():\n negative_int = np.random.randint(-4, -1, size=(5, 5))\n with testing.raises(ValueError):\n remove_small_objects(negative_int)\n\n\ntest_holes_image = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 0, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], bool)\n\n\ndef test_one_connectivity_holes():\n expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], bool)\n observed = remove_small_holes(test_holes_image, area_threshold=3)\n assert_array_equal(observed, expected)\n\n\ndef test_two_connectivity_holes():\n expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 0, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], bool)\n observed = remove_small_holes(test_holes_image, area_threshold=3,\n connectivity=2)\n assert_array_equal(observed, expected)\n\n\ndef test_in_place_holes():\n image = test_holes_image.copy()\n observed = 
remove_small_holes(image, area_threshold=3, in_place=True)\n assert_equal(observed is image, True,\n \"remove_small_holes in_place argument failed.\")\n\n\ndef test_labeled_image_holes():\n labeled_holes_image = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 0, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 2, 2],\n [0, 0, 0, 0, 0, 0, 0, 2, 0, 2],\n [0, 0, 0, 0, 0, 0, 0, 2, 2, 2]],\n dtype=int)\n expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], dtype=bool)\n with expected_warnings(['returned as a boolean array']):\n observed = remove_small_holes(labeled_holes_image, area_threshold=3)\n assert_array_equal(observed, expected)\n\n\ndef test_uint_image_holes():\n labeled_holes_image = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 0, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 2, 2],\n [0, 0, 0, 0, 0, 0, 0, 2, 0, 2],\n [0, 0, 0, 0, 0, 0, 0, 2, 2, 2]],\n dtype=np.uint8)\n expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], dtype=bool)\n with expected_warnings(['returned as a boolean array']):\n observed = remove_small_holes(labeled_holes_image, area_threshold=3)\n assert_array_equal(observed, expected)\n\n\ndef test_label_warning_holes():\n labeled_holes_image = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 0, 1, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 2, 2],\n [0, 0, 0, 0, 0, 0, 0, 2, 0, 2],\n [0, 0, 0, 0, 0, 0, 0, 2, 2, 2]],\n dtype=int)\n with expected_warnings(['use a boolean array?']):\n remove_small_holes(labeled_holes_image, area_threshold=3)\n remove_small_holes(labeled_holes_image.astype(bool), area_threshold=3)\n\n\ndef test_float_input_holes():\n float_test = np.random.rand(5, 5)\n with testing.raises(TypeError):\n remove_small_holes(float_test)\n",
"\"\"\"\n:author: Stefan van der Walt, 2008\n:license: modified BSD\n\"\"\"\n\nimport numpy as np\nfrom .._shared.fft import fftmodule as fft\nfrom .._shared.utils import check_nD\n\neps = np.finfo(float).eps\n\n\ndef _min_limit(x, val=eps):\n mask = np.abs(x) < eps\n x[mask] = np.sign(x[mask]) * eps\n\n\ndef _centre(x, oshape):\n \"\"\"Return an array of oshape from the centre of x.\n\n \"\"\"\n start = (np.array(x.shape) - np.array(oshape)) // 2 + 1\n out = x[tuple(slice(s, s + n) for s, n in zip(start, oshape))]\n return out\n\n\ndef _pad(data, shape):\n \"\"\"Pad the data to the given shape with zeros.\n\n Parameters\n ----------\n data : 2-d ndarray\n Input data\n shape : (2,) tuple\n\n \"\"\"\n out = np.zeros(shape)\n out[tuple(slice(0, n) for n in data.shape)] = data\n return out\n\n\nclass LPIFilter2D(object):\n \"\"\"Linear Position-Invariant Filter (2-dimensional)\n\n \"\"\"\n\n def __init__(self, impulse_response, **filter_params):\n \"\"\"\n Parameters\n ----------\n impulse_response : callable `f(r, c, **filter_params)`\n Function that yields the impulse response. ``r`` and ``c`` are\n 1-dimensional vectors that represent row and column positions, in\n other words coordinates are (r[0],c[0]),(r[0],c[1]) etc.\n `**filter_params` are passed through.\n\n In other words, ``impulse_response`` would be called like this:\n\n >>> def impulse_response(r, c, **filter_params):\n ... pass\n >>>\n >>> r = [0,0,0,1,1,1,2,2,2]\n >>> c = [0,1,2,0,1,2,0,1,2]\n >>> filter_params = {'kw1': 1, 'kw2': 2, 'kw3': 3}\n >>> impulse_response(r, c, **filter_params)\n\n\n Examples\n --------\n Gaussian filter: Use a 1-D gaussian in each direction without\n normalization coefficients.\n\n >>> def filt_func(r, c, sigma = 1):\n ... return np.exp(-np.hypot(r, c)/sigma)\n >>> filter = LPIFilter2D(filt_func)\n\n \"\"\"\n if not callable(impulse_response):\n raise ValueError(\"Impulse response must be a callable.\")\n\n self.impulse_response = impulse_response\n self.filter_params = filter_params\n self._cache = None\n\n def _prepare(self, data):\n \"\"\"Calculate filter and data FFT in preparation for filtering.\n\n \"\"\"\n dshape = np.array(data.shape)\n dshape += (dshape % 2 == 0) # all filter dimensions must be uneven\n oshape = np.array(data.shape) * 2 - 1\n\n if self._cache is None or np.any(self._cache.shape != oshape):\n coords = np.mgrid[[slice(0, float(n)) for n in dshape]]\n # this steps over two sets of coordinates,\n # not over the coordinates individually\n for k, coord in enumerate(coords):\n coord -= (dshape[k] - 1) / 2.\n coords = coords.reshape(2, -1).T # coordinate pairs (r,c)\n\n f = self.impulse_response(coords[:, 0], coords[:, 1],\n **self.filter_params).reshape(dshape)\n\n f = _pad(f, oshape)\n F = fft.fftn(f)\n self._cache = F\n else:\n F = self._cache\n\n data = _pad(data, oshape)\n G = fft.fftn(data)\n\n return F, G\n\n def __call__(self, data):\n \"\"\"Apply the filter to the given data.\n\n Parameters\n ----------\n data : (M,N) ndarray\n\n \"\"\"\n check_nD(data, 2, 'data')\n F, G = self._prepare(data)\n out = fft.ifftn(F * G)\n out = np.abs(_centre(out, data.shape))\n return out\n\n\ndef forward(data, impulse_response=None, filter_params={},\n predefined_filter=None):\n \"\"\"Apply the given filter to data.\n\n Parameters\n ----------\n data : (M,N) ndarray\n Input data.\n impulse_response : callable `f(r, c, **filter_params)`\n Impulse response of the filter. 
See LPIFilter2D.__init__.\n filter_params : dict\n Additional keyword parameters to the impulse_response function.\n\n Other Parameters\n ----------------\n predefined_filter : LPIFilter2D\n If you need to apply the same filter multiple times over different\n images, construct the LPIFilter2D and specify it here.\n\n Examples\n --------\n\n Gaussian filter:\n\n >>> def filt_func(r, c):\n ... return np.exp(-np.hypot(r, c)/1)\n >>>\n >>> from skimage import data\n >>> filtered = forward(data.coins(), filt_func)\n\n \"\"\"\n check_nD(data, 2, 'data')\n if predefined_filter is None:\n predefined_filter = LPIFilter2D(impulse_response, **filter_params)\n return predefined_filter(data)\n\n\ndef inverse(data, impulse_response=None, filter_params={}, max_gain=2,\n predefined_filter=None):\n \"\"\"Apply the filter in reverse to the given data.\n\n Parameters\n ----------\n data : (M,N) ndarray\n Input data.\n impulse_response : callable `f(r, c, **filter_params)`\n Impulse response of the filter. See LPIFilter2D.__init__.\n filter_params : dict\n Additional keyword parameters to the impulse_response function.\n max_gain : float\n Limit the filter gain. Often, the filter contains zeros, which would\n cause the inverse filter to have infinite gain. High gain causes\n amplification of artefacts, so a conservative limit is recommended.\n\n Other Parameters\n ----------------\n predefined_filter : LPIFilter2D\n If you need to apply the same filter multiple times over different\n images, construct the LPIFilter2D and specify it here.\n\n \"\"\"\n check_nD(data, 2, 'data')\n if predefined_filter is None:\n filt = LPIFilter2D(impulse_response, **filter_params)\n else:\n filt = predefined_filter\n\n F, G = filt._prepare(data)\n _min_limit(F)\n\n F = 1 / F\n mask = np.abs(F) > max_gain\n F[mask] = np.sign(F[mask]) * max_gain\n\n return _centre(np.abs(fft.ifftshift(fft.ifftn(G * F))), data.shape)\n\n\ndef wiener(data, impulse_response=None, filter_params={}, K=0.25,\n predefined_filter=None):\n \"\"\"Minimum Mean Square Error (Wiener) inverse filter.\n\n Parameters\n ----------\n data : (M,N) ndarray\n Input data.\n K : float or (M,N) ndarray\n Ratio between power spectrum of noise and undegraded\n image.\n impulse_response : callable `f(r, c, **filter_params)`\n Impulse response of the filter. See LPIFilter2D.__init__.\n filter_params : dict\n Additional keyword parameters to the impulse_response function.\n\n Other Parameters\n ----------------\n predefined_filter : LPIFilter2D\n If you need to apply the same filter multiple times over different\n images, construct the LPIFilter2D and specify it here.\n\n \"\"\"\n check_nD(data, 2, 'data')\n\n if not isinstance(K, float):\n check_nD(K, 2, 'K')\n\n if predefined_filter is None:\n filt = LPIFilter2D(impulse_response, **filter_params)\n else:\n filt = predefined_filter\n\n F, G = filt._prepare(data)\n _min_limit(F)\n\n H_mag_sqr = np.abs(F) ** 2\n F = 1 / F * H_mag_sqr / (H_mag_sqr + K)\n\n return _centre(np.abs(fft.ifftshift(fft.ifftn(G * F))), data.shape)\n\n\ndef constrained_least_squares(data, lam, impulse_response=None,\n filter_params={}):\n raise NotImplementedError\n",
"import numpy as np\n\n\ndef binary_blobs(length=512, blob_size_fraction=0.1, n_dim=2,\n volume_fraction=0.5, seed=None):\n \"\"\"\n Generate synthetic binary image with several rounded blob-like objects.\n\n Parameters\n ----------\n length : int, optional\n Linear size of output image.\n blob_size_fraction : float, optional\n Typical linear size of blob, as a fraction of ``length``, should be\n smaller than 1.\n n_dim : int, optional\n Number of dimensions of output image.\n volume_fraction : float, default 0.5\n Fraction of image pixels covered by the blobs (where the output is 1).\n Should be in [0, 1].\n seed : int, optional\n Seed to initialize the random number generator.\n If `None`, a random seed from the operating system is used.\n\n Returns\n -------\n blobs : ndarray of bools\n Output binary image\n\n Examples\n --------\n >>> from skimage import data\n >>> data.binary_blobs(length=5, blob_size_fraction=0.2, seed=1)\n array([[ True, False, True, True, True],\n [ True, True, True, False, True],\n [False, True, False, True, True],\n [ True, False, False, True, True],\n [ True, False, False, False, True]])\n >>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.1)\n >>> # Finer structures\n >>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.05)\n >>> # Blobs cover a smaller volume fraction of the image\n >>> blobs = data.binary_blobs(length=256, volume_fraction=0.3)\n \"\"\"\n # filters is quite an expensive import since it imports all of scipy.signal\n # We lazy import here\n from ..filters import gaussian\n\n rs = np.random.RandomState(seed)\n shape = tuple([length] * n_dim)\n mask = np.zeros(shape)\n n_pts = max(int(1. / blob_size_fraction) ** n_dim, 1)\n points = (length * rs.rand(n_dim, n_pts)).astype(int)\n mask[tuple(indices for indices in points)] = 1\n mask = gaussian(mask, sigma=0.25 * length * blob_size_fraction)\n threshold = np.percentile(mask, 100 * (1 - volume_fraction))\n return np.logical_not(mask < threshold)\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.sqrt",
"numpy.linspace",
"numpy.all",
"numpy.max",
"numpy.mean",
"numpy.tri",
"numpy.allclose",
"numpy.clip",
"numpy.arange",
"numpy.ceil",
"numpy.std",
"matplotlib.pyplot.subplot",
"numpy.argmax",
"numpy.interp",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.random.rand",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.random.seed",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"numpy.random.uniform"
],
[
"numpy.linspace",
"numpy.clip",
"numpy.unique",
"numpy.arange",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState"
],
[
"matplotlib.pyplot.legend",
"numpy.sqrt",
"matplotlib.testing.decorators.check_figures_equal",
"matplotlib.pyplot.axes",
"numpy.exp",
"matplotlib.pyplot.twinx",
"numpy.sin",
"numpy.full",
"numpy.multiply.outer",
"numpy.diff",
"matplotlib.pyplot.axis",
"numpy.outer",
"numpy.zeros",
"numpy.log",
"numpy.power",
"matplotlib.patches.Circle",
"numpy.full_like",
"numpy.append",
"numpy.array",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.boxplot",
"matplotlib.patches.Ellipse",
"matplotlib.colors.LogNorm",
"matplotlib.figure.Figure",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle",
"matplotlib.pyplot.FixedLocator",
"numpy.testing.assert_array_equal",
"numpy.indices",
"numpy.datetime64",
"numpy.random.uniform",
"matplotlib.ticker.FuncFormatter",
"numpy.vstack",
"matplotlib.colors.to_rgba_array",
"numpy.random.lognormal",
"matplotlib.pyplot.plot_date",
"matplotlib.ticker.MultipleLocator",
"numpy.asarray",
"numpy.random.random_sample",
"numpy.concatenate",
"numpy.hypot",
"matplotlib.axes.Axes._parse_scatter_color_args",
"matplotlib.ticker.StrMethodFormatter",
"numpy.allclose",
"matplotlib.testing.decorators.remove_ticks_and_titles",
"numpy.ma.masked_greater",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.pyplot.style.use",
"matplotlib.transforms.Bbox.from_bounds",
"matplotlib.transforms.TransformedPath",
"numpy.min",
"matplotlib.testing.jpl_units.day.convert",
"numpy.int64",
"matplotlib.testing.jpl_units.register",
"numpy.random.rand",
"numpy.errstate",
"numpy.random.RandomState",
"matplotlib.pyplot.hist",
"matplotlib.dates.num2date",
"matplotlib.rc_context",
"numpy.maximum",
"matplotlib.testing.jpl_units.Duration",
"matplotlib.pyplot.yscale",
"numpy.ones",
"matplotlib.collections.PolyCollection",
"matplotlib.pyplot.FormatStrFormatter",
"numpy.linspace",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.plot",
"numpy.zeros_like",
"numpy.histogram",
"matplotlib.pyplot.FixedFormatter",
"numpy.hstack",
"numpy.eye",
"matplotlib.pyplot.gcf",
"matplotlib.testing.decorators.image_comparison",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.pyplot.close",
"numpy.repeat",
"numpy.testing.assert_array_almost_equal",
"matplotlib.pyplot.LinearLocator",
"numpy.nonzero",
"matplotlib.pyplot.ylim",
"matplotlib.patches.Rectangle",
"numpy.deg2rad",
"numpy.testing.assert_allclose",
"numpy.correlate",
"matplotlib.pyplot.Rectangle",
"matplotlib.axes._subplots.subplot_class_factory",
"matplotlib.collections.PatchCollection",
"numpy.log2",
"matplotlib.axes.subplot_class_factory",
"matplotlib.pyplot.FuncFormatter",
"numpy.cos",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.draw",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"pandas.plotting.deregister_matplotlib_converters",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.pcolor",
"matplotlib.pyplot.xscale",
"numpy.dot",
"matplotlib.colors.to_rgba",
"numpy.max",
"numpy.random.randn",
"numpy.ma.array",
"matplotlib.pyplot.gca",
"numpy.ones_like",
"matplotlib.style.use",
"numpy.arange",
"matplotlib.pyplot.eventplot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.stairs",
"matplotlib.pyplot.figure",
"numpy.isclose",
"matplotlib.patches.Arc",
"matplotlib.pyplot.savefig",
"numpy.ma.masked_equal",
"matplotlib.colors.ListedColormap",
"matplotlib.rcParams.update",
"numpy.ndenumerate",
"numpy.meshgrid",
"matplotlib.patches.Wedge",
"matplotlib.markers.MarkerStyle",
"numpy.random.random",
"numpy.abs",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.random.standard_normal",
"matplotlib.lines.Line2D",
"numpy.int32",
"numpy.random.normal",
"matplotlib.dates.date2num",
"matplotlib.pyplot.rc_context",
"matplotlib.pyplot.specgram"
],
[
"numpy.array",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.array",
"numpy.random.rand",
"numpy.random.randint"
],
[
"numpy.abs",
"numpy.finfo",
"numpy.sign",
"numpy.any",
"numpy.array",
"numpy.zeros"
],
[
"numpy.logical_not",
"numpy.random.RandomState",
"numpy.zeros",
"numpy.percentile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NickyBar/QIP | [
"11747b40beb38d41faa297fb2b53f28c6519c753"
] | [
"qiskit/basicplotter.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Copyright 2017 IBM RESEARCH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\"\"\"\nBasic plotting methods using matplotlib.\n\nThese include methods to plot Bloch vectors, histograms, and quantum spheres.\n\nAuthor: Andrew Cross, Jay Gambetta\n\"\"\"\nfrom mpl_toolkits.mplot3d import proj3d\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import FancyArrowPatch\nimport numpy as np\nfrom collections import Counter\nfrom functools import reduce\n\n\ndef plot_histogram(data, number_to_keep=None):\n \"\"\"Plot a histogram of data.\n\n data is a dictionary of {'000': 5, '010': 113, ...}\n number_to_keep is the number of terms to plot and rest is made into a\n single bar called other values\n \"\"\"\n if number_to_keep is not None:\n data_temp = dict(Counter(data).most_common(number_to_keep))\n data_temp[\"rest\"] = sum(data.values()) - sum(data_temp.values())\n data = data_temp\n\n labels = sorted(data)\n values = np.array([data[key] for key in labels], dtype=float)\n pvalues = values / sum(values)\n numelem = len(values)\n ind = np.arange(numelem) # the x locations for the groups\n width = 0.35 # the width of the bars\n fig, ax = plt.subplots()\n rects = ax.bar(ind, pvalues, width, color='seagreen')\n # add some text for labels, title, and axes ticks\n ax.set_ylabel('Probabilities', fontsize=12)\n ax.set_xticks(ind)\n ax.set_xticklabels(labels, fontsize=12)\n ax.set_ylim([0., min([1.2, max([1.2 * val for val in pvalues])])])\n # attach some text labels\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,\n '%f' % float(height),\n ha='center', va='bottom')\n plt.show()\n\n\n# Functions used for plotting on the qsphere.\n#\n# See:\n# lex_index:\n# https://msdn.microsoft.com/en-us/library/aa289166%28v=vs.71%29.aspx\n# n_choose_k: http://stackoverflow.com/questions/\n# 2096573/counting-combinations-and-permutations-efficiently\n\n\nclass Arrow3D(FancyArrowPatch):\n \"\"\"Standard 3D arrow.\"\"\"\n\n def __init__(self, xs, ys, zs, *args, **kwargs):\n \"\"\"Create arrow.\"\"\"\n FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n \"\"\"Draw the arrow.\"\"\"\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\n FancyArrowPatch.draw(self, renderer)\n\n\ndef compliment(value):\n \"\"\"Swap 1 and 0 in a vector.\"\"\"\n return ''.join(COMPLEMENT[x] for x in value)\n\n\nCOMPLEMENT = {'1': '0', '0': '1'}\n\n\ndef n_choose_k(n, k):\n \"\"\"Return the number of combinations.\"\"\"\n if n == 0:\n return 0.0\n else:\n return reduce(lambda x, y: x * y[0] / y[1],\n zip(range(n - k + 1, n + 1),\n range(1, k + 1)), 1)\n\n\ndef lex_index(n, k, lst):\n \"\"\"Return the index of a combination.\"\"\"\n assert len(lst) == k, \"list should have 
length k\"\n comb = list(map(lambda x: n - 1 - x, lst))\n dualm = sum([n_choose_k(comb[k - 1 - i], i + 1) for i in range(k)])\n m = dualm\n return int(m)\n\n\ndef bit_string_index(s):\n \"\"\"Return the index of a string of 0s and 1s.\"\"\"\n n = len(s)\n k = s.count(\"1\")\n assert s.count(\"0\") == n - k, \"s must be a string of 0 and 1\"\n ones = [pos for pos, char in enumerate(s) if char == \"1\"]\n return lex_index(n, k, ones)\n\n\ndef plot_qsphere(data, number_to_keep, number_of_qubits):\n \"\"\"Plot the qsphere of data.\"\"\"\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(111, projection='3d')\n ax.axes.set_xlim3d(-1.0, 1.0)\n ax.axes.set_ylim3d(-1.0, 1.0)\n ax.axes.set_zlim3d(-1.0, 1.0)\n ax.set_aspect(\"equal\")\n ax.axes.grid(False)\n # Plot semi-transparent sphere\n u = np.linspace(0, 2 * np.pi, 25)\n v = np.linspace(0, np.pi, 25)\n x = np.outer(np.cos(u), np.sin(v))\n y = np.outer(np.sin(u), np.sin(v))\n z = np.outer(np.ones(np.size(u)), np.cos(v))\n ax.plot_surface(x, y, z, rstride=1, cstride=1, color='k', alpha=0.05,\n linewidth=0)\n # wireframe\n # Get rid of the panes\n # ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n # ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n # ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n\n # Get rid of the spines\n # ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n # ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n # ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n # Get rid of the ticks\n # ax.set_xticks([])\n # ax.set_yticks([])\n # ax.set_zticks([])\n d = number_of_qubits\n total_values = sum(data.values())\n for key in data:\n weight = key.count(\"1\")\n zvalue = -2 * weight / d + 1\n number_of_divisions = n_choose_k(d, weight)\n weight_order = bit_string_index(key)\n if weight_order >= number_of_divisions / 2:\n com_key = compliment(key)\n weight_order_temp = bit_string_index(com_key)\n weight_order = np.floor(\n number_of_divisions / 2) + weight_order_temp + 1\n print(key + \" \" + str(weight_order))\n angle = (weight_order) * 2 * np.pi / number_of_divisions\n xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)\n yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)\n linewidth = 5 * data.get(key) / total_values\n print([xvalue, yvalue, zvalue])\n a = Arrow3D([0, xvalue], [0, yvalue], [0, zvalue], mutation_scale=20,\n lw=linewidth, arrowstyle=\"->\", color=\"k\")\n ax.add_artist(a)\n for weight in range(d + 1):\n theta = np.linspace(-2 * np.pi, 2 * np.pi, 100)\n z = -2 * weight / d + 1\n if weight == 0:\n z = z - 0.001\n if weight == d:\n z = z + 0.001\n r = np.sqrt(1 - z**2)\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n ax.plot(x, y, z, 'k')\n plt.show()\n\n\n# Functions used for plotting tomography.\n\n\ndef plot_bloch_vector(bloch, title=\"\"):\n \"\"\"Plot a Bloch vector.\n\n Plot a sphere, axes, the Bloch vector, and its projections onto each axis.\n bloch is a 3-tuple (x, y, z)\n title is a string, the plot title\n \"\"\"\n # Set arrow lengths\n arlen = 1.3\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_aspect(\"equal\")\n\n # Plot semi-transparent sphere\n u = np.linspace(0, 2 * np.pi, 100)\n v = np.linspace(0, np.pi, 100)\n x = np.outer(np.cos(u), np.sin(v))\n y = np.outer(np.sin(u), np.sin(v))\n z = np.outer(np.ones(np.size(u)), np.cos(v))\n ax.plot_surface(x, y, z, color=\"b\", alpha=0.1)\n\n # Plot arrows (axes, Bloch vector, its projections)\n xa = Arrow3D([0, arlen], [0, 0], [0, 0], mutation_scale=20, lw=1,\n arrowstyle=\"-|>\", color=\"k\")\n ya = Arrow3D([0, 0], [0, arlen], [0, 
0], mutation_scale=20, lw=1,\n arrowstyle=\"-|>\", color=\"k\")\n za = Arrow3D([0, 0], [0, 0], [0, arlen], mutation_scale=20, lw=1,\n arrowstyle=\"-|>\", color=\"k\")\n a = Arrow3D([0, bloch[0]], [0, bloch[1]], [0, bloch[2]], mutation_scale=20,\n lw=2, arrowstyle=\"simple\", color=\"k\")\n bax = Arrow3D([0, bloch[0]], [0, 0], [0, 0], mutation_scale=20, lw=2,\n arrowstyle=\"-\", color=\"r\")\n bay = Arrow3D([0, 0], [0, bloch[1]], [0, 0], mutation_scale=20, lw=2,\n arrowstyle=\"-\", color=\"g\")\n baz = Arrow3D([0, 0], [0, 0], [0, bloch[2]], mutation_scale=20, lw=2,\n arrowstyle=\"-\", color=\"b\")\n arrowlist = [xa, ya, za, a, bax, bay, baz]\n for arr in arrowlist:\n ax.add_artist(arr)\n\n # Rotate the view\n ax.view_init(30, 30)\n\n # Annotate the axes, shifts are ad-hoc for this (30, 30) view\n xp, yp, _ = proj3d.proj_transform(arlen, 0, 0, ax.get_proj())\n plt.annotate(\"x\", xy=(xp, yp), xytext=(-3, -8),\n textcoords='offset points', ha='right', va='bottom')\n xp, yp, _ = proj3d.proj_transform(0, arlen, 0, ax.get_proj())\n plt.annotate(\"y\", xy=(xp, yp), xytext=(6, -5),\n textcoords='offset points', ha='right', va='bottom')\n xp, yp, _ = proj3d.proj_transform(0, 0, arlen, ax.get_proj())\n plt.annotate(\"z\", xy=(xp, yp), xytext=(2, 0),\n textcoords='offset points', ha='right', va='bottom')\n\n plt.title(title)\n plt.show()\n\n\n# Functions used by randomized benchmarking.\n\n\ndef plot_rb_data(xdata, ydatas, yavg, fit, survival_prob):\n \"\"\"Plot randomized benchmarking data.\n\n xdata = list of subsequence lengths\n ydatas = list of lists of survival probabilities for each sequence\n yavg = mean of the survival probabilities at each sequence length\n fit = list of fitting parameters [a, b, alpha]\n survival_prob = function that computes survival probability\n \"\"\"\n # Plot the result for each sequence\n for ydata in ydatas:\n plt.plot(xdata, ydata, 'rx')\n # Plot the mean\n plt.plot(xdata, yavg, 'bo')\n # Plot the fit\n plt.plot(xdata, survival_prob(xdata, *fit), 'b-')\n plt.show()\n"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.plot",
"matplotlib.patches.FancyArrowPatch.draw",
"numpy.size",
"numpy.floor",
"matplotlib.patches.FancyArrowPatch.__init__",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wfarah/frbpoppy | [
"e575c49e6b4a69015a66d3f38a3459e0ffe4eb05",
"e575c49e6b4a69015a66d3f38a3459e0ffe4eb05",
"e575c49e6b4a69015a66d3f38a3459e0ffe4eb05",
"e575c49e6b4a69015a66d3f38a3459e0ffe4eb05"
] | [
"tests/int_pro_surveys.py",
"tests/logn_logs_spectral_index.py",
"tests/int_pro_theory.py",
"frbpoppy/survey.py"
] | [
"\"\"\"Plot intensity profile of theoretical beam patterns.\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import binned_statistic as bstat\n\nfrom frbpoppy.survey import Survey\n\nOBSERVATORIES = [('parkes', 'htru'),\n ('apertif', 'apertif')]\n\nn = int(1e6)\n\nfor obs in OBSERVATORIES:\n\n survey = obs[1]\n pattern = obs[0]\n\n s = Survey(survey, gain_pattern=pattern)\n int_pro, offset = s.intensity_profile(n_gen=n)\n\n # Sort the values\n sorted_int = np.argsort(offset)\n int_pro = int_pro[sorted_int]\n offset = offset[sorted_int]\n\n # Offset in degrees\n offset = offset/60.\n\n bins = 1e2\n\n bin_means, bin_edges, bin_numbers = bstat(offset,\n int_pro,\n statistic='mean',\n bins=bins)\n\n bin_mins, _, _ = bstat(offset, int_pro, statistic='min', bins=bins)\n bin_maxs, _, _ = bstat(offset, int_pro, statistic='max', bins=bins)\n\n center = (bin_edges[:-1] + bin_edges[1:]) / 2\n\n plt.plot(center, bin_means, label=pattern)\n plt.fill_between(center, bin_mins, bin_maxs, alpha=0.2)\n\n\nplt.xlabel(f'Offset ($\\degree$)')\nplt.ylabel('Intensity Profile')\nplt.yscale('log')\nplt.legend()\nplt.tight_layout()\nplt.savefig('plots/int_pro_surveys.pdf')\n",
"\"\"\"Script to reproduce Fig. 6 of Connor et al. 2017.\n\nPlot N(>S) over log S (S being the flux density) for various spectral indices.\n\"\"\"\nimport copy\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy.signal import savgol_filter\n\nfrom frbpoppy import CosmicPopulation, Survey, SurveyPopulation, unpickle\n\nCREATE = False\nOBSERVE = False\nSIS = (-2, 0, 2) # Spectral indices\n\npop = {}\n\nif CREATE:\n days = 14\n n_per_day = 5000\n n_tot = n_per_day*days\n\n for si in SIS:\n\n if si == min(SIS):\n\n pop[si] = CosmicPopulation(n_tot,\n days=days,\n name=f'si-{si}',\n dm_host_model='normal',\n dm_host_mu=0,\n dm_host_sigma=0,\n dm_igm_index=1200,\n dm_igm_sigma=0,\n dm_mw_model='zero',\n emission_range=[10e6, 10e9],\n lum_range=[1e40, 1e40],\n lum_index=0,\n n_model='vol_co',\n pulse_model='uniform',\n pulse_range=[1., 1.],\n pulse_mu=1.,\n pulse_sigma=0.,\n repeat=0.,\n si_mu=si,\n si_sigma=0.,\n z_max=2.5)\n pop[si].save()\n\n else:\n pop[si] = copy.deepcopy(pop[min(SIS)])\n pop[si].frbs.si = np.random.normal(si, 0, n_tot)\n pop[si].name = f'si-{si}'\n pop[si].save()\n\npop_obs = {}\n\nif OBSERVE or CREATE:\n\n for si in SIS:\n\n if not CREATE:\n pop[si] = unpickle(f'si-{si}')\n\n # Create Survey\n perfect = Survey('perfect', gain_pattern='perfect')\n\n # Observe populations\n pop_obs[si] = SurveyPopulation(pop[si], perfect)\n pop_obs[si].name = f'si-{si}-obs'\n pop_obs[si].rates()\n pop_obs[si].save()\n\nelse:\n for si in SIS:\n pop_obs[si] = unpickle(f'si-{si}-obs')\n\n\n# Plot log N and alpha versus log S\nf, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\nmin_s = 1e99\nmax_s = -1e99\n\nfor si in SIS:\n\n pop = pop_obs[si]\n\n s_peak = pop.frbs.s_peak\n\n # Bin up\n number, bins = np.histogram(np.log10(s_peak), bins=500) # N(S)\n n_gt_s = np.cumsum(number[::-1])[::-1] # N(>S) from N(S)\n x = bins[:-1] # log(S)\n y = np.log10(n_gt_s) # log(N(>S))\n\n ax1.step(x, y, where='pre', label=fr\"$\\gamma$ of {si}\")\n\n # Plot alpha\n # Calculate derivative\n der = np.diff(y) / np.diff(x)\n bin_centres = (x[:-1] + x[1:]) / 2\n\n # Smooth function\n derhat = savgol_filter(der, 51, 3)\n ax2.step(bin_centres, derhat, where='mid')\n\n if min(bin_centres) <= min_s:\n min_s = min(bin_centres)\n if max(bin_centres) >= max_s:\n max_s = max(bin_centres)\n\n# Add a -3/2 slope\nx = np.linspace(min_s, max_s, 1000)\ny = -1.5*x\ny -= min(y)\ny += min(np.log10(n_gt_s))\nx = x[y <= max(np.log10(n_gt_s))]\ny = y[y <= max(np.log10(n_gt_s))]\nax1.step(x, y, where='mid', color='grey', alpha=0.5)\n\n# Plot alpha over log S\n\n# Plot a Euclidean line\nx = np.linspace(min_s, max_s, 1000)\ny = np.ones_like(x) * -1.5\nax2.step(x, y, where='mid', color='grey', alpha=0.5)\n\n\nax1.set_ylabel(r'log N(>S$_{\\text{peak}}$)')\nax1.legend()\nax2.set_xlabel(r'log S$_{\\text{peak}}$')\nax2.set_ylabel(r'$\\alpha$')\nax2.set_ylim(ax2.get_ylim()[::-1])\n\nplt.tight_layout()\nplt.savefig(f'plots/logn_logs_si.pdf')\n",
"\"\"\"Plot intensity profile of theoretical beam patterns.\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom frbpoppy.survey import Survey\n\nPATTERNS = ['perfect', 'gaussian', 'airy-0', 'airy-4']\nSURVEY = 'apertif'\nMIN_Y = 1e-6\nn = 500000\n\nfor pattern in PATTERNS:\n\n n_sidelobes = 1\n p = pattern\n z = 0\n if pattern.startswith('airy'):\n n_sidelobes = int(pattern[-1])\n p = 'airy'\n if n_sidelobes == 0:\n z = 10\n\n s = Survey(SURVEY, gain_pattern=p, n_sidelobes=n_sidelobes)\n int_pro, offset = s.intensity_profile(n_gen=n)\n\n # Sort the values\n sorted_int = np.argsort(offset)\n int_pro = int_pro[sorted_int]\n offset = offset[sorted_int]\n\n # Clean up lower limit\n offset = offset[int_pro > MIN_Y]\n int_pro = int_pro[int_pro > MIN_Y]\n\n # Offset in degrees\n offset = offset/60.\n\n print(s.beam_size_fwhm, s.beam_size)\n\n plt.plot(offset, int_pro, label=pattern, zorder=z)\n\n\nplt.xlabel(f'Offset ($\\degree$)')\nplt.ylabel('Intensity Profile')\nplt.yscale('log')\nplt.legend()\nplt.tight_layout()\nplt.savefig('plots/int_pro_theory.pdf')\n",
"\"\"\"Class holding survey properties.\"\"\"\nimport math\nimport numpy as np\nimport os\nimport pandas as pd\nfrom scipy.special import j1\n\nimport frbpoppy.galacticops as go\nfrom frbpoppy.log import pprint\nfrom frbpoppy.paths import paths\n\n\nclass Survey:\n \"\"\"\n Method containing survey parameters and functions.\n\n Args:\n name (str): Name of survey with which to observe population. Can\n either be a predefined survey present in frbpoppy or a path name to\n a new survey filename\n gain_pattern (str): Set gain pattern\n sidelobes (int): Number of sidelobes to include\n equal_area (int/bool): Ensures beam area on sky can be equal to a beam\n pattern with a max number of sizelobes. If unwanted, set to False\n\n \"\"\"\n\n def __init__(self,\n name,\n gain_pattern='gaussian',\n n_sidelobes=0.5):\n \"\"\"Initializing.\"\"\"\n # Set up parameters\n self.name = name\n self.gain_pattern = gain_pattern\n self.n_sidelobes = n_sidelobes\n\n # Parse survey file\n self.read_survey_parameters()\n\n def __str__(self):\n \"\"\"Define how to print a survey object to a console.\"\"\"\n s = 'Survey properties:'\n\n attributes = []\n for e in self.__dict__:\n attr = '\\n\\t{0:13.12}{1:.60}'.format(e, str(self.__dict__[e]))\n attributes.append(attr)\n\n s += ''.join(attributes)\n\n return s\n\n def read_survey_parameters(self):\n \"\"\"Read in survey parameters.\"\"\"\n # Read the survey file\n path = os.path.join(paths.surveys(), 'surveys.csv')\n df = pd.read_csv(path)\n df = df.set_index('survey')\n\n # Obtain the information relevant to a single survey\n survey = df[df.index == self.name].squeeze()\n\n # Parse parameters\n self.beta = survey['survey degradation factor']\n self.gain = survey['antenna gain (K/Jy)']\n self.t_obs = survey['integration time (s)']\n self.t_samp = survey['sampling time (ms)']\n self.T_rec = survey['receiver temperature (K)']\n self.central_freq = int(survey['centre frequency (MHz)'])\n self.bw = survey['bandwidth (MHz)']\n self.bw_chan = survey['channel bandwidth (MHz)']\n self.n_pol = survey['number of polarizations']\n self.beam_size_fwhm = survey['beam size (deg^2)']\n self.snr_limit = survey['signal-to-noise ratio [0-1]']\n self.max_w_eff = survey['maximum pulse width (ms)']\n self.ra_min = survey['minimum RA (deg)']\n self.ra_max = survey['maximum RA (deg)']\n self.dec_min = survey['minimum DEC (deg)']\n self.dec_max = survey['maximum DEC (deg)']\n self.gl_min = survey['minimum Galactic longitude (deg)']\n self.gl_max = survey['maximum Galactic longitude (deg)']\n self.gb_min = survey['minimum Galactic latitude (deg)']\n self.gb_max = survey['maximum Galactic latitude (deg)']\n self.up_time = survey['fractional uptime [0-1]']\n\n def in_region(self, frbs):\n \"\"\"\n Check if the given frbs are within the survey region.\n\n Args:\n frbs (Frbs): Frbs of which to check whether in survey region\n\n Returns:\n array: Boolean mask denoting whether frbs are within survey region\n\n \"\"\"\n # Create mask with False\n mask = np.ones_like(frbs.ra, dtype=bool)\n\n # Ensure in correct format\n frbs.gl[frbs.gl > 180.] 
-= 360.\n\n # Create region masks\n gl_limits = (frbs.gl > self.gl_max) | (frbs.gl < self.gl_min)\n gb_limits = (frbs.gb > self.gb_max) | (frbs.gb < self.gb_min)\n ra_limits = (frbs.ra > self.ra_max) | (frbs.ra < self.ra_min)\n dec_limits = (frbs.dec > self.dec_max) | (frbs.dec < self.dec_min)\n mask[gl_limits] = False\n mask[gb_limits] = False\n mask[ra_limits] = False\n mask[dec_limits] = False\n\n return mask\n\n def max_offset(self, x):\n \"\"\"Calculate the maximum offset of an FRB in an Airy disk.\n\n Args:\n x (int): Maximum sidelobe wanted\n\n \"\"\"\n # Null points of kasin for allow a number of sidelobes\n kasin_nulls = [3.831706,\n 7.015587,\n 10.173468,\n 13.323692,\n 16.47063,\n 19.615859,\n 22.760084,\n 25.903672,\n 29.046829,\n 32.18968,\n 35.332308,\n 38.474766]\n\n # Allow for cut at FWHM\n if x == 0.5:\n return 1\n\n try:\n arcsin = math.asin(self.fwhm*kasin_nulls[x]/(60*180))\n except ValueError:\n m = f'Beamsize including sidelobes would be larger than sky \\n'\n A = (90/kasin_nulls[x])**2*math.pi\n m += f'Ensure beamsize is smaller than {A}'\n raise ValueError(m)\n\n return 2/self.fwhm * 60*180/math.pi * arcsin\n\n def intensity_profile(self, n_gen=1, dimensions=2):\n \"\"\"Calculate intensity profile.\"\"\"\n # Calculate Full Width Half Maximum from beamsize\n self.fwhm = 2*math.sqrt(self.beam_size_fwhm/math.pi) * 60 # [arcmin]\n offset = self.fwhm/2 # Radius = diameter/2.\n\n if dimensions == 2: # 2D\n offset *= np.sqrt(np.random.random(n_gen))\n elif dimensions == 1: # 1D\n offset *= np.random.random(n_gen)\n\n # Allow for a perfect beam pattern in which all is detected\n if self.gain_pattern == 'perfect':\n int_pro = np.ones(n_gen)\n self.beam_size = self.beam_size_fwhm\n return int_pro, offset\n\n # Formula's based on 'Interferometry and Synthesis in Radio\n # Astronomy' by A. Richard Thompson, James. M. Moran and\n # George W. Swenson, JR. (Second edition), around p. 
15\n\n max_offset = self.max_offset(self.n_sidelobes)\n self.beam_size = math.pi*(self.fwhm/2*max_offset/60)**2 # [sq degrees]\n\n if self.gain_pattern == 'gaussian':\n # Set the maximum offset equal to the null after a sidelobe\n # I realise this pattern isn't an airy, but you have to cut\n # somewhere\n offset *= max_offset\n alpha = 2*math.sqrt(math.log(2))\n int_pro = np.exp(-(alpha*offset/self.fwhm)**2)\n return int_pro, offset\n\n elif self.gain_pattern == 'airy':\n # Set the maximum offset equal to the null after a sidelobe\n offset *= max_offset\n c = 299792458\n conv = math.pi/(60*180) # Conversion arcmins -> radians\n eff_diam = c/(self.central_freq*1e6*conv*self.fwhm)\n a = eff_diam/2 # Effective radius of telescope\n lamda = c/(self.central_freq*1e6)\n ka = (2*math.pi*a/lamda)\n kasin = ka*np.sin(offset*conv)\n int_pro = 4*(j1(kasin)/kasin)**2\n return int_pro, offset\n\n elif self.gain_pattern in ['parkes', 'apertif']:\n\n place = paths.models() + f'/beams/{self.gain_pattern}.npy'\n beam_array = np.load(place)\n shape = beam_array.shape\n ran_x = np.random.randint(0, shape[0], n_gen)\n ran_y = np.random.randint(0, shape[1], n_gen)\n int_pro = beam_array[ran_x, ran_y]\n offset = np.sqrt((ran_x-shape[0]/2)**2 + (ran_y-shape[1]/2)**2)\n\n # Scaling factors to correct for pixel scale\n if self.gain_pattern == 'apertif': # 1 pixel = 0.94'\n offset *= 240/256 # [arcmin]\n self.beam_size = 25.\n if self.gain_pattern == 'parkes': # 1 pixel = 54\"\n offset *= 0.9 # [arcmin]\n self.beam_size = 9.\n\n return int_pro, offset\n\n else:\n pprint(f'Gain pattern \"{self.gain_pattern}\" not recognised')\n\n def dm_smear(self, frbs):\n \"\"\"\n Calculate delay in pulse across a channel due to dm smearing.\n\n Formula's based on 'Handbook of Pulsar Astronomy\" by Duncan Lorimer\n & Michael Kramer, section A2.4. Note the power of the forefactor has\n changed due to the central frequency being given in MHz.\n\n Args:\n frbs (FRBs): FRB object with a dm attribute\n\n Returns:\n t_dm (array): Time of delay [ms] at central band frequency\n \"\"\"\n\n t_dm = 8.297616e6 * self.bw_chan * frbs.dm * (self.central_freq)**-3\n return t_dm\n\n def calc_scat(self, dm):\n \"\"\"Calculate scattering timescale for FRBs.\n\n Offset according to Lorimer et al. (doi:10.1093/mnrasl/slt098)\n\n Args:\n dm (array): Dispersion Measure\n\n Returns:\n array: Scattering timescales [ms]\n\n \"\"\"\n freq = self.central_freq\n t_scat = go.scatter_bhat(dm, scindex=-3.86, offset=-9.5, freq=freq)\n return t_scat\n\n def calc_Ts(self, frbs):\n \"\"\"Set temperatures for frbs.\"\"\"\n T_sky = self.calc_T_sky(frbs)\n T_sys = self.T_rec + T_sky\n return T_sky, T_sys\n\n def calc_T_sky(self, frbs):\n \"\"\"\n Calculate the sky temperature from the Haslam table.\n\n Afterwards scale to the survey frequency. The temperature sky map is\n given in the weird units of HealPix and despite looking up info on this\n coordinate system, I don't have the foggiest idea of how to transform\n these to galactic coordinates. I have therefore directly copied the\n following code from psrpoppy in the assumption Sam Bates managed to\n figure it out.\n\n Args:\n frbs (FRBs): Needed for coordinates\n Returns:\n array: Sky temperature [K]\n \"\"\"\n T_sky_list = go.load_T_sky()\n\n # ensure l is in range 0 -> 360\n B = frbs.gb\n L = np.copy(frbs.gl)\n L[L < 0.] 
+= 360\n\n # convert from l and b to list indices\n j = B + 90.5\n j[j > 179] = 179\n\n nl = L - 0.5\n nl[L < 0.5] = 359\n i = nl / 4.\n\n index = 180*i.astype(int) + j.astype(int)\n T_sky_haslam = np.take(T_sky_list, index)\n\n # scale temperature\n # Assuming dominated by syncrotron radiation\n T_sky = T_sky_haslam * (self.central_freq/408.0)**(-2.6)\n\n return T_sky\n\n def calc_s_peak(self, frbs, f_low=10e6, f_high=10e9):\n \"\"\"\n Calculate the mean spectral flux density.\n\n Following Lorimer et al, 2013, eq. 9., at the central frequency\n of the survey.\n\n Args:\n frbs (FRBs): FRBs\n f_low (float): Source emission lower frequency limit [Hz].\n f_high (float): Source emission higher frequency limit [Hz].\n\n Returns:\n array: Mean spectral flux density [Jy]\n\n \"\"\"\n # Limits observing bandwidth (as seen in rest frame source)\n f_1 = (self.central_freq - 0.5*self.bw)\n f_1 *= 1e6 # MHz -> Hz\n f_2 = (self.central_freq + 0.5*self.bw)\n f_2 *= 1e6 # MHz -> Hz\n\n # Spectral index\n sp = frbs.si + 1\n sm = frbs.si - 1\n\n # Convert distance in Gpc to metres\n dist = frbs.dist_co * 1e9 * 3.0856775814913673 * 1e16\n\n # Convert luminosity to Watts\n lum = frbs.lum_bol * 1e-7\n\n freq_frac = (f_2**sp - f_1**sp) / (f_2 - f_1)\n nom = lum * (1+frbs.z)**sm * freq_frac\n den = 4*np.pi*dist**2 * (f_high**sp - f_low**sp)\n s_peak = nom/den\n\n # Convert to Janskys\n s_peak *= 1e26\n\n # Add degradation factor due to pulse broadening (see Connor 2019)\n s_peak *= (frbs.w_arr / frbs.w_eff)\n\n return s_peak\n\n def calc_w_eff(self, frbs):\n \"\"\"Calculate effective pulse width [ms].\n\n From Narayan (1987, DOI: 10.1086/165442), and also Cordes & McLaughlin\n (2003, DOI: 10.1086/378231). For details see p. 30 of Emily Petroff's\n thesis (2016), found here: http://hdl.handle.net/1959.3/417307\n\n Args:\n frbs (FRBs): FRBs for which to calculate effective pulse width.\n\n Returns:\n array: Effective pulse width [ms]\n\n \"\"\"\n w_eff = np.sqrt(frbs.w_arr**2 +\n frbs.t_dm**2 +\n frbs.t_scat**2 +\n self.t_samp**2)\n return w_eff\n\n def calc_snr(self, frbs):\n \"\"\"\n Caculate the SNR of several frbs.\n\n Args:\n frbs (FRBs): FRBs of which to calculate the signal to noise\n\n Returns:\n array: Signal to noise ratio based on the radiometer\n equation for a single pulse.\n\n \"\"\"\n # Radiometer equation for single pulse (Dewey et al., 1984), but\n # adapted to allow for a degradation factor reducing the peak flux\n # as a pulse is stretched\n sp = frbs.s_peak\n snr = sp*self.gain*np.sqrt(self.n_pol*self.bw*frbs.w_arr*1e3)\n snr /= (frbs.T_sys * self.beta)\n return snr\n\n def calc_scint(self, frbs):\n \"\"\"\n Calculate scintillation effect on the signal to noise ratio.\n\n (Rather than adapting the flux, as the snr can change per survey\n attempt). Formulas based on 'Handbook of Pulsar Astronomy\" by Duncan\n Lorimer & Michael Kramer, section 4.2. Test this before applying - no\n rigorous testing has been applied to this.\n\n Args:\n frbs (FRBs): FRBs\n\n Returns:\n array: Signal to noise ratio modulation factors for scintillation\n\n \"\"\"\n # Calculate scattering\n if type(frbs.t_scat) is not np.ndarray:\n frbs.t_scat = self.calc_scat(frbs.dm)\n\n # Convert to seconds\n frbs.t_scat /= 1000.\n\n # Decorrelation bandwidth (eq. 4.39)\n decorr_bw = 1.16/(2*math.pi*frbs.t_scat)\n # Convert to MHz\n decorr_bw /= 1e6\n\n # Scintillation strength (eq. 
4.33)\n u = np.sqrt(self.central_freq / decorr_bw)\n\n m = np.zeros_like(u)\n\n # Strong scintillation\n strong = (u < 1)\n m[strong] = np.sqrt(u[strong]**(5/3)) # (eq. 4.35)\n\n # Weak scintillation\n\n # Refractive scintillation (eq. 4.47)\n m_riss = u**-(1/3)\n\n # Taking the average kappa value\n kappa = 0.15\n\n t_diss, decorr_bw = go.ne2001_scint_time_bw(frbs.dist_co,\n frbs.gl,\n frbs.gb,\n self.central_freq)\n\n # Following Cordes and Lazio (1991) (eq. 4.43)\n n_t = np.ones_like(t_diss)\n n_t[~np.isnan(t_diss)] = 1 + kappa * self.t_obs / t_diss\n\n n_f = np.ones_like(decorr_bw)\n n_f[~np.isnan(decorr_bw)] = 1 + kappa * self.bw / decorr_bw\n\n # Diffractive scintillation (eq. 4.41)\n m_diss = 1 / np.sqrt(n_t * n_f)\n\n # (eq. 4.48)\n weak = (u >= 1)\n m[weak] = np.sqrt(m_diss**2 + m_riss**2 + m_diss*m_riss)\n\n # Distribute the scintillation according to gaussian distribution\n snr = np.random.normal(frbs.snr, m*frbs.snr)\n\n return snr\n\n def calc_fluence_limit(self, w_eff=None):\n \"\"\"Calculate the fluence limit.\n\n Read Keane, Petroff (2015) for more details on how this is calculated.\n\n Args:\n w_eff: Pulse width at which to calculate the fluence limit [ms].\n Default sets this to be at the maximum searched pulse width.\n\n Returns:\n float: Fluence limit for a maximum pulse width burst [Jy ms]\n\n \"\"\"\n if not w_eff:\n w_eff = self.max_w_eff\n\n # Line of constant S/N\n self.s_peak_limit = self.snr_limit*self.T_rec*self.beta\n self.s_peak_limit /= self.gain*math.sqrt(self.n_pol*self.bw*1e3)\n\n # Line of constant fluence\n self.fluence_limit = self.s_peak_limit / math.sqrt(w_eff)\n self.fluence_limit *= w_eff\n\n return self.fluence_limit\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"numpy.argsort",
"matplotlib.pyplot.xlabel",
"scipy.stats.binned_statistic",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.ones_like",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.cumsum",
"numpy.random.normal",
"numpy.log10",
"numpy.diff",
"scipy.signal.savgol_filter"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.argsort",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
],
[
"pandas.read_csv",
"numpy.ones_like",
"numpy.take",
"numpy.sqrt",
"numpy.random.random",
"numpy.isnan",
"numpy.ones",
"numpy.sin",
"numpy.copy",
"numpy.random.normal",
"numpy.zeros_like",
"scipy.special.j1",
"numpy.load",
"numpy.exp",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
feature-engineer/glymur | [
"660b593ab7bfbb3036de5d15c3ecb43bef3bf919"
] | [
"tests/test_colour_specification_box.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nTest suite specifically targeting ICC profiles\n\"\"\"\n\n# Standard library imports ...\nfrom datetime import datetime\ntry:\n import importlib.resources as ir\nexcept ImportError: # pragma: no cover\n # before 3.7\n import importlib_resources as ir\nimport struct\nimport tempfile\nimport unittest\nimport warnings\n\n# Third party library imports\nimport numpy as np\n\n# Local imports\nimport glymur\nfrom glymur import Jp2k\nfrom glymur._iccprofile import _ICCProfile\nfrom glymur.jp2box import (\n ColourSpecificationBox, ContiguousCodestreamBox, FileTypeBox,\n ImageHeaderBox, JP2HeaderBox, JPEG2000SignatureBox, InvalidJp2kError\n)\nfrom glymur.core import SRGB\nfrom . import fixtures, data\n\n\nclass TestColourSpecificationBox(fixtures.TestCommon):\n \"\"\"Test suite for colr box instantiation.\"\"\"\n\n def setUp(self):\n super(TestColourSpecificationBox, self).setUp()\n\n j2k = Jp2k(self.j2kfile)\n codestream = j2k.get_codestream()\n height = codestream.segment[1].ysiz\n width = codestream.segment[1].xsiz\n num_components = len(codestream.segment[1].xrsiz)\n\n self.jp2b = JPEG2000SignatureBox()\n self.ftyp = FileTypeBox()\n self.jp2h = JP2HeaderBox()\n self.jp2c = ContiguousCodestreamBox()\n self.ihdr = ImageHeaderBox(height=height, width=width,\n num_components=num_components)\n\n self.icc_profile = ir.read_binary(data, 'sgray.icc')\n\n def test_bad_method_printing(self):\n \"\"\"\n SCENARIO: An ICC profile is both too short and has an invalid method\n value.\n\n EXPECTED RESULT: Warnings are issued. Printing the string\n representation should not error out.\n \"\"\"\n with ir.path(data, 'issue405.dat') as path:\n with path.open('rb') as f:\n f.seek(8)\n with warnings.catch_warnings():\n # Lots of things wrong with this file.\n warnings.simplefilter('ignore')\n box = ColourSpecificationBox.parse(f, length=80, offset=0)\n str(box)\n\n def test_colr_with_out_enum_cspace(self):\n \"\"\"must supply an enumerated colorspace when writing\"\"\"\n j2k = Jp2k(self.j2kfile)\n\n boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]\n boxes[2].box = [self.ihdr, ColourSpecificationBox(colorspace=None)]\n with open(self.temp_jp2_filename, mode='wb') as tfile:\n with self.assertRaises(InvalidJp2kError):\n j2k.wrap(tfile.name, boxes=boxes)\n\n def test_missing_colr_box(self):\n \"\"\"jp2h must have a colr box\"\"\"\n j2k = Jp2k(self.j2kfile)\n boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]\n boxes[2].box = [self.ihdr]\n with open(self.temp_jp2_filename, mode='wb') as tfile:\n with self.assertRaises(InvalidJp2kError):\n j2k.wrap(tfile.name, boxes=boxes)\n\n def test_bad_approx_jp2_field(self):\n \"\"\"JP2 has requirements for approx field\"\"\"\n j2k = Jp2k(self.j2kfile)\n boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]\n colr = ColourSpecificationBox(colorspace=SRGB, approximation=1)\n boxes[2].box = [self.ihdr, colr]\n with open(self.temp_jp2_filename, mode='wb') as tfile:\n with self.assertRaises(InvalidJp2kError):\n j2k.wrap(tfile.name, boxes=boxes)\n\n def test_default_colr(self):\n \"\"\"basic colr instantiation\"\"\"\n colr = ColourSpecificationBox(colorspace=SRGB)\n self.assertEqual(colr.method, glymur.core.ENUMERATED_COLORSPACE)\n self.assertEqual(colr.precedence, 0)\n self.assertEqual(colr.approximation, 0)\n self.assertEqual(colr.colorspace, SRGB)\n self.assertIsNone(colr.icc_profile)\n\n def test_icc_profile(self):\n \"\"\"basic colr box with ICC profile\"\"\"\n colr = ColourSpecificationBox(icc_profile=self.icc_profile)\n 
self.assertEqual(colr.method, glymur.core.ENUMERATED_COLORSPACE)\n self.assertEqual(colr.precedence, 0)\n self.assertEqual(colr.approximation, 0)\n\n icc_profile = _ICCProfile(colr.icc_profile)\n self.assertEqual(icc_profile.header['Version'], '2.1.0')\n self.assertEqual(icc_profile.header['Color Space'], 'gray')\n self.assertIsNone(icc_profile.header['Datetime'])\n\n # Only True for version4\n self.assertFalse('Profile Id' in icc_profile.header.keys())\n\n def test_colr_with_bad_color(self):\n \"\"\"\n SCENARIO: A colr box has an invalid colorspace.\n\n EXPECTED RESULT: An InvalidJp2kError is raised when attempting to\n write the box.\n \"\"\"\n with self.assertWarns(UserWarning):\n # A warning is issued due to the bad colorspace.\n colr = ColourSpecificationBox(colorspace=-1, approximation=0)\n\n with tempfile.TemporaryFile() as tfile:\n with self.assertRaises(InvalidJp2kError):\n colr.write(tfile)\n\n def test_write_colr_with_bad_method(self):\n \"\"\"\n SCENARIO: A colr box has an invalid method value.\n\n EXPECTED RESULT: InvalidJp2kError\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n colr = ColourSpecificationBox(colorspace=SRGB, method=5)\n with tempfile.TemporaryFile() as tfile:\n with self.assertRaises(InvalidJp2kError):\n colr.write(tfile)\n\n\nclass TestSuite(unittest.TestCase):\n \"\"\"Test suite for ICC Profile code.\"\"\"\n\n def setUp(self):\n self.buffer = ir.read_binary(data, 'sgray.icc')\n\n def test_bad_rendering_intent(self):\n \"\"\"\n The rendering intent is not in the range 0-4.\n\n It should be classified as 'unknown'\n \"\"\"\n intent = struct.pack('>I', 10)\n self.buffer = self.buffer[:64] + intent + self.buffer[68:]\n\n icc_profile = _ICCProfile(self.buffer)\n self.assertEqual(icc_profile.header['Rendering Intent'], 'unknown')\n\n def test_version4(self):\n \"\"\"\n ICC profile is version 4\n \"\"\"\n leadoff = struct.pack('>IIBB', 416, 0, 4, 0)\n self.buffer = leadoff + self.buffer[10:]\n\n icc_profile = _ICCProfile(self.buffer)\n self.assertEqual(icc_profile.header['Version'], '4.0.0')\n self.assertTrue('Profile Id' in icc_profile.header.keys())\n\n def test_icc_profile(self):\n \"\"\"\n SCENARIO: The ColourDefinitionBox has an ICC profile.\n\n EXPECTED RESULT: Verify the ICC profile metadata.\n \"\"\"\n with ir.path(data, 'text_GBR.jp2') as path:\n with self.assertWarns(UserWarning):\n # The brand is wrong, this is JPX, not JP2.\n j = Jp2k(path)\n box = j.box[3].box[1]\n\n self.assertEqual(box.icc_profile_header['Size'], 1328)\n self.assertEqual(box.icc_profile_header['Color Space'], 'RGB')\n self.assertEqual(box.icc_profile_header['Connection Space'], 'XYZ')\n self.assertEqual(box.icc_profile_header['Datetime'],\n datetime(2009, 2, 25, 11, 26, 11))\n self.assertEqual(box.icc_profile_header['File Signature'], 'acsp')\n self.assertEqual(box.icc_profile_header['Platform'], 'APPL')\n self.assertEqual(box.icc_profile_header['Flags'],\n 'not embedded, can be used independently')\n self.assertEqual(box.icc_profile_header['Device Manufacturer'], 'appl')\n self.assertEqual(box.icc_profile_header['Device Model'], '')\n self.assertEqual(box.icc_profile_header['Device Attributes'],\n ('reflective, glossy, positive media polarity, '\n 'color media'))\n self.assertEqual(box.icc_profile_header['Rendering Intent'],\n 'perceptual')\n np.testing.assert_almost_equal(box.icc_profile_header['Illuminant'],\n np.array([0.9642023, 1.0, 0.824905]),\n decimal=6)\n self.assertEqual(box.icc_profile_header['Creator'], 'appl')\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
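A minimal standalone sketch of the approximate-comparison pattern used in the Illuminant assertion above: ICC profiles encode XYZ values in fixed-point (s15Fixed16), so exact float equality is unreliable and numpy's assert_almost_equal is used instead. The nominal D50 white-point values here are illustrative, not taken from glymur's test data.

    import numpy as np

    d50 = np.array([0.9642, 1.0, 0.8249])           # nominal D50 white point (XYZ)
    decoded = np.array([0.9642023, 1.0, 0.824905])  # value decoded from fixed-point

    # Passes: agreement to 3 decimal places; a mismatch raises AssertionError.
    np.testing.assert_almost_equal(decoded, d50, decimal=3)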
LiRunyi2001/cnSoftBei | [
"72b90033ade1e926d3fb23621f5c67fa8eec9bb4",
"72b90033ade1e926d3fb23621f5c67fa8eec9bb4"
] | [
"roberta/scripts/convert_bert_text_classification_from_huggingface_to_uer.py",
"roberta/finetune/run_chid.py"
] | [
"import sys\nimport os\nimport torch\nimport argparse\nimport collections\n\nuer_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\nsys.path.insert(0, uer_dir)\n\nfrom scripts.convert_bert_from_huggingface_to_uer import convert_bert_transformer_encoder_from_huggingface_to_uer\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\"--input_model_path\", type=str, default=\"huggingface_model.bin\",\n help=\".\")\nparser.add_argument(\"--output_model_path\", type=str, default=\"pytorch_model.bin\",\n help=\".\")\nparser.add_argument(\"--layers_num\", type=int, default=12, help=\".\")\n\nargs = parser.parse_args()\npath = args.input_model_path\n\ninput_model = torch.load(args.input_model_path, map_location='cpu')\n\noutput_model = collections.OrderedDict()\n\noutput_model[\"embedding.word_embedding.weight\"] = input_model[\"bert.embeddings.word_embeddings.weight\"]\noutput_model[\"embedding.position_embedding.weight\"] = input_model[\"bert.embeddings.position_embeddings.weight\"]\noutput_model[\"embedding.segment_embedding.weight\"] = torch.cat((torch.Tensor([[0]*input_model[\"bert.embeddings.token_type_embeddings.weight\"].size()[1]]), input_model[\"bert.embeddings.token_type_embeddings.weight\"]), dim=0)\noutput_model[\"embedding.layer_norm.gamma\"] = input_model[\"bert.embeddings.LayerNorm.weight\"]\noutput_model[\"embedding.layer_norm.beta\"] = input_model[\"bert.embeddings.LayerNorm.bias\"]\n\nconvert_bert_transformer_encoder_from_huggingface_to_uer(input_model, output_model, args.layers_num)\n\noutput_model[\"output_layer_1.weight\"] = input_model[\"bert.pooler.dense.weight\"]\noutput_model[\"output_layer_1.bias\"] = input_model[\"bert.pooler.dense.bias\"]\noutput_model[\"output_layer_2.weight\"] = input_model[\"classifier.weight\"]\noutput_model[\"output_layer_2.bias\"] = input_model[\"classifier.bias\"]\n\ntorch.save(output_model, args.output_model_path)\n",
"\"\"\"\nThis script provides an example to wrap UER-py for ChID (a multiple choice dataset).\n\"\"\"\nimport sys\nimport os\nimport argparse\nimport json\nimport random\nimport torch\n\nuer_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\nsys.path.append(uer_dir)\n\nfrom uer.layers import *\nfrom uer.encoders import *\nfrom uer.utils.constants import *\nfrom uer.utils.tokenizers import *\nfrom uer.utils.optimizers import *\nfrom uer.utils.config import load_hyperparam\nfrom uer.utils.seed import set_seed\nfrom uer.model_saver import save_model\nfrom uer.opts import finetune_opts\nfrom finetune.run_c3 import MultipleChoice\nfrom finetune.run_classifier import build_optimizer, load_or_initialize_parameters, train_model, batch_loader, evaluate\n\n\ndef tokenize_chid(text):\n output = []\n first_idiom = True\n while True:\n if first_idiom:\n idiom_index = text.find(\"#idiom\")\n output.extend(text[:idiom_index])\n output.append(text[idiom_index : idiom_index + 13])\n pre_idiom_index = idiom_index\n first_idiom = False\n else:\n if text[idiom_index + 1 :].find(\"#idiom\") == -1:\n output.extend(text[pre_idiom_index + 13 :])\n break\n else:\n idiom_index = idiom_index + 1 + text[idiom_index + 1 :].find(\"#idiom\")\n output.extend(text[pre_idiom_index + 13 : idiom_index])\n output.append(text[idiom_index : idiom_index + 13])\n pre_idiom_index = idiom_index\n\n return output\n\n\ndef add_tokens_around(tokens, idiom_index, tokens_num):\n left_tokens_num = tokens_num // 2\n right_tokens_num = tokens_num - left_tokens_num\n\n if idiom_index >= left_tokens_num and (len(tokens) - 1 - idiom_index) >= right_tokens_num:\n left_tokens = tokens[idiom_index - left_tokens_num : idiom_index]\n right_tokens = tokens[idiom_index + 1 : idiom_index + 1 + right_tokens_num]\n elif idiom_index < left_tokens_num:\n left_tokens = tokens[:idiom_index]\n right_tokens = tokens[idiom_index + 1 : idiom_index + 1 + tokens_num - len(left_tokens)]\n elif (len(tokens) - 1 - idiom_index) < right_tokens_num:\n right_tokens = tokens[idiom_index + 1 :]\n left_tokens = tokens[idiom_index - (tokens_num - len(right_tokens)) : idiom_index]\n\n return left_tokens, right_tokens\n\n\ndef read_dataset(args, data_path, answer_path):\n if answer_path is not None:\n answers = json.load(open(answer_path))\n dataset = []\n max_tokens_for_doc = args.seq_length - 3\n group_index = 0\n\n for line in open(data_path, mode=\"r\", encoding=\"utf-8\"):\n example = json.loads(line)\n options = example[\"candidates\"]\n for context in example[\"content\"]:\n chid_tokens = tokenize_chid(context)\n tags = [token for token in chid_tokens if \"#idiom\" in token]\n for tag in tags:\n if answer_path is not None:\n tgt = answers[tag]\n else:\n tgt = -1\n tokens = []\n for i, token in enumerate(chid_tokens):\n if \"#idiom\" in token:\n sub_tokens = [str(token)]\n else:\n sub_tokens = args.tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tokens.append(sub_token)\n idiom_index = tokens.index(tag)\n left_tokens, right_tokens = add_tokens_around(tokens, idiom_index, max_tokens_for_doc - 1)\n\n for i in range(len(left_tokens)):\n if \"#idiom\" in left_tokens[i] and left_tokens[i] != tag:\n left_tokens[i] = \"[MASK]\"\n for i in range(len(right_tokens)):\n if \"#idiom\" in right_tokens[i] and right_tokens[i] != tag:\n right_tokens[i] = \"[MASK]\"\n\n dataset.append(([], tgt, [], tag, group_index))\n\n for option in options:\n option_tokens = args.tokenizer.tokenize(option)\n tokens = [\"[CLS]\"] + option_tokens + [\"[SEP]\"] + 
left_tokens + [\"[unused1]\"] + right_tokens + [\"[SEP]\"]\n\n src = args.tokenizer.convert_tokens_to_ids(tokens)[: args.seq_length]\n seg = [0] * len(src)\n\n while len(src) < args.seq_length:\n src.append(0)\n seg.append(0)\n\n dataset[-1][0].append(src)\n dataset[-1][2].append(seg)\n\n while len(dataset[-1][0]) < args.max_choices_num:\n dataset[-1][0].append([0] * args.seq_length)\n dataset[-1][2].append([0] * args.seq_length)\n group_index += 1\n\n return dataset\n\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n finetune_opts(parser)\n\n parser.add_argument(\"--train_answer_path\", type=str, required=True,\n help=\"Path of the answers for trainset.\")\n parser.add_argument(\"--dev_answer_path\", type=str, required=True,\n help=\"Path of the answers for devset.\")\n\n parser.add_argument(\"--max_choices_num\", default=10, type=int,\n help=\"The maximum number of cadicate answer, shorter than this will be padded.\")\n\n args = parser.parse_args()\n\n args.labels_num = args.max_choices_num\n\n # Load the hyperparameters from the config file.\n args = load_hyperparam(args)\n\n set_seed(args.seed)\n\n # Build tokenizer.\n args.tokenizer = CharTokenizer(args)\n\n # Build multiple choice model.\n model = MultipleChoice(args)\n\n # Load or initialize parameters.\n load_or_initialize_parameters(args, model)\n\n args.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = model.to(args.device)\n\n # Training phase.\n trainset = read_dataset(args, args.train_path, args.train_answer_path)\n random.shuffle(trainset)\n instances_num = len(trainset)\n batch_size = args.batch_size\n\n src = torch.LongTensor([example[0] for example in trainset])\n tgt = torch.LongTensor([example[1] for example in trainset])\n seg = torch.LongTensor([example[2] for example in trainset])\n\n args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1\n\n print(\"Batch size: \", batch_size)\n print(\"The number of training instances:\", instances_num)\n\n optimizer, scheduler = build_optimizer(args, model)\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n args.amp = amp\n\n if torch.cuda.device_count() > 1:\n print(\"{} GPUs are available. Let's use them.\".format(torch.cuda.device_count()))\n model = torch.nn.DataParallel(model)\n args.model = model\n\n total_loss, result, best_result = 0.0, 0.0, 0.0\n\n print(\"Start training.\")\n\n for epoch in range(1, args.epochs_num + 1):\n model.train()\n for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):\n\n loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch)\n total_loss += loss.item()\n\n if (i + 1) % args.report_steps == 0:\n print(\"Epoch id: {}, Training steps: {}, Avg loss: {:.3f}\".format(epoch, i + 1, total_loss / args.report_steps))\n total_loss = 0.0\n\n result = evaluate(args, read_dataset(args, args.dev_path, args.dev_answer_path))\n if result[0] > best_result:\n best_result = result[0]\n save_model(model, args.output_model_path)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.save",
"torch.load"
],
[
"torch.cuda.device_count",
"torch.LongTensor",
"torch.cuda.is_available",
"torch.nn.DataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
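The conversion script above is essentially a key-renaming pass over a checkpoint's state dict. A reduced sketch of that pattern: the stand-in tensor and its vocabulary size are assumptions, and a real run would torch.load() an actual HuggingFace checkpoint instead of building one inline.

    import collections
    import torch

    # Stand-in for torch.load("huggingface_model.bin", map_location="cpu")
    input_model = {"bert.embeddings.word_embeddings.weight": torch.zeros(21128, 768)}

    output_model = collections.OrderedDict()
    for old_key, tensor in input_model.items():
        # Map HuggingFace naming onto UER naming, as in the script above.
        new_key = old_key.replace("bert.embeddings.word_embeddings",
                                  "embedding.word_embedding")
        output_model[new_key] = tensor

    torch.save(output_model, "converted_model.bin")
    reloaded = torch.load("converted_model.bin", map_location="cpu")
    assert "embedding.word_embedding.weight" in reloaded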
zjplab/Pedestron | [
"07e1a2cee82b57e1584b0c744f5b44f1ae92be73",
"07e1a2cee82b57e1584b0c744f5b44f1ae92be73"
] | [
"mmdet/models/bbox_heads/mgan_head.py",
"mmdet/models/losses/focal_loss.py"
] | [
"import torch.nn as nn\n\nfrom ..registry import HEADS\nfrom ..utils import ConvModule\nfrom mmdetection.core import auto_fp16\n\n\[email protected]_module\nclass MGANHead(nn.Module):\n\n def __init__(self,\n num_convs=2,\n roi_feat_size=7,\n in_channels=512,\n conv_out_channels=512,\n conv_cfg=None,\n norm_cfg=None):\n super(MGANHead, self).__init__()\n self.num_convs = num_convs\n self.roi_feat_size = roi_feat_size\n self.in_channels = in_channels\n self.conv_out_channels = conv_out_channels\n\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.fp16_enabled = False\n\n self.convs = nn.ModuleList()\n for i in range(self.num_convs):\n in_channels = (\n self.in_channels if i == 0 else self.conv_out_channels)\n self.convs.append(\n ConvModule(\n in_channels,\n self.conv_out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg))\n logits_in_channel = self.conv_out_channels\n self.conv_logits = nn.Conv2d(logits_in_channel, 1, 1)\n self.relu = nn.ReLU(inplace=True)\n self.debug_imgs = None\n\n @auto_fp16()\n def forward(self, x):\n for conv in self.convs:\n x = conv(x)\n x = self.conv_logits(x).sigmoid() * x\n return x\n\n\n",
"import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmdetection.ops import sigmoid_focal_loss as _sigmoid_focal_loss\nfrom .utils import weight_reduce_loss\nfrom ..registry import LOSSES\n\n\n# This method is only for debugging\ndef py_sigmoid_focal_loss(pred,\n target,\n weight=None,\n gamma=2.0,\n alpha=0.25,\n reduction='mean',\n avg_factor=None):\n pred_sigmoid = pred.sigmoid()\n target = target.type_as(pred)\n pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)\n focal_weight = (alpha * target + (1 - alpha) *\n (1 - target)) * pt.pow(gamma)\n loss = F.binary_cross_entropy_with_logits(\n pred, target, reduction='none') * focal_weight\n loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n return loss\n\n\ndef sigmoid_focal_loss(pred,\n target,\n weight=None,\n gamma=2.0,\n alpha=0.25,\n reduction='mean',\n avg_factor=None):\n # Function.apply does not accept keyword arguments, so the decorator\n # \"weighted_loss\" is not applicable\n loss = _sigmoid_focal_loss(pred, target, gamma, alpha)\n # TODO: find a proper way to handle the shape of weight\n if weight is not None:\n weight = weight.view(-1, 1)\n loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n return loss\n\n\[email protected]_module\nclass FocalLoss(nn.Module):\n\n def __init__(self,\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n reduction='mean',\n loss_weight=1.0):\n super(FocalLoss, self).__init__()\n assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'\n self.use_sigmoid = use_sigmoid\n self.gamma = gamma\n self.alpha = alpha\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n def forward(self,\n pred,\n target,\n weight=None,\n avg_factor=None,\n reduction_override=None):\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n if self.use_sigmoid:\n loss_cls = self.loss_weight * sigmoid_focal_loss(\n pred,\n target,\n weight,\n gamma=self.gamma,\n alpha=self.alpha,\n reduction=reduction,\n avg_factor=avg_factor)\n else:\n raise NotImplementedError\n return loss_cls\n"
] | [
[
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.ReLU"
],
[
"torch.nn.functional.binary_cross_entropy_with_logits"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
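py_sigmoid_focal_loss above depends on mmdet's weight_reduce_loss helper. A self-contained sketch of the same focal-loss math (the Lin et al. formulation with gamma=2.0, alpha=0.25), reduced here with a plain mean instead of the weighted reduction:

    import torch
    import torch.nn.functional as F

    def sigmoid_focal_loss(pred, target, gamma=2.0, alpha=0.25):
        p = pred.sigmoid()
        # Probability assigned to the wrong class for each example.
        pt = (1 - p) * target + p * (1 - target)
        focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma)
        bce = F.binary_cross_entropy_with_logits(pred, target, reduction="none")
        return (bce * focal_weight).mean()

    logits = torch.tensor([2.0, -1.0, 0.5])
    labels = torch.tensor([1.0, 0.0, 1.0])
    print(sigmoid_focal_loss(logits, labels))  # easy examples are down-weighted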
bucaps/HotSpotMap | [
"655d83d98316acbf9328519850268f548ae44af4"
] | [
"HotSpotMap.py"
] | [
"# HotSpotMap: A python based temperature (thermal) map generation\n# tool for HotSpot-6.0 (http://lava.cs.virginia.edu/HotSpot/)\n# This tool uses python's turtle library\n#\n# Author: Gaurav Kothari ([email protected]) Copyright 2021\n#\n# This tool generates:\n# 1) Floor-plan image (using floor-plan file)\n# 2) Thermal map (using floor-plan file and steady temperature file)\n# 3) Fine grained thermal map (using floor-plan file and grid steady temperature file)\n#\n# Supports 2D and 3D stacked systems\n# Supports output formats: '.eps' and '.pdf'\nimport os\nimport time\nimport subprocess\nimport tkinter\nimport turtle\nimport tempfile\nimport numpy as np\nimport matplotlib\nfrom matplotlib import cm\nfrom matplotlib.colors import LinearSegmentedColormap\nimport argparse\nfrom sys import argv\n\n\n# To represent each floor-plan unit\nclass FloorplanUnit():\n def __init__(self, name, width, height, xpos, ypos, temp=0):\n self.name = name\n self.width = width\n self.height = height\n self.xpos = xpos\n self.ypos = ypos\n self.temp = temp # temperature\n\n\nmsg_prefix = \" HotSpotMap:\"\n\n# Home co-ordinates for drawing the chip floor-plan\n# Note: turtle's default home co-ordinates are (0,0)\n# For drawing the floor-plan, we will start from (-w/2,-h/2), where\n# w = width of the chip, h = height of the chip\nchip_home_xpos = 0\nchip_home_ypos = 0\n\n\n# Inspired from HotSpot 6.0\ndef get_chip_width(flp_units):\n min_x = flp_units[0].xpos\n max_x = flp_units[0].xpos + flp_units[0].width\n\n for i in range(1, len(flp_units)):\n if flp_units[i].xpos < min_x:\n min_x = flp_units[i].xpos\n if (flp_units[i].xpos + flp_units[i].width) > max_x:\n max_x = flp_units[i].xpos + flp_units[i].width\n\n return (max_x - min_x) * 1e3\n\n\n# Inspired from HotSpot 6.0\ndef get_chip_height(flp_units):\n min_y = flp_units[0].ypos\n max_y = flp_units[0].ypos + flp_units[0].height\n\n for i in range(1, len(flp_units)):\n if flp_units[i].ypos < min_y:\n min_y = flp_units[i].ypos\n if (flp_units[i].ypos + flp_units[i].height) > max_y:\n max_y = flp_units[i].ypos + flp_units[i].height\n\n return (max_y - min_y) * 1e3\n\n\ndef get_pos_from_chip_home(xpos, ypos):\n return (chip_home_xpos + xpos, chip_home_ypos + ypos)\n\n\n# Only for 3D systems, collect all the output files\n# (for every layer) to combine them later as a single PDF\noutput_3d_files = []\n\n\n#\n# Functions related to Turtle\n#\ndef turtle_setup(config):\n # setup screen\n ts = turtle.Screen()\n cw = (config.chip_width * 1e-3 * config.zoom_by)\n ch = (config.chip_height * 1e-3 * config.zoom_by)\n ts.reset()\n ts.colormode(255)\n ts.tracer(0, 0)\n global chip_home_xpos\n chip_home_xpos = -(cw / 2)\n global chip_home_ypos\n chip_home_ypos = -(ch / 2)\n\n # create turtle cursor\n t = turtle.Turtle()\n t.pen(shown=False)\n t.pensize(0.5)\n t.hideturtle()\n t.penup()\n t.setpos(chip_home_xpos, chip_home_ypos)\n return t\n\n\ndef turtle_save_image(config):\n ts = turtle.getscreen()\n eps_file = os.path.join(\n config.output_dir, \"{f}-{a}.eps\".format(f=config.output_file,\n a=config.action))\n pdf_file = os.path.join(\n config.output_dir, \"{f}-{a}.pdf\".format(f=config.output_file,\n a=config.action))\n\n canvas = ts.getcanvas()\n canvas.config(width=config.chip_width * 1e-3 * config.zoom_by,\n height=config.chip_height * 1e-3 * config.zoom_by)\n canvas.postscript(file=eps_file)\n print(\"{p} Generated eps file: {f}\".format(p=msg_prefix, f=eps_file))\n cmd = \"ps2pdf {i} {o}\".format(i=eps_file, o=pdf_file)\n process = subprocess.Popen(cmd, 
shell=True)\n process.wait()\n print(\"{p} Generated pdf file: {f}\".format(p=msg_prefix, f=pdf_file))\n\n if config.model_3d:\n output_3d_files.append(pdf_file)\n\n\ndef turtle_draw_unit(t,\n xpos,\n ypos,\n width,\n height,\n config,\n name,\n border_color=\"\",\n fill_color=\"\",\n hide_names=True):\n xpos *= config.zoom_by\n ypos *= config.zoom_by\n pos = get_pos_from_chip_home(xpos, ypos)\n xpos = pos[0]\n ypos = pos[1]\n width *= config.zoom_by\n height *= config.zoom_by\n t.penup()\n t.setpos(xpos, ypos)\n t.color(border_color, fill_color)\n if fill_color:\n t.begin_fill()\n t.pendown()\n t.forward(width)\n t.left(90)\n t.forward(height)\n t.left(90)\n t.forward(width)\n t.left(90)\n t.forward(height)\n t.left(90)\n if fill_color:\n t.end_fill()\n t.penup()\n\n if name and (hide_names == False):\n t.setpos(xpos + (width / 2), ypos + (height / 2))\n t.pendown()\n t.color(\"black\")\n print_name = name\n if config.print_area:\n area = (width / config.zoom_by) * (height /\n config.zoom_by) * 1e6 # mm2\n area = round(area, 3)\n print_name += \" ({a})\".format(a=area)\n t.write(print_name,\n align=\"center\",\n font=(config.font, config.font_size, config.font_weight))\n t.penup()\n\n\ndef draw_chip_dimensions(t, config):\n # draw height scale on left of the floor-plan\n arrow_height = 15\n xpos = -30\n ypos = 0\n t.penup()\n t.color(\"black\")\n t.setpos(get_pos_from_chip_home(xpos, ypos))\n t.left(90)\n t.pendown()\n t.forward(config.chip_height * 1e-3 * config.zoom_by)\n temp = t.pos()\n t.left(135)\n t.forward(arrow_height)\n t.setpos(temp)\n t.right(270)\n t.forward(arrow_height)\n t.penup()\n t.setpos(get_pos_from_chip_home(xpos, ypos))\n t.pendown()\n t.left(90)\n t.forward(arrow_height)\n t.penup()\n t.setpos(get_pos_from_chip_home(xpos, ypos))\n t.right(270)\n t.pendown()\n t.forward(arrow_height)\n t.right(135) # reset\n t.penup()\n\n canvas = turtle.getcanvas()\n xpos = -45\n ypos = (config.chip_height * 1e-3 * config.zoom_by) / 2\n pos = get_pos_from_chip_home(xpos, ypos)\n canvas.create_text(pos[0],\n pos[1],\n text=\"Height {h} mm\".format(h=config.chip_height),\n angle=90,\n font=(config.font, config.font_size,\n config.font_weight))\n\n # draw width scale on top of the floor-plan\n xpos = 0\n ypos = (config.chip_height * 1e-3 * config.zoom_by) + 30\n t.penup()\n t.setpos(get_pos_from_chip_home(xpos, ypos))\n t.pendown()\n t.forward(config.chip_width * 1e-3 * config.zoom_by)\n temp = t.pos()\n t.left(135)\n t.forward(arrow_height)\n t.setpos(temp)\n t.right(270)\n t.forward(arrow_height)\n t.penup()\n t.setpos(get_pos_from_chip_home(xpos, ypos))\n t.pendown()\n t.left(90)\n t.forward(arrow_height)\n t.penup()\n t.setpos(get_pos_from_chip_home(xpos, ypos))\n t.right(270)\n t.pendown()\n t.forward(arrow_height)\n t.penup()\n\n canvas = turtle.getcanvas()\n xpos = (config.chip_width * 1e-3 * config.zoom_by) / 2\n ypos = -45\n pos = get_pos_from_chip_home(xpos, ypos)\n canvas.create_text(pos[0],\n pos[1],\n text=\"Width {w} mm\".format(w=config.chip_width),\n angle=0,\n font=(config.font, config.font_size,\n config.font_weight))\n\n\n#\n# Function related to temperature color bar\n#\n\n# Colors used for temperature map\ncolors = [\n \"#ff0000\",\n \"#ff3300\",\n \"#ff6600\",\n \"#ff9900\",\n \"#ffcc00\",\n \"#ffff00\",\n \"#ccff00\",\n \"#99ff00\",\n \"#66ff00\",\n \"#33ff00\",\n \"#00ff00\",\n \"#00ff33\",\n \"#00ff66\",\n \"#00ff99\",\n \"#00ffcc\",\n \"#00ffff\",\n \"#00ccff\",\n \"#0099ff\",\n \"#0066ff\",\n \"#0033ff\",\n \"#0000ff\",\n]\n\n\n# Color map for 
temperatures\ndef get_chip_temp_cmap():\n global colors\n colors.reverse()\n cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\n \"chipTemp\", colors)\n return cmap\n\n\ndef draw_color_bar(t, config, colors, temp_min, temp_max):\n xpos = ((config.chip_width + 0.05) * 1e-3)\n ypos = 0\n color_bar_max_height = config.chip_height * 1e-3\n color_cell_width = color_bar_max_height / len(colors)\n color_cell_height = color_cell_width\n\n temp_cell_width = color_cell_width * 3\n temp_cell_height = color_cell_height\n\n interval = len(colors)\n temp_values = np.linspace(temp_min,\n temp_max,\n num=int(interval),\n endpoint=True)\n temp_values = [round(val, 2) for val in temp_values]\n\n i = 0\n for color in colors:\n # draw the temperature value\n turtle_draw_unit(t,\n xpos,\n ypos,\n temp_cell_width,\n temp_cell_height,\n config,\n name=\"{f}K\".format(f=temp_values[i]),\n border_color=\"\",\n fill_color=\"\",\n hide_names=False)\n # color cell\n turtle_draw_unit(t,\n xpos + temp_cell_width,\n ypos,\n color_cell_width,\n color_cell_height,\n config,\n name=\"\",\n border_color=\"black\",\n fill_color=color)\n ypos += color_cell_height\n i += 1\n\n\n#\n# Functions related to drawing chip floor-plan\n#\n\n\n# Checks if floor-plan has duplicated units\ndef check_duplicated_flp_units(flp_units_names):\n flp_units_namesSet = set(flp_units_names)\n\n if len(flp_units_namesSet) != len(flp_units_names):\n print(\"{p} warning! duplicated floor-plan units detected\".format(\n p=msg_prefix))\n\n\ndef draw_floorplan(config, t):\n start = time.time()\n file = open(config.floor_plan, \"r\")\n flp = file.readlines()\n flp_units = []\n flp_units_names = []\n\n for line in flp:\n if \"#\" in line or line == \"\\n\" or not line:\n continue\n line = line.split(\"\\t\")\n flp_units_names.append(line[0])\n flp_units.append(\n FloorplanUnit(line[0], float(line[1]), float(line[2]),\n float(line[3]), float(line[4])))\n\n check_duplicated_flp_units(flp_units_names)\n\n print(\"{p} Drawing floor-plan\".format(p=msg_prefix))\n print(\n \"{p} Reading floor-plan file {f}: found {u} units, {w} mm chip-width, {h} mm chip-height\"\n .format(f=config.floor_plan,\n p=msg_prefix,\n u=len(flp_units),\n w=config.chip_width,\n h=config.chip_height))\n\n file.close()\n\n for unit in flp_units:\n turtle_draw_unit(turtle,\n unit.xpos,\n unit.ypos,\n unit.width,\n unit.height,\n config,\n name=unit.name,\n border_color=\"black\",\n fill_color=\"\",\n hide_names=config.hide_names)\n\n end = time.time()\n print(\"{p} Finished drawing floor-plan in {t} seconds\".format(\n p=msg_prefix, t=round((end - start), 2)))\n\n\n#\n# Functions related to draw the temperature maps\n#\n\n\n# This parses the given temperature file and extracts\n# min and max temperatures (for steady and grid steady file)\ndef get_temperature_file_config(temperature_file, grid_steady_file_3d=\"\"):\n file = open(temperature_file, \"r\")\n lines = file.readlines()\n\n temperatures = []\n for line in lines:\n if line == \"\\n\" or not line:\n continue\n line = line.split(\"\\t\")\n if len(line) == 1:\n continue # for 3D grid steady file, skip layer header\n temperatures.append(float(line[1]))\n\n file.close()\n\n grid_steady_config = []\n grid_steady_config.append(str(min(temperatures)))\n grid_steady_config.append(str(max(temperatures)))\n return grid_steady_config\n\n\ndef draw_grid_steady_thermal_map(config, turtle, grid_steady_file_3d=\"\"):\n start = time.time()\n\n temperature_limit_file = config.temperature_file\n\n if config.model_3d:\n # for 3D systems, 
use the original grid-steady file containing\n # the temperature data for all the layers to extract min and max\n # temperatures, because all the layers must use the same color range\n temperature_limit_file = grid_steady_file_3d\n\n # find min and max temperatures reported in grid steady file\n grid_steady_config = get_temperature_file_config(temperature_limit_file)\n\n rows = config.grid_rows\n cols = config.grid_cols\n temp_min = float(grid_steady_config[0])\n temp_max = float(grid_steady_config[1])\n print(\n \"{p} Reading grid steady file {f}, with {r} rows, {c} cols, {min} min-temp, {max} max-temp\"\n .format(p=msg_prefix,\n f=config.temperature_file,\n r=rows,\n c=cols,\n min=temp_min,\n max=temp_max))\n\n # normalize temperature range between 0 and 1, which will be used to fetch color from color map\n norm_temp_range = matplotlib.colors.Normalize(vmin=temp_min, vmax=temp_max)\n\n # generate color map\n cmap = get_chip_temp_cmap()\n\n global colors\n draw_color_bar(turtle, config, colors, temp_min, temp_max)\n\n grid_cell_width = (config.chip_width * 1e-3) / cols\n grid_cell_height = (config.chip_height * 1e-3) / rows\n\n file = open(config.temperature_file, \"r\")\n lines = file.readlines()\n\n xpos = 0\n ypos = (config.chip_height * 1e-3) - grid_cell_height\n print(\"{p} Drawing temperature grid\".format(p=msg_prefix))\n\n next_col = 0\n for line in lines:\n if line == \"\\n\" or not line:\n continue\n else:\n line = line.split(\"\\t\")\n col = line[0] # column number\n temp = float(\n line[1]) # temperature of the cell at current row and column\n\n color = matplotlib.colors.rgb2hex(cmap(norm_temp_range(temp)))\n turtle_draw_unit(turtle,\n xpos,\n ypos,\n grid_cell_width,\n grid_cell_height,\n config,\n name=\"\",\n border_color=color,\n fill_color=color)\n xpos += grid_cell_width\n next_col += 1\n\n if next_col == config.grid_cols:\n # one complete row is finished\n xpos = 0\n next_col = 0\n ypos -= grid_cell_height\n\n file.close()\n end = time.time()\n print(\"{p} Finished drawing temperature grid in {t} seconds\".format(\n p=msg_prefix, t=round((end - start), 2)))\n\n\ndef draw_steady_thermal_map(config, turtle):\n start = time.time()\n # find min and max temperatures reported in steady file\n steady_config = get_temperature_file_config(config.temperature_file)\n\n temp_min = float(steady_config[0])\n temp_max = float(steady_config[1])\n print(\"{p} Reading steady file {f}, found {min} min-temp, {max} max-temp\".\n format(p=msg_prefix,\n f=config.temperature_file,\n min=temp_min,\n max=temp_max))\n\n # normalize temperature range between 0 and 1, which will be used to fetch color from color map\n norm_temp_range = matplotlib.colors.Normalize(vmin=temp_min, vmax=temp_max)\n\n # generate color map\n cmap = get_chip_temp_cmap()\n\n draw_color_bar(turtle, config, colors, temp_min, temp_max)\n\n # read all the floor-plan units\n file = open(config.floor_plan, \"r\")\n flp = file.readlines()\n flp_units = []\n\n for line in flp:\n if \"#\" in line or line == \"\\n\":\n continue\n line = line.split(\"\\t\")\n flp_units.append(\n FloorplanUnit(line[0], float(line[1]), float(line[2]),\n float(line[3]), float(line[4])))\n\n file.close()\n\n file = open(config.temperature_file, \"r\")\n lines = file.readlines()\n\n for line in lines:\n line = line.split(\"\\t\")\n name = line[0]\n temp = float(line[1])\n\n # for 3D steady temperature file, each unit is appended with prefix layer_<layer>_\n # we need to remove that prefix\n if config.model_3d and \"layer_\" in name:\n name = 
name[name.find(\"_\") + 1:]\n name = name[name.find(\"_\") + 1:]\n\n for unit in flp_units:\n if unit.name == name:\n color = matplotlib.colors.rgb2hex(cmap(norm_temp_range(temp)))\n turtle_draw_unit(turtle,\n unit.xpos,\n unit.ypos,\n unit.width,\n unit.height,\n config,\n name=unit.name,\n border_color=\"black\",\n fill_color=color,\n hide_names=config.hide_names)\n\n file.close()\n end = time.time()\n print(\"{p} Finished steady temperature map in {t} seconds\".format(\n p=msg_prefix, t=round((end - start), 2)))\n\n\n#\n# Function related to parse file for 3D system (such as LCF and grid-steady file)\n#\n\n\n# Parse HotSpot's layer configuration file (lcf) for 3D systems\n# For 3D systems, config.floor_plan is the lCF\ndef read_lcf(config):\n file = open(config.floor_plan, \"r\")\n lines = file.readlines()\n\n config_lines = [\n ] # To store lcf after removing all the comments and blank lines\n\n for line in lines:\n if \"#\" in line or not line or line == \"\\n\":\n continue\n config_lines.append(line)\n\n file.close()\n\n layer_num_pos = 0 # pos of layer number for the corresponding layer\n has_power_pos = 2 # pos of power dissipation flag for the corresponding layer\n floor_plan_file_pos = 6 # pos of floor plan file for the corresponding layer\n\n current_line = 0\n current_layer = []\n\n lcf_home_dir = os.path.dirname(config.floor_plan)\n lcf_breakdown_list = []\n\n while current_line < len(config_lines):\n if current_line and ((current_line % 7) == 0):\n temp = []\n temp.append(current_layer[layer_num_pos].rstrip())\n temp.append(current_layer[has_power_pos].rstrip())\n temp.append(\n os.path.join(lcf_home_dir,\n current_layer[floor_plan_file_pos].rstrip()))\n lcf_breakdown_list.append(temp)\n current_layer.clear()\n\n current_layer.append(config_lines[current_line])\n current_line += 1\n\n print(\"{p} Finished reading lcf file: {f}, found {flp} floor-plan files\".\n format(p=msg_prefix,\n f=config.floor_plan,\n flp=len(lcf_breakdown_list)))\n\n return lcf_breakdown_list\n\n\ndef extract_grid_temperatures_for_layer(config, temperature_file, layer):\n file = open(temperature_file, \"r\")\n lines = file.readlines()\n file.close()\n\n # remove all the empty lines\n cleaned_lines = []\n for line in lines:\n if line == \"\\n\" or not line:\n continue\n cleaned_lines.append(line)\n\n line_num = 0\n look_for_layer = \"layer_{l}\".format(l=layer)\n\n while cleaned_lines[line_num].rstrip() != look_for_layer:\n line_num += 1\n\n print(\n \"{p} Grid temperature data for layer {l} starts at line {n} in file: {f}\"\n .format(p=msg_prefix, l=layer, n=line_num, f=temperature_file))\n\n # grid temperatures for current layer start at line_num\n line_num += 1 # skip the header line for this layer\n file = open(\"temp.grid.steady\", \"w\")\n\n # we will read grid_rows x grid_cols line from this line onwards\n lines_read = line_num\n lines_to_read = line_num + (config.grid_rows * config.grid_cols)\n\n while lines_read < lines_to_read:\n current_line = cleaned_lines[lines_read]\n file.write(\"{l}\\n\".format(l=current_line.rstrip()))\n lines_read += 1\n\n file.close()\n\n\n# For 2D systems\ndef main_2d(config):\n turtle = turtle_setup(config)\n if config.action == \"flp\":\n draw_floorplan(config, turtle)\n else:\n if config.action == \"grid-steady\":\n draw_grid_steady_thermal_map(config, turtle)\n draw_floorplan(\n config, turtle\n ) # This will superimpose floor-plan onto temperature grid\n else:\n draw_steady_thermal_map(config, turtle)\n\n if config.print_chip_dim:\n draw_chip_dimensions(turtle, 
config)\n turtle_save_image(config)\n\n\n# For 3D stacked systems\ndef main_3d(config):\n lcf_breakdown_list = read_lcf(config)\n\n output_file_bkp = config.output_file\n temperature_file_bkp = config.temperature_file\n\n for lcf_layer in lcf_breakdown_list:\n layer = int(lcf_layer[0]) # layer number\n\n # override the config parameters\n config.floor_plan = lcf_layer[2]\n config.output_file = output_file_bkp\n config.output_file += \"-layer-{l}\".format(l=layer)\n\n turtle = turtle_setup(config)\n\n print(\"{s} Processing layer {l} with floor-plan: {f}\".format(\n s=msg_prefix, l=layer, f=config.floor_plan))\n\n if config.action == \"flp\":\n draw_floorplan(config, turtle)\n else:\n if config.action == \"grid-steady\":\n extract_grid_temperatures_for_layer(config,\n temperature_file_bkp,\n layer)\n\n # this file has extracted grid temperatures for current layer\n config.temperature_file = \"temp.grid.steady\"\n draw_grid_steady_thermal_map(config, turtle,\n temperature_file_bkp)\n draw_floorplan(\n config, turtle\n ) # this will superimpose floor-plan onto temperature grid\n os.remove(\"temp.grid.steady\")\n else:\n draw_steady_thermal_map(config, turtle)\n\n if config.print_chip_dim:\n draw_chip_dimensions(turtle, config)\n\n\n turtle_save_image(config)\n\n print(\"\")\n\n if config.concat:\n # this code block combines all the files\n # generated for each layer into a single PDF\n output_file_list_str = \"\"\n\n for file in output_3d_files:\n output_file_list_str += \"{f} \".format(f=file)\n\n final_concat_output = os.path.join(\n config.output_dir, \"{p}-{a}-concat.pdf\".format(p=output_file_bkp,a=config.action))\n\n pdfjam = \"pdfjam --nup {n}x1 --landscape {files} -o {output}\".format(\n n=len(output_3d_files),\n files=output_file_list_str,\n output=final_concat_output)\n\n print(\"{p} Executing {c}\".format(p=msg_prefix, c=pdfjam))\n process = subprocess.Popen(pdfjam, shell=True)\n process.wait()\n stdout, stderr = process.communicate()\n\n if stdout:\n print(stdout)\n\n if stderr:\n print(stderr)\n\n\ndef setup_chip_dimensions(config):\n floor_plan_file = config.floor_plan\n\n if config.model_3d:\n lcf_breakdown_list = read_lcf(config)\n # index 0 in lcf_breakdown_list is the 1st layer in 3D system\n # index 2 in 1st layer is the floor-plan file for that layer\n # for stacked 3D system, all layers must have equal dimensions, so pick any 1 layer\n floor_plan_file = lcf_breakdown_list[0][2]\n\n file = open(floor_plan_file, \"r\")\n flp = file.readlines()\n flp_units = []\n file.close()\n\n for line in flp:\n if \"#\" in line or line == \"\\n\" or not line:\n continue\n line = line.split(\"\\t\")\n flp_units.append(\n FloorplanUnit(line[0], float(line[1]), float(line[2]),\n float(line[3]), float(line[4])))\n\n config.chip_height = round(get_chip_height(flp_units), 5)\n config.chip_width = round(get_chip_width(flp_units), 5)\n\n print(\"{p} Calculated chip's width as {w} mm and chip's height as {h} mm\".\n format(p=msg_prefix, w=config.chip_width, h=config.chip_height))\n\n\ndef parse_command_line():\n version = 2.0\n description = \"A python based temperature (thermal) map generation tool for HotSpot-6.0 (http://lava.cs.virginia.edu/HotSpot/), Author: Gaurav Kothari ([email protected]) v{v}\".format(\n v=version)\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument(\"-a\",\n \"--action\",\n action=\"store\",\n dest=\"action\",\n required=True,\n choices=[\"flp\", \"steady\", \"grid-steady\"],\n help=\"Action type\")\n parser.add_argument(\"-3D\",\n 
\"--model-3D\",\n action=\"store_true\",\n dest=\"model_3d\",\n required=False,\n default=False,\n help=\"To indicate a 3D system\")\n parser.add_argument(\"-f\",\n \"--flp\",\n action=\"store\",\n dest=\"floor_plan\",\n required=True,\n help=\"Floor-plan file\")\n parser.add_argument(\n \"-t\",\n \"--temperature\",\n action=\"store\",\n dest=\"temperature_file\",\n required=(\"steady\" in argv) or (\"grid-steady\" in argv),\n help=\n \"Steady temperature file or Grid steady temperature file based on action\"\n )\n parser.add_argument(\"-r\",\n \"--row\",\n action=\"store\",\n dest=\"grid_rows\",\n type=int,\n required=(\"grid-steady\" in argv),\n help=\"Number of rows in grid-steady model\")\n parser.add_argument(\"-c\",\n \"--col\",\n action=\"store\",\n dest=\"grid_cols\",\n type=int,\n required=(\"grid-steady\" in argv),\n help=\"Number of columns in grid-steady model\")\n parser.add_argument(\"-ft\",\n \"--font\",\n action=\"store\",\n dest=\"font\",\n required=False,\n default=\"Ubuntu\",\n help=\"Font family\")\n parser.add_argument(\"-fts\",\n \"--font-size\",\n action=\"store\",\n dest=\"font_size\",\n required=False,\n default=9,\n type=int,\n help=\"Font size\")\n parser.add_argument(\"-ftw\",\n \"--font-weight\",\n action=\"store\",\n dest=\"font_weight\",\n required=False,\n default=\"normal\",\n help=\"Font weight\")\n parser.add_argument(\"-o\",\n \"--output-file\",\n action=\"store\",\n dest=\"output_file\",\n required=True,\n help=\"Output file name prefix\")\n parser.add_argument(\"-d\",\n \"--output-directory\",\n action=\"store\",\n dest=\"output_dir\",\n required=False,\n default=os.getcwd(),\n help=\"Output directory\")\n parser.add_argument(\"-hn\",\n \"--hide-names\",\n action=\"store_true\",\n dest=\"hide_names\",\n required=False,\n default=False,\n help=\"Hide names on floor-plan\")\n parser.add_argument(\"-z\",\n \"--zoom-by\",\n action=\"store\",\n dest=\"zoom_by\",\n type=int,\n required=False,\n default=75000,\n help=\"Zoom factor\")\n parser.add_argument(\"-pcd\",\n \"--print-chip-dim\",\n action=\"store_true\",\n dest=\"print_chip_dim\",\n required=False,\n default=False,\n help=\"Draw chip' width and height scale\")\n parser.add_argument(\"-concat\",\n \"--concat-3D\",\n action=\"store_true\",\n dest=\"concat\",\n required=False,\n default=False,\n help=\"Combines the images generated for all layer into a single PDF\")\n parser.add_argument(\n \"-pa\",\n \"--print-area\",\n action=\"store_true\",\n dest=\"print_area\",\n required=False,\n default=False,\n help=\n \"Print unit's area (mm2) alongside its name, rounded to three decimal places\"\n )\n args = parser.parse_args()\n print(\"{p} {d}\".format(p=msg_prefix, d=description))\n print(\"\")\n return args\n\n\ndef main():\n config = parse_command_line()\n\n # before we start drawing images, first quickly read floor-plan file\n # and calculate the chip's width and height\n setup_chip_dimensions(config)\n\n if config.model_3d:\n main_3d(config)\n else:\n main_2d(config)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.colors.Normalize"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
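get_chip_temp_cmap() and the grid-drawing loops above boil down to one pipeline: build a colormap, normalize kelvin into [0, 1], sample the map, and convert the RGBA value to hex for turtle. A two-stop sketch of that pipeline; the 310-350 K range is an assumed example, not from a real grid-steady file:

    import matplotlib.colors as mcolors

    # Two-stop blue->red map standing in for HotSpotMap's 21-colour gradient.
    cmap = mcolors.LinearSegmentedColormap.from_list("chipTemp",
                                                     ["#0000ff", "#ff0000"])
    norm = mcolors.Normalize(vmin=310.0, vmax=350.0)  # assumed temps (kelvin)

    temp = 342.5
    hex_color = mcolors.rgb2hex(cmap(norm(temp)))  # hotter cells map toward red
    print(hex_color)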
Xiaoying-Tian/selective-inference | [
"a20c5ad3f527beb709d5b8d7301016640738b092"
] | [
"selection/truncated/chi.py"
] | [
"\"\"\"\nThis module implements the class `truncated_chi2` which \nperforms (conditional) UMPU tests for Gaussians\nrestricted to a set of intervals.\n\n\"\"\"\nimport numpy as np\nimport mpmath as mp\nfrom scipy.stats import chi, chi2\n\nfrom .base import truncated, find_root\n\nclass truncated_chi(truncated):\n\n \"\"\"\n >>> from intervals import intervals\n >>> I = intervals.intersection(intervals((-1, 6)), \\\n intervals(( 0, 7)), \\\n ~intervals((1, 4)))\n >>> distr = trunc_chi(I, 3, 2.)\n >>> print distr.cdf(0)\n 0.0\n >>> z = distr.quantile(distr.cdf(5.))\n >>> np.abs(z - 5) < 1e-2\n True\n \"\"\"\n def __init__(self, I, k, scale = 1.):\n \"\"\"\n Create a new object for a truncated_chi distribution\n\n Parameters\n ----------\n I : intervals\n The intervals the distribution is truncated to\n\n k : int\n Number of degree of freedom of the distribution\n\n scale : float\n The distribution is \\sim scale * \\chi_k\n\n \n \"\"\"\n\n self._k = k\n self._scale = scale\n truncated.__init__(self, I)\n\n def _cdf_notTruncated(self, a, b, dps):\n \"\"\"\n Compute the probability of being in the interval (a, b)\n for a variable with a chi distribution (not truncated)\n \n Parameters\n ----------\n a, b : float\n Bounds of the interval. Can be infinite.\n\n dps : int\n Decimal precision (decimal places). Used in mpmath\n\n Returns\n -------\n p : float\n The probability of being in the intervals (a, b)\n P( a < X < b)\n for a non truncated variable\n\n \"\"\"\n scale = self._scale\n k = self._k\n\n dps_temp = mp.mp.dps\n mp.mp.dps = dps\n\n a = max(0, a)\n b = max(0, b)\n\n sf = mp.gammainc(1./2 * k, \n 1./2*((a/scale)**2), \n 1./2*((b/scale)**2), \n regularized=True)\n mp.mp.dps = dps_temp\n return sf\n\n def _pdf_notTruncated(self, z, dps):\n scale = self._scale\n k = self._k\n dps = self._dps\n\n return chi.pdf(z/scale, k)\n\n def _quantile_notTruncated(self, q, tol=1.e-6):\n \"\"\"\n Compute the quantile for the non truncated distribution\n\n Parameters\n ----------\n q : float\n quantile you want to compute. Between 0 and 1\n\n tol : float\n precision for the output\n\n Returns\n -------\n x : float\n x such that P(X < x) = q\n\n \"\"\"\n scale = self._scale\n k = self._k\n dps = self._dps\n \n z_approx = scale * chi.ppf(q, k)\n \n epsilon = scale * 0.001\n lb = z_approx - epsilon\n ub = z_approx + epsilon\n\n f = lambda z: self._cdf_notTruncated(-np.inf, z, dps)\n\n z = find_root(f, q, lb, ub, tol)\n\n return z \n \n\nclass truncated_chi2(truncated):\n\n \"\"\"\n\n >>> from intervals import intervals\n >>> I = intervals.intersection(intervals((-1, 6)), \\\n intervals(( 0, 7)), \\\n ~intervals((1, 4)))\n >>> distr = trunc_chi(I, 3, 2.)\n >>> print distr.cdf(0)\n 0.0\n >>> z = distr.quantile(distr.cdf(5.))\n >>> np.abs(z - 5) < 1e-2\n True\n \"\"\"\n def __init__(self, I, k, scale = 1.):\n \"\"\"\n Create a new object for a truncated_chi distribution\n\n Parameters\n ----------\n I : intervals\n The intervals the distribution is truncated to\n\n k : int\n Number of degree of freedom of the distribution\n\n scale : float\n The distribution is \\sim scale * \\chi_k\n\n \n \"\"\"\n\n self._k = k\n self._scale = scale\n truncated.__init__(self, I)\n\n def _cdf_notTruncated(self, a, b, dps):\n \"\"\"\n Compute the probability of being in the interval (a, b)\n for a variable with a chi distribution (not truncated)\n \n Parameters\n ----------\n a, b : float\n Bounds of the interval. Can be infinite.\n\n dps : int\n Decimal precision (decimal places). 
Used in mpmath\n\n Returns\n -------\n p : float\n The probability of being in the intervals (a, b)\n P( a < X < b)\n for a non truncated variable\n\n \"\"\"\n scale = self._scale\n k = self._k\n\n dps_temp = mp.mp.dps\n mp.mp.dps = dps\n\n a = max(0, a)\n b = max(0, b)\n\n cdf = mp.gammainc(1./2 * k, \n 1./2*(a/scale), \n 1./2*(b/scale), \n regularized=True)\n mp.mp.dps = dps_temp\n return cdf\n\n def _pdf_notTruncated(self, z, dps):\n scale = self._scale\n k = self._k\n dps = self._dps\n\n return chi2.pdf(z/scale, k)\n\n def _quantile_notTruncated(self, q, tol=1.e-6):\n \"\"\"\n Compute the quantile for the non truncated distribution\n\n Parameters\n ----------\n q : float\n quantile you want to compute. Between 0 and 1\n\n tol : float\n precision for the output\n\n Returns\n -------\n x : float\n x such that P(X < x) = q\n\n \"\"\"\n scale = self._scale\n k = self._k\n dps = self._dps\n \n z_approx = scale * chi.ppf(q, k)\n \n epsilon = scale * 0.001\n lb = z_approx - epsilon\n ub = z_approx + epsilon\n\n f = lambda z: self._cdf_notTruncated(-np.inf, z, dps)\n\n z = find_root(f, q, lb, ub, tol)\n\n return z \n\n def _pdf_notTruncated(self, z, dps):\n scale = self._scale\n k = self._k\n #dps = self._dps\n\n return chi2.pdf(z/scale, k)\n\n def _quantile_notTruncated(self, q, tol=1.e-6):\n \"\"\"\n Compute the quantile for the non truncated distribution\n\n Parameters\n ----------\n q : float\n quantile you want to compute. Between 0 and 1\n\n tol : float\n precision for the output\n\n Returns\n -------\n x : float\n x such that P(X < x) = q\n\n \"\"\"\n scale = self._scale\n k = self._k\n dps = self._dps\n \n z_approx = scale * chi2.ppf(q, k)\n \n epsilon = scale * 0.001\n lb = z_approx - epsilon\n ub = z_approx + epsilon\n\n f = lambda z: self._cdf_notTruncated(-np.inf, z, dps)\n\n z = find_root(f, q, lb, ub, tol)\n\n return z \n \n \n\nimport doctest\ndoctest.testmod()\n\n"
] | [
[
"scipy.stats.chi.ppf",
"scipy.stats.chi2.ppf",
"scipy.stats.chi2.pdf",
"scipy.stats.chi.pdf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
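Both _cdf_notTruncated methods lean on the identity between the regularized lower incomplete gamma function and the chi-square CDF. A quick cross-check of that identity against scipy, with arbitrarily chosen values:

    import mpmath as mp
    from scipy.stats import chi2

    k, x = 3, 4.5
    # P(X <= x) for X ~ chi2_k equals the regularized gamma integral on (0, x/2).
    via_mpmath = float(mp.gammainc(k / 2.0, 0, x / 2.0, regularized=True))
    via_scipy = chi2.cdf(x, k)
    print(via_mpmath, via_scipy)  # both ~0.7877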
lujiammy/coronavirus-machine-learning | [
"4af16b1c51a89a81206262b50a9bcf4d9b679853"
] | [
"mlp_uk_learning.py"
] | [
"import numpy as np\nnp.random.seed(1337)\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport matplotlib.pyplot as plt\n\n\nmodel = Sequential()\nmodel.add(Dense(units=50, input_dim=1, activation='relu'))\nmodel.add(Dense(units=50, activation='relu'))\nmodel.add(Dense(units=1, activation='sigmoid'))\nmodel.add(Dense(units=1, activation='linear'))\nmodel.compile(optimizer='adam', loss='mean_squared_error')\nmodel.summary()\n\n# uk corona\nimport json\n\nurl = 'https://api.covid19uk.live/historyfigures'\n\n\ndef read_url_to_json(url):\n import urllib.request as request\n webpage = request.urlopen(url)\n get_data = webpage.read()\n data = json.loads(get_data)\n return data\n\n\nread_data = read_url_to_json(url)\neach_data = read_data['data']\nuk_comfirmed_data = []\n\nfor each in each_data:\n uk_comfirmed_data.append(each['confirmed'])\n\nuk_date_length = len(uk_comfirmed_data)\nuk_dates = list(range(1, uk_date_length + 1))\n\nuk_comfirmed_data = np.array(uk_comfirmed_data)\nuk_dates = np.array(uk_dates)\n\nuk_absorb_amount = uk_comfirmed_data[uk_date_length-1]\n\nuk_comfirmed_data_norm = uk_comfirmed_data / uk_absorb_amount\n\n# fit model\nmodel.fit(uk_dates, uk_comfirmed_data_norm, epochs=10000, shuffle=False)\n\nuk_comfirmed_data_predict = model.predict(uk_dates)\nuk_comfirmed_data_predict = uk_comfirmed_data_predict * uk_absorb_amount\nfig2 = plt.figure(figsize=(7, 5))\nplt.scatter(uk_dates, uk_comfirmed_data, label='Real Confirmed')\nplt.plot(uk_dates, uk_comfirmed_data_predict, label='Predict Result')\nplt.title('UK Confirmed VS Dates')\nplt.xlabel('Dates')\nplt.ylabel('Amount')\nplt.legend()\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"numpy.random.seed",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
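The script normalizes the cumulative case series by its latest value so the targets fall in (0, 1], then rescales predictions back to counts. A numpy-only sketch of that round trip, with made-up case numbers and a stand-in for model.predict():

    import numpy as np

    confirmed = np.array([10, 25, 80, 200, 450, 900], dtype=float)
    scale = confirmed[-1]           # cumulative series, so the last value is the max
    normalized = confirmed / scale  # in (0, 1], matching the network's output range

    predicted_norm = normalized * 0.98  # stand-in for model.predict(uk_dates)
    predicted = predicted_norm * scale  # rescale back to case counts
    print(predicted.astype(int))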
CrackedSTone/algorithm-detects-liver-pathology | [
"d52d08e4e6931b3502f083f20d6332f7b6839a3b"
] | [
"diplom_test/main.py"
] | [
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom PyQt5.uic import loadUi\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\n#from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as NavigationToolbar)\nimport matplotlib.image as mpimg\n\nimport sys\n\nimport radiomics_single as rs\n\nqtCreatorFile = \"design/diplom.ui\" # Enter file here.\n\nclass MatplotlibWidget(QMainWindow):\n\n def __init__(self):\n QMainWindow.__init__(self)\n loadUi(qtCreatorFile, self)\n self.FlagLoaded = False\n self.setWindowTitle(\"Texture Analysis for Diffuse Liver Diseases\")\n self.buttonLoader.clicked.connect(self.choose_file)\n self.buttonAnalyze.clicked.connect(self.analyze)\n #self.addToolBar(NavigationToolbar(self.MplWidget.canvas, self))\n self.setWindowIcon(QIcon(\"app.ico\"))\n\n mainMenu = self.menuBar()\n fileMenu = mainMenu.addMenu('File')\n helpMenu = mainMenu.addMenu('Help')\n\n buttonLoaderMenu = QAction('Download', self)\n buttonLoaderMenu.setShortcut('Ctrl+D')\n buttonLoaderMenu.setStatusTip('Download the region of the interest')\n buttonLoaderMenu.triggered.connect(self.choose_file)\n fileMenu.addAction(buttonLoaderMenu)\n\n buttonAnalyzeMenu = QAction('Analysis', self)\n buttonAnalyzeMenu.setShortcut('Ctrl+A')\n buttonAnalyzeMenu.setStatusTip('Analyse the loaded region of the interest')\n buttonAnalyzeMenu.triggered.connect(self.analyze)\n fileMenu.addAction(buttonAnalyzeMenu)\n\n buttonExit = QAction('Quit', self)\n buttonExit.setShortcut('Ctrl+Q')\n buttonExit.setStatusTip('Quit out of application')\n buttonExit.triggered.connect(sys.exit)\n fileMenu.addAction(buttonExit)\n\n buttonLaunch = QAction('How to run', self)\n buttonLaunch.setStatusTip('Get info about how to run the application')\n self.msgBox1 = QMessageBox(self)\n self.msgBox1.setIcon(QMessageBox.Information)\n self.msgBox1.setWindowTitle(\"How to run\")\n self.msgBox1.setText(\"To run the classifier:\\n1) push the button <Choose an image>\\n2) push the button <Analyse>\")\n buttonLaunch.triggered.connect(self.msgBox1.exec_)\n helpMenu.addAction(buttonLaunch)\n\n\n\n buttonInfo = QAction('Application', self)\n buttonInfo.setStatusTip('Get info about the application')\n self.msgBox2 = QMessageBox(self)\n self.msgBox2.setIcon(QMessageBox.Information)\n self.msgBox2.setWindowTitle(\"Application\")\n self.msgBox2.setText(\"This application give an ability to load ROI and predict a probable presence of diffuse liver diseases.\")\n buttonInfo.triggered.connect(self.msgBox2.exec_)\n helpMenu.addAction(buttonInfo)\n\n buttonInfo = QAction('Developer', self)\n buttonInfo.setStatusTip('Get info about the developer')\n self.msgBox3 = QMessageBox(self)\n self.msgBox3.setIcon(QMessageBox.Information)\n self.msgBox3.setWindowTitle(\"Developer\")\n self.msgBox3.setText(\"This application was developed by Illia Yankovyi, the student of the 4th year\"\n \"\\nNTUU Igor Sikorsky Kyiv Polytechnic Institute:\"\n \"\\nFaculty of Biomedical Engineering (FBME)\\n\"\n \"\\nAcademic unit:BS-52 group\\n\"\n \"\\nSupervisor: Nastenko I., M.D., Candidate of Engineering Sciences, Senior Research Fellow.\")\n buttonInfo.triggered.connect(self.msgBox3.exec_)\n helpMenu.addAction(buttonInfo)\n\n self.labelTitle.setText('Classifier of Diffuse Liver Diseases')\n font = QFont()\n font.setPointSize(20)\n font.setBold(True)\n self.labelTitle.setFont(font)\n self.labelTitle.setAlignment(Qt.AlignCenter)\n self.buttonAnalyze.setText('Analyze Image')\n self.buttonLoader.setText('Download Image')\n 
self.labelResult.setText('To get a prediction:\\n\\n1) Download the region of interest;\\n2) Run the analysis.')\n\n def analyze(self):\n if (self.FlagLoaded):\n self.labelResult.setText(rs.signle_prediction(self.path))\n else:\n self.labelResult.setText(\"Image was not chosen!\\n\\nPlease choose the image\\nbefore running the Analysis\")\n self.msgBox4 = QMessageBox(self)\n self.msgBox4.setIcon(QMessageBox.Warning)\n self.msgBox4.setWindowTitle(\"Error! Image was not chosen.\")\n self.msgBox4.setText(\n \"Image was not chosen! Please choose the image before running the Analysis.\")\n self.msgBox4.exec_()\n\n\n def choose_file(self):\n options = QFileDialog.Options()\n fileName, _ = QFileDialog.getOpenFileName(self, \"Choose an image\", \"\",\n \"Image (*.bmp *.png *.jpeg *.jpg)\", options=options)\n extensions = ['png', 'jpg', 'jpeg', 'bmp']\n fileExtension = (fileName.split('.'))[-1].lower()\n if fileName:\n if fileExtension in extensions:\n self.path = fileName\n self.img = mpimg.imread(self.path)\n self.MplWidget.canvas.axes.clear()\n self.MplWidget.canvas.axes.imshow(self.img)\n self.MplWidget.canvas.axes.set_title('Chosen image')\n self.MplWidget.canvas.draw()\n self.FlagLoaded = True\n else:\n self.labelResult.setText(\"Chosen filetype is not supported.\\nSupported filetypes:\\nBMP, PNG, JPEG, JPG\")\n self.msgBox5 = QMessageBox(self)\n self.msgBox5.setIcon(QMessageBox.Warning)\n self.msgBox5.setWindowTitle(\"Error! Chosen filetype is not supported.\")\n self.msgBox5.setText(\n \"Chosen filetype is not supported.\\nSupported filetypes:\\nBMP, PNG, JPEG, JPG.\")\n self.msgBox5.exec_()\n\nif __name__ == \"__main__\":\n app = QApplication([])\n window = MatplotlibWidget()\n window.show()\n sys.exit(app.exec_())"
] | [
[
"matplotlib.image.imread"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
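choose_file() above gates on the file extension before handing the path to matplotlib. The same check without the Qt plumbing; the "roi.png" path is hypothetical, so the call is left commented out:

    import matplotlib.image as mpimg

    SUPPORTED = {"bmp", "png", "jpeg", "jpg"}

    def load_roi(path):
        # Reject anything outside the GUI's supported image formats.
        ext = path.rsplit(".", 1)[-1].lower()
        if ext not in SUPPORTED:
            raise ValueError("Unsupported filetype: " + ext)
        return mpimg.imread(path)  # ndarray of shape (H, W) or (H, W, C)

    # img = load_roi("roi.png")  # hypothetical file path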
Jbuxofplenty/quant | [
"2ef24012963e9ead6193e0f421c63fb009c78f80"
] | [
"strategies/hitoshi.py"
] | [
"from zipline.pipeline import Pipeline\nfrom zipline.api import attach_pipeline, pipeline_output\nfrom zipline.pipeline.data.equity_pricing import USEquityPricing\nfrom zipline.pipeline.data import morningstar\nfrom zipline.pipeline.factors import SimpleMovingAverage, AverageDollarVolume\nfrom zipline.pipeline.filters.morningstar import IsPrimaryShare\n\nimport numpy as np # needed for NaN handling\nimport math # ceil and floor are useful for rounding\n\nfrom itertools import cycle\n\n\ndef initialize(context):\n # set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.50))\n set_slippage(\n slippage.VolumeShareSlippage(\n volume_limit=.20,\n price_impact=0.0))\n # set_slippage(slippage.FixedSlippage(spread=0.00))\n set_commission(commission.PerTrade(cost=0.00))\n # set_slippage(slippage.FixedSlippage(spread=0.00))\n set_long_only()\n\n context.MaxCandidates = 100\n context.MaxBuyOrdersAtOnce = 30\n context.MyLeastPrice = 3.00\n context.MyMostPrice = 25.00\n context.MyFireSalePrice = context.MyLeastPrice\n context.MyFireSaleAge = 6\n\n # over simplistic tracking of position age\n context.age = {}\n print(len(context.portfolio.positions))\n\n # Rebalance\n EveryThisManyMinutes = 10\n TradingDayHours = 6.5\n TradingDayMinutes = int(TradingDayHours * 60)\n for minutez in xrange(\n 1,\n TradingDayMinutes,\n EveryThisManyMinutes\n ):\n schedule_function(\n my_rebalance,\n date_rules.every_day(),\n time_rules.market_open(\n minutes=minutez))\n\n # Prevent excessive logging of canceled orders at market close.\n schedule_function(\n cancel_open_orders,\n date_rules.every_day(),\n time_rules.market_close(\n hours=0,\n minutes=1))\n\n # Record variables at the end of each day.\n schedule_function(\n my_record_vars,\n date_rules.every_day(),\n time_rules.market_close())\n\n # Create our pipeline and attach it to our algorithm.\n my_pipe = make_pipeline(context)\n attach_pipeline(my_pipe, 'my_pipeline')\n\n\ndef make_pipeline(context):\n \"\"\"\n Create our pipeline.\n \"\"\"\n\n # Filter for primary share equities. IsPrimaryShare is a built-in filter.\n primary_share = IsPrimaryShare()\n\n # Equities listed as common stock (as opposed to, say, preferred stock).\n # 'ST00000001' indicates common stock.\n common_stock = morningstar.share_class_reference.security_type.latest.eq(\n 'ST00000001')\n\n # Non-depositary receipts. Recall that the ~ operator inverts filters,\n # turning Trues into Falses and vice versa\n not_depositary = ~morningstar.share_class_reference.is_depositary_receipt.latest\n\n # Equities not trading over-the-counter.\n not_otc = ~morningstar.share_class_reference.exchange_id.latest.startswith(\n 'OTC')\n\n # Not when-issued equities.\n not_wi = ~morningstar.share_class_reference.symbol.latest.endswith('.WI')\n\n # Equities without LP in their name, .matches does a match using a regular\n # expression\n not_lp_name = ~morningstar.company_reference.standard_name.latest.matches(\n '.* L[. 
]?P.?$')\n\n # Equities with a null value in the limited_partnership Morningstar\n # fundamental field.\n not_lp_balance_sheet = morningstar.balance_sheet.limited_partnership.latest.isnull()\n\n # Equities whose most recent Morningstar market cap is not null have\n # fundamental data and therefore are not ETFs.\n have_market_cap = morningstar.valuation.market_cap.latest.notnull()\n\n # At least a certain price\n price = USEquityPricing.close.latest\n AtLeastPrice = (price >= context.MyLeastPrice)\n AtMostPrice = (price <= context.MyMostPrice)\n\n # Filter for stocks that pass all of our previous filters.\n tradeable_stocks = (\n primary_share\n & common_stock\n & not_depositary\n & not_otc\n & not_wi\n & not_lp_name\n & not_lp_balance_sheet\n & have_market_cap\n & AtLeastPrice\n & AtMostPrice\n )\n\n LowVar = 6\n HighVar = 40\n\n log.info(\n '''\nAlgorithm initialized variables:\n context.MaxCandidates %s\n LowVar %s\n HighVar %s''' %\n (context.MaxCandidates, LowVar, HighVar))\n\n # High dollar volume filter.\n base_universe = AverageDollarVolume(\n window_length=20,\n mask=tradeable_stocks\n ).percentile_between(LowVar, HighVar)\n\n # Short close price average.\n ShortAvg = SimpleMovingAverage(\n inputs=[USEquityPricing.close],\n window_length=3,\n mask=base_universe\n )\n\n # Long close price average.\n LongAvg = SimpleMovingAverage(\n inputs=[USEquityPricing.close],\n window_length=45,\n mask=base_universe\n )\n\n percent_difference = (ShortAvg - LongAvg) / LongAvg\n\n # Filter to select securities to long.\n stocks_worst = percent_difference.bottom(context.MaxCandidates)\n securities_to_trade = (stocks_worst)\n\n return Pipeline(\n columns={\n 'stocks_worst': stocks_worst\n },\n screen=(securities_to_trade),\n )\n\n\ndef my_compute_weights(context):\n \"\"\"\n Compute ordering weights.\n \"\"\"\n # Compute even target weights for our long positions and short positions.\n stocks_worst_weight = 1.00 / len(context.stocks_worst)\n\n return stocks_worst_weight\n\n\ndef before_trading_start(context, data):\n # Gets our pipeline output every day.\n context.output = pipeline_output('my_pipeline')\n\n context.stocks_worst = context.output[\n context.output['stocks_worst']].index.tolist()\n\n context.stocks_worst_weight = my_compute_weights(context)\n\n context.MyCandidate = cycle(context.stocks_worst)\n\n context.LowestPrice = context.MyLeastPrice # reset beginning of day\n print(len(context.portfolio.positions))\n for stock in context.portfolio.positions:\n CurrPrice = float(data.current([stock], 'price'))\n if CurrPrice < context.LowestPrice:\n context.LowestPrice = CurrPrice\n if stock in context.age:\n context.age[stock] += 1\n else:\n context.age[stock] = 1\n for stock in context.age:\n if stock not in context.portfolio.positions:\n context.age[stock] = 0\n message = 'stock.symbol: {symbol} : age: {age}'\n log.info(message.format(symbol=stock.symbol, age=context.age[stock]))\n\n\ndef my_rebalance(context, data):\n BuyFactor = .99\n SellFactor = 1.01\n cash = context.portfolio.cash\n\n cancel_open_buy_orders(context, data)\n\n # Order sell at profit target in hope that somebody actually buys it\n for stock in context.portfolio.positions:\n if not get_open_orders(stock):\n StockShares = context.portfolio.positions[stock].amount\n CurrPrice = float(data.current([stock], 'price'))\n CostBasis = float(context.portfolio.positions[stock].cost_basis)\n SellPrice = float(\n make_div_by_05(\n CostBasis *\n SellFactor,\n buy=False))\n\n if np.isnan(SellPrice):\n pass # probably best to wait until nan goes away\n elif (stock in context.age and context.age[stock] == 1):\n pass\n elif (\n stock in context.age\n and context.MyFireSaleAge <= context.age[stock]\n and (\n context.MyFireSalePrice > CurrPrice\n or CostBasis > CurrPrice\n )\n ):\n if (stock in context.age and context.age[stock] < 2):\n pass\n elif stock not in context.age:\n context.age[stock] = 1\n else:\n SellPrice = float(\n make_div_by_05(.95 * CurrPrice, buy=False))\n order(stock, -StockShares,\n style=LimitOrder(SellPrice)\n )\n else:\n if (stock in context.age and context.age[stock] < 2):\n pass\n elif stock not in context.age:\n context.age[stock] = 1\n else:\n order(stock, -StockShares,\n style=LimitOrder(SellPrice)\n )\n\n WeightThisBuyOrder = float(1.00 / context.MaxBuyOrdersAtOnce)\n for ThisBuyOrder in range(context.MaxBuyOrdersAtOnce):\n stock = next(context.MyCandidate)\n PH = data.history([stock], 'price', 20, '1d')\n PH_Avg = float(PH.mean())\n CurrPrice = float(data.current([stock], 'price'))\n if np.isnan(CurrPrice):\n pass # probably best to wait until nan goes away\n else:\n if CurrPrice > float(1.25 * PH_Avg):\n BuyPrice = float(CurrPrice)\n else:\n BuyPrice = float(CurrPrice * BuyFactor)\n BuyPrice = float(make_div_by_05(BuyPrice, buy=True))\n StockShares = int(WeightThisBuyOrder * cash / BuyPrice)\n order(stock, StockShares,\n style=LimitOrder(BuyPrice)\n )\n\n# if cents not divisible by .05, round down if buy, round up if sell\n\n\ndef make_div_by_05(s, buy=False):\n s *= 20.00\n s = math.floor(s) if buy else math.ceil(s)\n s /= 20.00\n return s\n\n\ndef my_record_vars(context, data):\n \"\"\"\n Record variables at the end of each day.\n \"\"\"\n\n # Record our variables.\n record(leverage=context.account.leverage)\n record(positions=len(context.portfolio.positions))\n if 0 < len(context.age):\n MaxAge = context.age[max(\n context.age.keys(), key=(lambda k: context.age[k]))]\n print(MaxAge)\n record(MaxAge=MaxAge)\n record(LowestPrice=context.LowestPrice)\n\n\ndef log_open_order(StockToLog):\n oo = get_open_orders()\n if len(oo) == 0:\n return\n for stock, orders in oo.items():\n if stock == StockToLog:\n for o in orders:\n message = 'Found open order for {amount} shares in {stock}'\n log.info(message.format(amount=o.amount, stock=stock))\n\n\ndef log_open_orders():\n oo = get_open_orders()\n if len(oo) == 0:\n return\n for stock, orders in oo.items():\n for o in orders:\n message = 'Found open order for {amount} shares in {stock}'\n log.info(message.format(amount=o.amount, stock=stock))\n\n\ndef cancel_open_buy_orders(context, data):\n oo = get_open_orders()\n if len(oo) == 0:\n return\n for stock, orders in oo.items():\n for o in orders:\n # message = 'Canceling order of {amount} shares in {stock}'\n # log.info(message.format(amount=o.amount, stock=stock))\n if 0 < o.amount: # it is a buy order\n cancel_order(o)\n\n\ndef cancel_open_orders(context, data):\n oo = get_open_orders()\n if len(oo) == 0:\n return\n for stock, orders in oo.items():\n for o in orders:\n # message = 'Canceling order of {amount} shares in {stock}'\n # log.info(message.format(amount=o.amount, stock=stock))\n cancel_order(o)\n\n# This is the every minute stuff\n\n\ndef handle_data(context, data):\n pass\n"
] | [
[
"numpy.isnan"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Codelegant92/STC-ProtoNet | [
"f3e77bb1b363b0338cda6f1701bfabe0cd3accbe"
] | [
"save_features.py"
] | [
"import numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport os\nimport glob\nimport h5py\n\nimport configs\nimport backbone\nfrom data.datamgr import SimpleDataManager\nfrom methods.baselinetrain import BaselineTrain\nfrom methods.baselinefinetune import BaselineFinetune\nfrom methods.protonet import ProtoNet\nfrom methods.matchingnet import MatchingNet\nfrom methods.relationnet import RelationNet\nfrom methods.maml import MAML\nfrom io_utils import model_dict, parse_args, get_resume_file, get_best_file, get_assigned_file \n\n\ndef save_features(model, data_loader, outfile ):\n f = h5py.File(outfile, 'w')\n max_count = len(data_loader)*data_loader.batch_size\n all_labels = f.create_dataset('all_labels',(max_count,), dtype='i')\n all_feats=None\n count=0\n for i, (x,y) in enumerate(data_loader):\n if i%10 == 0:\n print('{:d}/{:d}'.format(i, len(data_loader)))\n x = x.cuda()\n x_var = Variable(x)\n feats = model(x_var)\n if all_feats is None:\n all_feats = f.create_dataset('all_feats', [max_count] + list( feats.size()[1:]) , dtype='f')\n all_feats[count:count+feats.size(0)] = feats.data.cpu().numpy()\n all_labels[count:count+feats.size(0)] = y.cpu().numpy()\n count = count + feats.size(0)\n\n count_var = f.create_dataset('count', (1,), dtype='i')\n count_var[0] = count\n\n f.close()\n\nif __name__ == '__main__':\n params = parse_args('save_features')\n assert params.method != 'maml' and params.method != 'maml_approx', 'maml do not support save_feature and run'\n\n if 'Conv' in params.model:\n image_size = 40\n\n split = params.split\n loadfile = configs.data_dir[params.dataset] + split + '.json'\n loadfile_unk = configs.data_dir[params.dataset] + split + '_unk.json'\n loadfile_sil = configs.data_dir[params.dataset] + split + '_sil.json'\n\n checkpoint_dir = '%s/checkpoints/%s/%s_%s_regularizer' %(configs.save_dir, params.dataset, params.model, params.method)\n #checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, params.dataset, params.model, params.method)\n \n if params.train_aug:\n checkpoint_dir += '_aug'\n if not params.method in ['baseline', 'baseline++'] :\n if params.train_n_way != -1:\n checkpoint_dir += '_%d-way_' %( params.train_n_way )\n else:\n checkpoint_dir += '_random-way_'\n if params.train_n_shot != -1:\n checkpoint_dir += '%d-shot' % ( params.train_n_shot )\n else:\n checkpoint_dir += 'random-shot'\n\n if params.save_iter != -1:\n modelfile = get_assigned_file(checkpoint_dir,params.save_iter)\n# elif params.method in ['baseline', 'baseline++'] :\n# modelfile = get_resume_file(checkpoint_dir) #comment in 2019/08/03 updates as the validation of baseline/baseline++ is added\n else:\n modelfile = get_best_file(checkpoint_dir, params.test_n_way)\n\n if params.save_iter != -1:\n outfile = os.path.join( checkpoint_dir.replace(\"checkpoints\",\"features\"), split + \"_\" + str(params.save_iter)+ \".hdf5\") \n else:\n outfile = os.path.join( checkpoint_dir.replace(\"checkpoints\",\"features\"), split + \".hdf5\") \n #outfile = os.path.join( checkpoint_dir.replace(\"checkpoints\",\"features\"), split + \"_test_random-way.hdf5\") \n\n datamgr = SimpleDataManager(image_size, batch_size = 64)\n data_loader = datamgr.get_data_loader(loadfile, [loadfile_unk, loadfile_sil], aug = False)\n\n if params.method in ['relationnet', 'relationnet_softmax']:\n if params.model == 'Conv4': \n model = backbone.Conv4NP()\n elif params.model == 'Conv6': \n model = backbone.Conv6NP()\n elif params.model == 'Conv4S': \n model = backbone.Conv4SNP()\n else:\n model = 
model_dict[params.model]( flatten = False )\n elif params.method in ['maml' , 'maml_approx']: \n raise ValueError('MAML do not support save feature')\n else:\n model = model_dict[params.model]()\n\n model = model.cuda()\n tmp = torch.load(modelfile)\n state = tmp['state']\n state_keys = list(state.keys())\n for i, key in enumerate(state_keys):\n if \"feature.\" in key:\n newkey = key.replace(\"feature.\",\"\") # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx' \n state[newkey] = state.pop(key)\n else:\n state.pop(key)\n \n model.load_state_dict(state)\n model.eval()\n\n dirname = os.path.dirname(outfile)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n save_features(model, data_loader, outfile)\n"
] | [
[
"torch.autograd.Variable",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LSSTDESC/skyportal | [
"1a433aae67b26ffd3516e65e0fdbf866b4751486"
] | [
"skyportal/tests/api/test_photometry.py"
] | [
"import math\n\nimport numpy as np\nimport sncosmo\n\nfrom baselayer.app.env import load_env\nfrom skyportal.models import DBSession, Token\nfrom skyportal.tests import api\n\n_, cfg = load_env()\nPHOT_DETECTION_THRESHOLD = cfg[\"misc.photometry_detection_threshold_nsigma\"]\n\n\ndef test_token_user_post_get_photometry_data(\n upload_data_token, public_source, public_group, ztf_camera\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n\n assert data['data']['ra'] is None\n assert data['data']['dec'] is None\n assert data['data']['ra_unc'] is None\n assert data['data']['dec_unc'] is None\n\n np.testing.assert_allclose(\n data['data']['flux'], 12.24 * 10 ** (-0.4 * (25.0 - 23.9))\n )\n\n\ndef test_token_user_post_put_photometry_data(\n upload_data_token, public_source, public_group, ztf_camera\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'instrument_id': ztf_camera.id,\n \"mjd\": [59400, 59401, 59402],\n \"mag\": [19.2, 19.3, np.random.uniform(19, 20)],\n \"magerr\": [0.05, 0.06, np.random.uniform(0.01, 0.1)],\n \"limiting_mag\": [20.0, 20.1, 20.2],\n \"magsys\": [\"ab\", \"ab\", \"ab\"],\n \"filter\": [\"ztfr\", \"ztfg\", \"ztfr\"],\n \"ra\": [42.01, 42.01, 42.02],\n \"dec\": [42.02, 42.01, 42.03],\n \"origin\": [None, \"lol\", \"lol\"],\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n ids = data[\"data\"][\"ids\"]\n assert len(ids) == 3\n\n # POSTing photometry that contains the same first two points should fail:\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'instrument_id': ztf_camera.id,\n \"mjd\": [59400, 59401, 59402],\n \"mag\": [19.2, 19.3, np.random.uniform(19, 20)],\n \"magerr\": [0.05, 0.06, np.random.uniform(0.01, 0.1)],\n \"limiting_mag\": [20.0, 20.1, 20.2],\n \"magsys\": [\"ab\", \"ab\", \"ab\"],\n \"filter\": [\"ztfr\", \"ztfg\", \"ztfr\"],\n \"ra\": [42.01, 42.01, 42.02],\n \"dec\": [42.02, 42.01, 42.03],\n \"origin\": [None, \"lol\", \"lol\"],\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 400\n assert data['status'] == 'error'\n\n # PUTing photometry that contains\n # the same first point, the second point with a different origin, and a new third point should succeed\n # only the last two points will be ingested\n status, data = api(\n 'PUT',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'instrument_id': ztf_camera.id,\n \"mjd\": [59400, 59401, 59402],\n \"mag\": [19.2, 19.3, np.random.uniform(19, 20)],\n \"magerr\": [0.05, 0.06, np.random.uniform(0.01, 0.1)],\n \"limiting_mag\": [20.0, 20.1, 20.2],\n \"magsys\": [\"ab\", \"ab\", \"ab\"],\n \"filter\": [\"ztfr\", \"ztfg\", \"ztfr\"],\n \"ra\": [42.01, 42.01, 42.02],\n \"dec\": [42.02, 42.01, 42.03],\n \"origin\": [None, \"omg\", \"lol\"],\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n 
new_ids = data[\"data\"][\"ids\"]\n assert len(new_ids) == 3\n assert len(set(new_ids).intersection(set(ids))) == 1\n\n\ndef test_token_user_post_put_get_photometry_data(\n upload_data_token_two_groups, public_source, public_group, public_group2, ztf_camera\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'instrument_id': ztf_camera.id,\n \"mjd\": [59400, 59401, 59402],\n \"mag\": [19.2, 19.3, np.random.uniform(19, 20)],\n \"magerr\": [0.05, 0.06, np.random.uniform(0.01, 0.1)],\n \"limiting_mag\": [20.0, 20.1, 20.2],\n \"magsys\": [\"ab\", \"ab\", \"ab\"],\n \"filter\": [\"ztfr\", \"ztfg\", \"ztfr\"],\n \"ra\": [42.01, 42.01, 42.02],\n \"dec\": [42.02, 42.01, 42.03],\n \"origin\": [None, \"lol\", \"lol\"],\n 'group_ids': [public_group.id],\n },\n token=upload_data_token_two_groups,\n )\n assert status == 200\n assert data['status'] == 'success'\n ids = data[\"data\"][\"ids\"]\n assert len(ids) == 3\n\n status, data = api(\n 'GET', f'photometry/{ids[0]}?format=flux', token=upload_data_token_two_groups\n )\n assert status == 200\n assert data['status'] == 'success'\n group_ids = [g[\"id\"] for g in data['data']['groups']]\n assert len(group_ids) == 2\n assert public_group.id in group_ids\n\n # PUTing photometry that contains\n # the same first point, the second point with a different origin, and a new third point should succeed\n # only the last two points will be ingested\n status, data = api(\n 'PUT',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'instrument_id': ztf_camera.id,\n \"mjd\": [59400, 59401],\n \"mag\": [19.2, 19.3],\n \"magerr\": [0.05, 0.06],\n \"limiting_mag\": [20.0, 20.1],\n \"magsys\": [\"ab\", \"ab\"],\n \"filter\": [\"ztfr\", \"ztfg\"],\n \"ra\": [42.01, 42.01],\n \"dec\": [42.02, 42.01],\n \"origin\": [None, \"lol\"],\n 'group_ids': [public_group.id, public_group2.id],\n },\n token=upload_data_token_two_groups,\n )\n assert status == 200\n assert data['status'] == 'success'\n new_ids = data[\"data\"][\"ids\"]\n assert len(new_ids) == 2\n assert len(set(new_ids).intersection(set(ids))) == 2\n\n status, data = api(\n 'GET', f'photometry/{ids[0]}?format=flux', token=upload_data_token_two_groups\n )\n assert status == 200\n assert data['status'] == 'success'\n group_ids = [g[\"id\"] for g in data['data']['groups']]\n assert len(group_ids) == 3\n\n token_object = (\n DBSession()\n .query(Token)\n .filter(Token.id == upload_data_token_two_groups)\n .first()\n )\n\n assert sorted(group_ids) == sorted(\n [\n public_group.id,\n public_group2.id,\n token_object.created_by.single_user_group.id,\n ]\n )\n\n\ndef test_post_photometry_multiple_groups(\n upload_data_token_two_groups,\n public_source_two_groups,\n public_group,\n public_group2,\n ztf_camera,\n):\n upload_data_token = upload_data_token_two_groups\n public_source = public_source_two_groups\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id, public_group2.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n\n assert data['data']['ra'] is None\n assert data['data']['dec'] is None\n assert 
data['data']['ra_unc'] is None\n assert data['data']['dec_unc'] is None\n\n assert len(data['data']['groups']) == 3\n\n np.testing.assert_allclose(\n data['data']['flux'], 12.24 * 10 ** (-0.4 * (25.0 - 23.9))\n )\n\n\ndef test_post_photometry_all_groups(\n upload_data_token_two_groups,\n super_admin_token,\n public_source_two_groups,\n public_group,\n public_group2,\n ztf_camera,\n):\n upload_data_token = upload_data_token_two_groups\n public_source = public_source_two_groups\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': \"all\",\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET',\n f'photometry/{photometry_id}?format=flux',\n token=super_admin_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n assert data['data']['ra'] is None\n assert data['data']['dec'] is None\n assert data['data']['ra_unc'] is None\n assert data['data']['dec_unc'] is None\n\n assert len(data['data']['groups']) == 2\n assert data['data']['groups'][0]['name'] == cfg['misc']['public_group_name']\n\n np.testing.assert_allclose(\n data['data']['flux'], 12.24 * 10 ** (-0.4 * (25.0 - 23.9))\n )\n\n\ndef test_retrieve_photometry_group_membership_posted_by_other(\n upload_data_token_two_groups,\n view_only_token,\n public_source_two_groups,\n public_group,\n public_group2,\n ztf_camera,\n):\n upload_data_token = upload_data_token_two_groups\n public_source = public_source_two_groups\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id, public_group2.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=view_only_token\n )\n assert status == 200\n assert data['status'] == 'success'\n\n assert data['data']['ra'] is None\n assert data['data']['dec'] is None\n assert data['data']['ra_unc'] is None\n assert data['data']['dec_unc'] is None\n\n np.testing.assert_allclose(\n data['data']['flux'], 12.24 * 10 ** (-0.4 * (25.0 - 23.9))\n )\n\n\ndef test_retrieve_photometry_error_group_membership_posted_by_other(\n upload_data_token_two_groups,\n view_only_token,\n public_source_two_groups,\n public_group,\n public_group2,\n ztf_camera,\n):\n upload_data_token = upload_data_token_two_groups\n public_source = public_source_two_groups\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group2.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=view_only_token\n )\n # `view_only_token only` belongs to `public_group`, not `public_group2`\n assert status == 400\n assert data['status'] == 'error'\n assert \"Insufficient permissions\" in data['message']\n\n\ndef 
test_can_post_photometry_no_groups(\n upload_data_token, public_source, public_group, ztf_camera\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n assert len(data['data']['groups']) == 1\n\n\ndef test_can_post_photometry_empty_groups_list(\n upload_data_token, public_source, public_group, ztf_camera\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [],\n },\n token=upload_data_token,\n )\n\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n assert len(data['data']['groups']) == 1\n\n\ndef test_token_user_post_mag_photometry_data_and_convert(\n upload_data_token, public_source, ztf_camera, public_group\n):\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': 21.0,\n 'magerr': 0.2,\n 'limiting_mag': 22.3,\n 'magsys': 'vega',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n\n ab = sncosmo.get_magsystem('ab')\n vega = sncosmo.get_magsystem('vega')\n correction = 2.5 * np.log10(vega.zpbandflux('ztfg') / ab.zpbandflux('ztfg'))\n\n np.testing.assert_allclose(\n data['data']['flux'], 10 ** (-0.4 * (21.0 - correction - 23.9))\n )\n\n np.testing.assert_allclose(\n data['data']['fluxerr'], 0.2 / (2.5 / np.log(10)) * data['data']['flux']\n )\n\n status, data = api('GET', f'photometry/{photometry_id}', token=upload_data_token)\n assert status == 200\n assert data['status'] == 'success'\n\n np.testing.assert_allclose(data['data']['mag'], 21.0 - correction)\n\n np.testing.assert_allclose(data['data']['magerr'], 0.2)\n\n\ndef test_token_user_post_and_get_different_systems_mag(\n upload_data_token, public_source, ztf_camera, public_group\n):\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': 21.0,\n 'magerr': 0.2,\n 'limiting_mag': 22.3,\n 'magsys': 'vega',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET',\n f'photometry/{photometry_id}?format=mag&magsys=vega',\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n assert data['data']['magsys'] == 'vega'\n\n ab = sncosmo.get_magsystem('ab')\n vega = 
sncosmo.get_magsystem('vega')\n correction = 2.5 * np.log10(vega.zpbandflux('ztfg') / ab.zpbandflux('ztfg'))\n\n np.testing.assert_allclose(data['data']['mag'], 21.0)\n np.testing.assert_allclose(data['data']['magerr'], 0.2)\n np.testing.assert_allclose(data['data']['limiting_mag'], 22.3)\n\n status, data = api(\n 'GET',\n f'photometry/{photometry_id}?format=mag&magsys=ab',\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n np.testing.assert_allclose(data['data']['mag'], 21.0 - correction)\n np.testing.assert_allclose(data['data']['magerr'], 0.2)\n np.testing.assert_allclose(data['data']['limiting_mag'], 22.3 - correction)\n\n\ndef test_token_user_post_and_get_different_systems_flux(\n upload_data_token, public_source, ztf_camera, public_group\n):\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': 21.0,\n 'magerr': 0.2,\n 'limiting_mag': 22.3,\n 'magsys': 'vega',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET',\n f'photometry/{photometry_id}?format=flux&magsys=vega',\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n ab = sncosmo.get_magsystem('ab')\n vega = sncosmo.get_magsystem('vega')\n correction = 2.5 * np.log10(vega.zpbandflux('ztfg') / ab.zpbandflux('ztfg'))\n\n np.testing.assert_allclose(\n data['data']['flux'], 10 ** (-0.4 * (21 - correction - 23.9))\n )\n np.testing.assert_allclose(\n data['data']['fluxerr'], 0.2 / (2.5 / np.log(10)) * data['data']['flux']\n )\n np.testing.assert_allclose(data['data']['zp'], 23.9 + correction)\n\n status, data = api(\n 'GET',\n f'photometry/{photometry_id}?format=flux&magsys=ab',\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n np.testing.assert_allclose(\n data['data']['flux'], 10 ** (-0.4 * (21 - correction - 23.9))\n )\n np.testing.assert_allclose(\n data['data']['fluxerr'], 0.2 / (2.5 / np.log(10)) * data['data']['flux']\n )\n np.testing.assert_allclose(data['data']['zp'], 23.9)\n\n\ndef test_token_user_mixed_photometry_post(\n upload_data_token, public_source, ztf_camera, public_group\n):\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': 21.0,\n 'magerr': [0.2, 0.1],\n 'limiting_mag': 22.3,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][1]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n\n np.testing.assert_allclose(data['data']['flux'], 10 ** (-0.4 * (21.0 - 23.9)))\n\n np.testing.assert_allclose(\n data['data']['fluxerr'], 0.1 / (2.5 / np.log(10)) * data['data']['flux']\n )\n\n # should fail as len(mag) != len(magerr)\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': [21.0],\n 'magerr': [0.2, 0.1],\n 'limiting_mag': 22.3,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 400\n assert 
data['status'] == 'error'\n\n\ndef test_token_user_mixed_mag_none_photometry_post(\n upload_data_token, public_source, ztf_camera, public_group\n):\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': None,\n 'magerr': [0.2, 0.1],\n 'limiting_mag': 22.3,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 400\n assert data['status'] == 'error'\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': [21.3, None],\n 'magerr': [0.2, 0.1],\n 'limiting_mag': 22.3,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 400\n assert data['status'] == 'error'\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': [21.3, None],\n 'magerr': [None, 0.1],\n 'limiting_mag': 22.3,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 400\n assert data['status'] == 'error'\n\n\ndef test_token_user_post_photometry_limits(\n upload_data_token, public_source, ztf_camera, public_group\n):\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': None,\n 'magerr': None,\n 'limiting_mag': 22.3,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n\n assert data['data']['flux'] is None\n np.testing.assert_allclose(\n data['data']['fluxerr'], 10 ** (-0.4 * (22.3 - 23.9)) / PHOT_DETECTION_THRESHOLD\n )\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': None,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n\n assert data['data']['flux'] is None\n np.testing.assert_allclose(\n data['data']['fluxerr'], 0.031 * 10 ** (-0.4 * (25.0 - 23.9))\n )\n\n\ndef test_token_user_post_invalid_filter(\n upload_data_token, public_source, ztf_camera, public_group\n):\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': None,\n 'magerr': None,\n 'limiting_mag': 22.3,\n 'magsys': 'ab',\n 'filter': 'bessellv',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 400\n assert data['status'] == 'error'\n\n\ndef test_token_user_post_photometry_data_series(\n upload_data_token, public_source, ztf_camera, public_group\n):\n # valid request\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': 
str(public_source.id),\n 'mjd': [58000.0, 58001.0, 58002.0],\n 'instrument_id': ztf_camera.id,\n 'flux': [12.24, 15.24, 12.24],\n 'fluxerr': [0.031, 0.029, 0.030],\n 'filter': ['ztfg', 'ztfg', 'ztfg'],\n 'zp': [25.0, 30.0, 21.2],\n 'magsys': ['ab', 'ab', 'ab'],\n 'ra': 264.1947917,\n 'dec': [50.5478333, 50.5478333 + 0.00001, 50.5478333],\n 'dec_unc': 0.2,\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n assert len(data['data']['ids']) == 3\n\n photometry_id = data['data']['ids'][1]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n assert np.allclose(data['data']['flux'], 15.24 * 10 ** (-0.4 * (30 - 23.9)))\n\n assert np.allclose(data['data']['dec'], 50.5478333 + 0.00001)\n\n assert np.allclose(data['data']['dec_unc'], 0.2)\n assert data['data']['ra_unc'] is None\n\n # invalid request\n status, data = api(\n 'POST',\n 'photometry',\n data=[\n {\n 'obj_id': str(public_source.id),\n 'mjd': 58000,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'filter': 'ztfg',\n 'zp': 25.0,\n 'magsys': 'ab',\n 'group_ids': [public_group.id],\n },\n {\n 'obj_id': str(public_source.id),\n 'mjd': 58001,\n 'instrument_id': ztf_camera.id,\n 'flux': 15.24,\n 'fluxerr': 0.031,\n 'filter': 'ztfg',\n 'zp': 30.0,\n 'magsys': 'ab',\n 'group_ids': [public_group.id],\n },\n {\n 'obj_id': str(public_source.id),\n 'mjd': 58002,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'filter': 'ztfg',\n 'zp': 21.2,\n 'magsys': 'vega',\n 'group_ids': [public_group.id],\n },\n ],\n token=upload_data_token,\n )\n\n assert status == 400\n assert data['status'] == 'error'\n\n\ndef test_post_photometry_no_access_token(\n view_only_token, public_source, ztf_camera, public_group\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=view_only_token,\n )\n assert status == 400\n assert data['status'] == 'error'\n\n\ndef test_token_user_update_photometry(\n upload_data_token, public_source, ztf_camera, public_group\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfi',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n np.testing.assert_allclose(data['data']['flux'], 12.24 * 10 ** (-0.4 * (25 - 23.9)))\n\n status, data = api(\n 'PATCH',\n f'photometry/{photometry_id}',\n data={\n 'obj_id': str(public_source.id),\n 'flux': 11.0,\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfi',\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n np.testing.assert_allclose(data['data']['flux'], 11.0 * 10 ** (-0.4 * 
(25 - 23.9)))\n\n\ndef test_token_user_cannot_update_unowned_photometry(\n upload_data_token, manage_sources_token, public_source, ztf_camera, public_group\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfi',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n np.testing.assert_allclose(data['data']['flux'], 12.24 * 10 ** (-0.4 * (25 - 23.9)))\n\n status, data = api(\n 'PATCH',\n f'photometry/{photometry_id}',\n data={\n 'obj_id': str(public_source.id),\n 'flux': 11.0,\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfi',\n },\n token=manage_sources_token,\n )\n assert status == 400\n\n\ndef test_token_user_update_photometry_groups(\n upload_data_token_two_groups,\n manage_sources_token_two_groups,\n public_source_two_groups,\n ztf_camera,\n public_group,\n public_group2,\n view_only_token,\n):\n upload_data_token = upload_data_token_two_groups\n public_source = public_source_two_groups\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfi',\n 'group_ids': [public_group.id, public_group2.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=view_only_token\n )\n assert status == 200\n assert data['status'] == 'success'\n\n status, data = api(\n 'PATCH',\n f'photometry/{photometry_id}',\n data={\n 'obj_id': str(public_source.id),\n 'flux': 11.0,\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfi',\n 'group_ids': [public_group2.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=view_only_token\n )\n assert status == 400\n assert data['status'] == 'error'\n assert \"Insufficient permissions\" in data[\"message\"]\n\n\ndef test_user_can_delete_owned_photometry_data(\n upload_data_token, public_source, ztf_camera, public_group\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfi',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n np.testing.assert_allclose(data['data']['flux'], 12.24 * 10 ** (-0.4 * (25 - 23.9)))\n\n status, data = api('DELETE', f'photometry/{photometry_id}', token=upload_data_token)\n assert status == 200\n\n status, data = api(\n 'GET', 
f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 400\n\n\ndef test_user_cannot_delete_unowned_photometry_data(\n upload_data_token, manage_sources_token, public_source, ztf_camera, public_group\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfi',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n np.testing.assert_allclose(data['data']['flux'], 12.24 * 10 ** (-0.4 * (25 - 23.9)))\n\n status, data = api(\n 'DELETE', f'photometry/{photometry_id}', token=manage_sources_token\n )\n\n assert status == 400\n\n\ndef test_admin_can_delete_unowned_photometry_data(\n upload_data_token, super_admin_token, public_source, ztf_camera, public_group\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfi',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n np.testing.assert_allclose(data['data']['flux'], 12.24 * 10 ** (-0.4 * (25 - 23.9)))\n\n status, data = api('DELETE', f'photometry/{photometry_id}', token=super_admin_token)\n assert status == 200\n\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 400\n\n\ndef test_token_user_retrieving_source_photometry_and_convert(\n view_only_token, public_source\n):\n status, data = api(\n 'GET',\n f'sources/{public_source.id}/photometry?format=flux&magsys=ab',\n token=view_only_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n assert isinstance(data['data'], list)\n assert 'mjd' in data['data'][0]\n assert 'ra_unc' in data['data'][0]\n\n data['data'] = sorted(data['data'], key=lambda d: d['mjd'])\n mag1_ab = -2.5 * np.log10(data['data'][0]['flux']) + data['data'][0]['zp']\n magerr1_ab = 2.5 / np.log(10) * data['data'][0]['fluxerr'] / data['data'][0]['flux']\n\n maglast_ab = -2.5 * np.log10(data['data'][-1]['flux']) + data['data'][-1]['zp']\n magerrlast_ab = (\n 2.5 / np.log(10) * data['data'][-1]['fluxerr'] / data['data'][-1]['flux']\n )\n\n status, data = api(\n 'GET',\n f'sources/{public_source.id}/photometry?format=mag&magsys=ab',\n token=view_only_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n data['data'] = sorted(data['data'], key=lambda d: d['mjd'])\n assert np.allclose(mag1_ab, data['data'][0]['mag'])\n assert np.allclose(magerr1_ab, data['data'][0]['magerr'])\n\n assert np.allclose(maglast_ab, data['data'][-1]['mag'])\n assert np.allclose(magerrlast_ab, data['data'][-1]['magerr'])\n\n status, data = api(\n 'GET',\n f'sources/{public_source.id}/photometry?format=flux&magsys=vega',\n token=view_only_token,\n )\n\n data['data'] = sorted(data['data'], key=lambda d: d['mjd'])\n 
mag1_vega = -2.5 * np.log10(data['data'][0]['flux']) + data['data'][0]['zp']\n magerr1_vega = (\n 2.5 / np.log(10) * data['data'][0]['fluxerr'] / data['data'][0]['flux']\n )\n\n maglast_vega = -2.5 * np.log10(data['data'][-1]['flux']) + data['data'][-1]['zp']\n magerrlast_vega = (\n 2.5 / np.log(10) * data['data'][-1]['fluxerr'] / data['data'][-1]['flux']\n )\n\n assert status == 200\n assert data['status'] == 'success'\n\n ab = sncosmo.get_magsystem('ab')\n vega = sncosmo.get_magsystem('vega')\n vega_to_ab = {\n filter: 2.5 * np.log10(ab.zpbandflux(filter) / vega.zpbandflux(filter))\n for filter in ['ztfg', 'ztfr', 'ztfi']\n }\n\n assert np.allclose(mag1_ab, mag1_vega + vega_to_ab[data['data'][0]['filter']])\n assert np.allclose(magerr1_ab, magerr1_vega)\n\n assert np.allclose(\n maglast_ab, maglast_vega + vega_to_ab[data['data'][-1]['filter']]\n )\n assert np.allclose(magerrlast_ab, magerrlast_vega)\n\n\ndef test_token_user_retrieve_null_photometry(\n upload_data_token, public_source, ztf_camera, public_group\n):\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': None,\n 'magerr': None,\n 'limiting_mag': 22.3,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n assert data['data']['flux'] is None\n\n np.testing.assert_allclose(\n data['data']['fluxerr'], 10 ** (-0.4 * (22.3 - 23.9)) / PHOT_DETECTION_THRESHOLD\n )\n\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=mag', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n assert data['data']['mag'] is None\n assert data['data']['magerr'] is None\n\n\ndef test_token_user_big_post(\n upload_data_token, public_source, ztf_camera, public_group\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': [58000 + i for i in range(50000)],\n 'instrument_id': ztf_camera.id,\n 'mag': np.random.uniform(low=18, high=22, size=50000).tolist(),\n 'magerr': np.random.uniform(low=0.1, high=0.3, size=50000).tolist(),\n 'limiting_mag': 22.3,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n\ndef test_token_user_get_range_photometry(\n upload_data_token, public_source, public_group, ztf_camera\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': [58000.0, 58500.0, 59000.0],\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n status, data = api(\n 'GET',\n 'photometry/range',\n token=upload_data_token,\n data={'instrument_ids': [ztf_camera.id], 'max_date': '2018-05-15T00:00:00'},\n )\n assert status == 200\n assert data['status'] == 'success'\n assert len(data['data']) == 1\n\n status, data = api(\n 'GET',\n 'photometry/range?format=flux&magsys=vega',\n token=upload_data_token,\n data={'instrument_ids': [ztf_camera.id], 'max_date': '2019-02-01T00:00:00'},\n )\n assert 
status == 200\n assert data['status'] == 'success'\n assert len(data['data']) == 2\n\n\ndef test_reject_photometry_inf(\n upload_data_token, public_source, public_group, ztf_camera\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': [58000.0, 58500.0, 59000.0],\n 'instrument_id': ztf_camera.id,\n 'flux': math.inf,\n 'fluxerr': math.inf,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n\n assert status == 400\n assert data['status'] == 'error'\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': math.inf,\n 'magerr': math.inf,\n 'limiting_mag': 22.3,\n 'magsys': 'vega',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n\n assert status == 400\n assert data['status'] == 'error'\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': 2.0,\n 'magerr': 23.0,\n 'limiting_mag': math.inf,\n 'magsys': 'vega',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n\n assert status == 400\n assert data['status'] == 'error'\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': 58000.0,\n 'instrument_id': ztf_camera.id,\n 'mag': None,\n 'magerr': None,\n 'limiting_mag': -math.inf,\n 'magsys': 'vega',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n\n assert status == 400\n assert data['status'] == 'error'\n\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source.id),\n 'mjd': [58000.0, 58500.0, 59000.0],\n 'instrument_id': ztf_camera.id,\n 'flux': None,\n 'fluxerr': math.inf,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group.id],\n },\n token=upload_data_token,\n )\n\n assert status == 400\n assert data['status'] == 'error'\n\n\ndef test_token_user_post_to_foreign_group_and_retrieve(\n upload_data_token, public_source_two_groups, public_group2, ztf_camera\n):\n status, data = api(\n 'POST',\n 'photometry',\n data={\n 'obj_id': str(public_source_two_groups.id),\n 'mjd': [58000.0, 58500.0, 59000.0],\n 'instrument_id': ztf_camera.id,\n 'flux': 12.24,\n 'fluxerr': 0.031,\n 'zp': 25.0,\n 'magsys': 'ab',\n 'filter': 'ztfg',\n 'group_ids': [public_group2.id],\n },\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n photometry_id = data['data']['ids'][0]\n status, data = api(\n 'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token\n )\n assert status == 200\n\n\ndef test_problematic_photometry_1263(\n upload_data_token, public_source, public_group, ztf_camera, public_group2\n):\n\n payload = {\n \"obj_id\": public_source.id,\n \"group_ids\": [public_group.id, public_group2.id],\n \"magsys\": \"ab\",\n \"zp\": 23.9,\n \"instrument_id\": ztf_camera.id,\n 'mjd': [\n 59145.46447,\n 59149.50347,\n 59149.50347,\n 59150.50872,\n 59150.50872,\n 59152.51631,\n 59155.50801,\n 59152.51631,\n 59155.50801,\n 59156.48479,\n 59156.48479,\n 59126.48693,\n 59128.46834,\n 59130.50257,\n 59135.47329,\n 59137.4758,\n 59139.45454,\n 59141.47449,\n 59143.50987,\n 59143.50987,\n 59145.46447,\n 59145.50556,\n 59150.52806,\n 59150.52806,\n 59151.52116,\n 59151.52116,\n 59152.48332,\n 59152.48332,\n 59155.50022,\n 
59155.50022,\n 59156.5383,\n 59126.53144,\n 59128.51928,\n 59130.53196,\n 59135.51196,\n 59137.51334,\n 59139.51507,\n 59141.51422,\n 59143.48529,\n 59143.48529,\n 59145.50556,\n ],\n 'filter': [\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfg',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n 'ztfr',\n ],\n 'flux': [\n 105.4095462,\n 100.4989583,\n 100.4986052,\n 97.45052422,\n 97.45411937,\n 91.71425204,\n 81.08011148,\n 91.71489652,\n 81.08110854,\n 59.37327478,\n 59.37452643,\n None,\n None,\n None,\n 73.17457336,\n 82.20150344,\n 89.14970986,\n 102.1692537,\n 98.6103674,\n 98.60984771,\n 105.4086204,\n 100.8602976,\n 94.84847105,\n 94.85063718,\n 104.8945366,\n 104.8961951,\n 101.6093671,\n 101.6061542,\n 82.34545782,\n 82.34560248,\n 72.48165796,\n None,\n None,\n None,\n 61.60270207,\n 72.73101786,\n 83.83015488,\n 98.70066264,\n 99.85275375,\n 99.84977174,\n 100.8608292,\n ],\n 'fluxerr': [\n 8.416851743,\n 10.10817406,\n 10.10811785,\n 11.74314252,\n 11.74356103,\n 11.40505647,\n 10.61680918,\n 11.40514417,\n 10.61696199,\n 10.6736128,\n 10.67382477,\n 13.51668635,\n 18.71327665,\n 9.509339593,\n 9.374956127,\n 9.638764985,\n 11.98599464,\n 10.42671307,\n 9.666542673,\n 9.666476165,\n 8.41682049,\n 8.680180822,\n 9.926401394,\n 9.926617677,\n 8.494021784,\n 8.494115051,\n 9.984017125,\n 9.983686084,\n 7.964270439,\n 7.964306468,\n 8.499519049,\n 12.65289244,\n 11.39803573,\n 9.771246706,\n 7.839855173,\n 7.592658663,\n 8.674127848,\n 8.965488502,\n 7.69135795,\n 7.691126885,\n 8.680212034,\n ],\n }\n\n status, data = api(\n 'POST',\n 'photometry',\n data=payload,\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n payload = {\n \"obj_id\": public_source.id,\n \"group_ids\": \"all\",\n \"magsys\": \"ab\",\n \"instrument_id\": ztf_camera.id,\n \"filter\": [\n \"ztfr\",\n \"ztfg\",\n \"ztfr\",\n \"ztfg\",\n \"ztfr\",\n \"ztfg\",\n \"ztfr\",\n \"ztfg\",\n \"ztfr\",\n \"ztfr\",\n \"ztfg\",\n \"ztfg\",\n \"ztfr\",\n \"ztfg\",\n \"ztfg\",\n \"ztfr\",\n \"ztfr\",\n \"ztfr\",\n \"ztfg\",\n \"ztfr\",\n \"ztfg\",\n \"ztfg\",\n \"ztfr\",\n ],\n \"mjd\": [\n 59130.53195599979,\n 59135.473286999855,\n 59135.51195599977,\n 59137.47579859989,\n 59137.51334490022,\n 59139.45453700004,\n 59139.51506939996,\n 59141.474490699824,\n 59141.51422449993,\n 59143.48528939998,\n 59143.50987270009,\n 59145.46446759999,\n 59145.50555559993,\n 59149.50347220013,\n 59150.50871529989,\n 59150.52805559989,\n 59151.52115740022,\n 59152.4833217999,\n 59152.516307900194,\n 59155.50021990016,\n 59155.5080093001,\n 59156.4847916998,\n 59156.53829859989,\n ],\n \"limiting_mag\": [\n 19.67770004272461,\n 20.11709976196289,\n 20.059200286865234,\n 20.281099319458008,\n 20.224000930786133,\n 19.809099197387695,\n 20.236799240112305,\n 20.57659912109375,\n 20.31290054321289,\n 20.414499282836914,\n 20.680700302124023,\n 20.57069969177246,\n 20.48349952697754,\n 20.242000579833984,\n 20.642900466918945,\n 20.029699325561523,\n 20.11090087890625,\n 19.808948516845703,\n 19.819171905517578,\n 19.9112606048584,\n 19.913991928100586,\n 19.600677490234375,\n 20.005773544311523,\n ],\n \"mag\": [\n None,\n 19.239099502563477,\n 19.426000595092773,\n 19.11280059814453,\n 
19.24570083618164,\n 19.024700164794922,\n 19.09149932861328,\n 18.876699447631836,\n 18.914199829101562,\n 18.901599884033203,\n 18.915199279785156,\n 18.84280014038086,\n 18.89069938659668,\n 18.89459991455078,\n 18.92799949645996,\n 18.957399368286133,\n 18.848100662231445,\n 18.882665634155273,\n 18.993907928466797,\n 19.110898971557617,\n 19.127714157104492,\n 19.466022491455078,\n 19.24942970275879,\n ],\n \"magerr\": [\n None,\n 0.1391019970178604,\n 0.13817599415779114,\n 0.12731100618839264,\n 0.11334399878978729,\n 0.1459749937057495,\n 0.11234399676322937,\n 0.11080300062894821,\n 0.09862300008535385,\n 0.0836310014128685,\n 0.1064319983124733,\n 0.08669500052928925,\n 0.09344000369310379,\n 0.10920300334692001,\n 0.13083499670028687,\n 0.11362800002098083,\n 0.08791899681091309,\n 0.1066831648349762,\n 0.13501590490341187,\n 0.10501029342412949,\n 0.14216870069503784,\n 0.19518424570560455,\n 0.12731821835041046,\n ],\n \"ra\": [\n None,\n 134.5934039,\n 134.5934169,\n 134.5933773,\n 134.593404,\n 134.593372,\n 134.5933825,\n 134.5933984,\n 134.5933945,\n 134.5933917,\n 134.5933988,\n 134.5933848,\n 134.5933991,\n 134.5933909,\n 134.5934048,\n 134.5934296,\n 134.5934341,\n 134.593388,\n 134.5933606,\n 134.5933857,\n 134.5933939,\n 134.5933847,\n 134.5933954,\n ],\n \"dec\": [\n None,\n 15.0412865,\n 15.041256,\n 15.0412686,\n 15.0412482,\n 15.0412709,\n 15.0412572,\n 15.0412656,\n 15.0412765,\n 15.0412744,\n 15.0412673,\n 15.041271,\n 15.0412726,\n 15.0413061,\n 15.0412751,\n 15.041267,\n 15.0412856,\n 15.0412655,\n 15.0412913,\n 15.0412952,\n 15.0412737,\n 15.0411913,\n 15.0412605,\n ],\n }\n\n status, data = api(\n 'POST',\n 'photometry',\n data=payload,\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n payload['group_ids'] = 'all'\n\n status, data = api(\n 'PUT',\n 'photometry',\n data=payload,\n token=upload_data_token,\n )\n assert status == 200\n assert data['status'] == 'success'\n\n for id in data['data']['ids']:\n status, data = api(\n 'GET', f'photometry/{id}?format=flux', token=upload_data_token\n )\n assert status == 200\n assert data['status'] == 'success'\n assert len(data['data']['groups']) == 2\n\n\ndef test_problematic_photometry_1276(\n public_source, public_group, super_admin_token, ztf_camera\n):\n payload = {\n \"obj_id\": public_source.id,\n \"group_ids\": [public_group.id],\n \"magsys\": \"ab\",\n \"instrument_id\": ztf_camera.id,\n \"filter\": [\n \"ztfg\",\n \"ztfr\",\n \"ztfr\",\n \"ztfr\",\n \"ztfr\",\n \"ztfr\",\n \"ztfr\",\n \"ztfr\",\n \"ztfg\",\n \"ztfr\",\n \"ztfr\",\n \"ztfg\",\n \"ztfg\",\n \"ztfr\",\n \"ztfg\",\n \"ztfr\",\n \"ztfr\",\n \"ztfr\",\n \"ztfg\",\n \"ztfg\",\n \"ztfg\",\n \"ztfg\",\n \"ztfr\",\n \"ztfr\",\n \"ztfg\",\n \"ztfg\",\n \"ztfr\",\n \"ztfg\",\n \"ztfr\",\n ],\n \"mjd\": [\n 59123.41299769981,\n 59129.472291700076,\n 59134.451203700155,\n 59136.46903940011,\n 59136.46903940011,\n 59139.295057899784,\n 59139.295057899784,\n 59139.295057899784,\n 59139.389629600104,\n 59141.36341439979,\n 59141.36341439979,\n 59141.414189800154,\n 59141.414189800154,\n 59143.318460599985,\n 59143.39145829994,\n 59145.34545140015,\n 59145.34545140015,\n 59145.34545140015,\n 59145.41583329998,\n 59145.41583329998,\n 59149.4703819002,\n 59151.32671299996,\n 59151.33918979997,\n 59153.33692129981,\n 59153.404351899866,\n 59155.220972199924,\n 59155.290161999874,\n 59157.360347200185,\n 59157.433634299785,\n ],\n \"limiting_mag\": [\n 19.396099090576172,\n 20.23240089416504,\n 20.129100799560547,\n 
20.493600845336914,\n 20.493600845336914,\n 20.422000885009766,\n 20.422000885009766,\n 20.422000885009766,\n 20.272199630737305,\n 20.18910026550293,\n 20.18910026550293,\n 20.846799850463867,\n 20.846799850463867,\n 20.624300003051758,\n 20.854000091552734,\n 20.628799438476562,\n 20.628799438476562,\n 20.628799438476562,\n 20.840900421142578,\n 20.840900421142578,\n 20.32859992980957,\n 19.60849952697754,\n 19.705799102783203,\n 19.47800064086914,\n 19.409400939941406,\n 19.462600708007812,\n 19.77630043029785,\n 19.678672790527344,\n 19.754121780395508,\n ],\n \"mag\": [\n 18.43560028076172,\n 17.338199615478516,\n 16.25189971923828,\n 16.011999130249023,\n 16.09589958190918,\n 15.974100112915039,\n 15.891500473022461,\n 15.891500473022461,\n None,\n 15.753999710083008,\n 15.819600105285645,\n 18.528499603271484,\n 18.57939910888672,\n 15.781000137329102,\n 18.309499740600586,\n 15.692399978637695,\n 15.692399978637695,\n 15.790599822998047,\n 18.305700302124023,\n 18.31529998779297,\n 18.13994026184082,\n 18.040000915527344,\n 15.505499839782715,\n 15.569299697875977,\n 17.812599182128906,\n 18.046100616455078,\n None,\n 17.95865249633789,\n 15.475956916809082,\n ],\n \"magerr\": [\n 0.18098600208759308,\n 0.12704600393772125,\n 0.03412500023841858,\n 0.018530000001192093,\n 0.09321600198745728,\n 0.1358170062303543,\n 0.017785999923944473,\n 0.017785999923944473,\n None,\n 0.017010999843478203,\n 0.0650859996676445,\n 0.1969199925661087,\n 0.08772700279951096,\n 0.05595200136303902,\n 0.17250700294971466,\n 0.0137339998036623,\n 0.0137339998036623,\n 0.06520400196313858,\n 0.06727799773216248,\n 0.13235700130462646,\n 0.12975013256072998,\n 0.11010699719190598,\n 0.04597700014710426,\n 0.049855999648571014,\n 0.10752200335264206,\n 0.13239599764347076,\n None,\n 0.139614999294281,\n 0.042450759559869766,\n ],\n \"ra\": [\n 56.0478815,\n 56.0468989,\n 56.0478,\n 56.0478343,\n 56.0480658,\n 56.0475873,\n 56.047908,\n 56.0480877,\n None,\n 56.0476469,\n 56.0477499,\n 56.047177,\n 56.0469751,\n 56.0480999,\n 56.0470656,\n 56.0477652,\n 56.0476761,\n 56.0476218,\n 56.0469908,\n 56.0472491,\n 56.0467978,\n 56.0472009,\n 56.0478524,\n 56.0476997,\n 56.0471999,\n 56.0476057,\n None,\n 56.0473734,\n 56.0477336,\n ],\n \"dec\": [\n 71.6368125,\n 71.6367721,\n 71.6367167,\n 71.6367615,\n 71.6367048,\n 71.6368681,\n 71.6368457,\n 71.6368389,\n None,\n 71.6367596,\n 71.6365229,\n 71.6367611,\n 71.6368439,\n 71.6367764,\n 71.6368222,\n 71.6367943,\n 71.6368108,\n 71.6367366,\n 71.6368412,\n 71.6367895,\n 71.6368039,\n 71.6367984,\n 71.6367866,\n 71.6367788,\n 71.6368348,\n 71.6367571,\n None,\n 71.6367753,\n 71.6367119,\n ],\n }\n\n status, data = api(\n 'PUT',\n 'photometry',\n data=payload,\n token=super_admin_token,\n )\n assert status == 400\n assert data['status'] == 'error'\n"
] | [
[
"numpy.log",
"numpy.allclose",
"numpy.log10",
"numpy.testing.assert_allclose",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
smolendawid/ubaar-competition | [
"28de972d6beb13343c537fc030101be672a852a3"
] | [
"feature_extraction/other_features.py"
] | [
"import pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\n\n\ndef categorical_features(data, features):\n features['vehicleType'] = data['vehicleType']\n features['vehicleOption'] = data['vehicleOption']\n\n features['vehicleTypeOption'] = [a + '_' + b for a, b in zip(data['vehicleType'].values,\n data['vehicleOption'].values)]\n\n cat_columns_clusters = ['cluster_dest_db', 'cluster_src_db', 'cluster_src_km', 'cluster_dest_km']\n cat_columns_date = ['day', 'month']\n cat_columns = ['vehicleType', 'vehicleOption', 'vehicleTypeOption']\n # cat_columns += cat_columns_clusters\n # cat_columns += cat_columns_date\n\n features = pd.get_dummies(features, columns=cat_columns, drop_first=True)\n\n features['day'] = LabelEncoder().fit_transform(features['day'])\n features['month'] = LabelEncoder().fit_transform(features['month'])\n\n return features\n\n\ndef raw_features(data, features):\n features['weight'] = data['weight']\n features['distanceKM'] = data['distanceKM']\n features['taxiDurationMin'] = data['taxiDurationMin']\n\n features['sourceLatitude'] = data['sourceLatitude']\n features['sourceLongitude'] = data['sourceLongitude']\n features['destinationLatitude'] = data['destinationLatitude']\n features['destinationLongitude'] = data['destinationLongitude']\n\n features['src_dest'] = (data['SourceState'] == data['destinationState'])\n features['ave_speed'] = data['distanceKM'] / data['taxiDurationMin']\n\n import numpy as np\n features['weight_dur'] = np.log((data['taxiDurationMin'] + 30 * data['weight']))\n features['weight_dist_dur'] = np.log(1. + (10. + data['weight']) * (100. + data['distanceKM']) *\n (1000. + data['taxiDurationMin']))\n\n features['price'] = data['price']\n\n return features\n"
] | [
[
"numpy.log",
"sklearn.preprocessing.LabelEncoder",
"pandas.get_dummies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
nicolasoyharcabal/tensorflow | [
"0d3b58cfe91c6b865a14701345d7a84ce949c0e3"
] | [
"tensorflow/python/data/experimental/ops/batching.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Batching dataset transformations.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.data.experimental.ops import get_single_element\nfrom tensorflow.python.data.experimental.ops import grouping\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.util import convert\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.data.util import sparse\nfrom tensorflow.python.data.util import structure\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef batch_window(dataset):\n \"\"\"Batches a window of tensors.\n\n Args:\n dataset: the input dataset.\n\n Returns:\n A `Tensor` representing the batch of the entire input dataset.\n \"\"\"\n if isinstance(dataset.output_classes, tuple):\n raise TypeError(\"Input dataset expected to have a single component\")\n if dataset.output_classes is ops.Tensor:\n return _batch_dense_window(dataset)\n elif dataset.output_classes is sparse_tensor.SparseTensor:\n return _batch_sparse_window(dataset)\n else:\n raise TypeError(\"Unsupported dataset type: %s\" % dataset.output_classes)\n\n\ndef _batch_dense_window(dataset):\n \"\"\"Batches a window of dense tensors.\"\"\"\n\n def key_fn(_):\n return np.int64(0)\n\n def shape_init_fn(_):\n return array_ops.shape(first_element)\n\n def shape_reduce_fn(state, value):\n check_ops.assert_equal(state, array_ops.shape(value))\n return state\n\n def finalize_fn(state):\n return state\n\n if dataset.output_shapes.is_fully_defined():\n shape = dataset.output_shapes\n else:\n first_element = get_single_element.get_single_element(dataset.take(1))\n shape_reducer = grouping.Reducer(shape_init_fn, shape_reduce_fn,\n finalize_fn)\n shape = get_single_element.get_single_element(\n dataset.apply(grouping.group_by_reducer(key_fn, shape_reducer)))\n\n def batch_init_fn(_):\n batch_shape = array_ops.concat([[0], shape], 0)\n return gen_array_ops.empty(batch_shape, dtype=dataset.output_types)\n\n def batch_reduce_fn(state, value):\n return array_ops.concat([state, [value]], 0)\n\n batch_reducer = 
grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)\n return get_single_element.get_single_element(\n dataset.apply(grouping.group_by_reducer(key_fn, batch_reducer)))\n\n\ndef _batch_sparse_window(dataset):\n \"\"\"Batches a window of sparse tensors.\"\"\"\n\n def key_fn(_):\n return np.int64(0)\n\n def shape_init_fn(_):\n return first_element.dense_shape\n\n def shape_reduce_fn(state, value):\n check_ops.assert_equal(state, value.dense_shape)\n return state\n\n def finalize_fn(state):\n return state\n\n if dataset.output_shapes.is_fully_defined():\n shape = dataset.output_shapes\n else:\n first_element = get_single_element.get_single_element(dataset.take(1))\n shape_reducer = grouping.Reducer(shape_init_fn, shape_reduce_fn,\n finalize_fn)\n shape = get_single_element.get_single_element(\n dataset.apply(grouping.group_by_reducer(key_fn, shape_reducer)))\n\n def batch_init_fn(_):\n indices_shape = array_ops.concat([[0], [array_ops.size(shape) + 1]], 0)\n return sparse_tensor.SparseTensor(\n indices=gen_array_ops.empty(indices_shape, dtype=dtypes.int64),\n values=constant_op.constant([], shape=[0], dtype=dataset.output_types),\n dense_shape=array_ops.concat(\n [np.array([0], dtype=np.int64),\n math_ops.cast(shape, dtypes.int64)], 0))\n\n def batch_reduce_fn(state, value):\n return sparse_ops.sparse_concat(0, [state, value])\n\n def reshape_fn(value):\n return sparse_ops.sparse_reshape(\n value,\n array_ops.concat([np.array([1], dtype=np.int64), value.dense_shape], 0))\n\n batch_reducer = grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)\n return get_single_element.get_single_element(\n dataset.map(reshape_fn).apply(\n grouping.group_by_reducer(key_fn, batch_reducer)))\n\n\n@tf_export(\"data.experimental.dense_to_sparse_batch\")\ndef dense_to_sparse_batch(batch_size, row_shape):\n \"\"\"A transformation that batches ragged elements into `tf.SparseTensor`s.\n\n Like `Dataset.padded_batch()`, this transformation combines multiple\n consecutive elements of the dataset, which might have different\n shapes, into a single element. The resulting element has three\n components (`indices`, `values`, and `dense_shape`), which\n comprise a `tf.SparseTensor` that represents the same data. The\n `row_shape` represents the dense shape of each row in the\n resulting `tf.SparseTensor`, to which the effective batch size is\n prepended. For example:\n\n ```python\n # NOTE: The following examples use `{ ... }` to represent the\n # contents of a dataset.\n a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }\n\n a.apply(tf.data.experimental.dense_to_sparse_batch(\n batch_size=2, row_shape=[6])) ==\n {\n ([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], # indices\n ['a', 'b', 'c', 'a', 'b'], # values\n [2, 6]), # dense_shape\n ([[0, 0], [0, 1], [0, 2], [0, 3]],\n ['a', 'b', 'c', 'd'],\n [1, 6])\n }\n ```\n\n Args:\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the\n number of consecutive elements of this dataset to combine in a\n single batch.\n row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like\n object representing the equivalent dense shape of a row in the\n resulting `tf.SparseTensor`. 
Each element of this dataset must\n have the same rank as `row_shape`, and must have size less\n than or equal to `row_shape` in each dimension.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n `tf.data.Dataset.apply`.\n \"\"\"\n\n def _apply_fn(dataset):\n return _DenseToSparseBatchDataset(dataset, batch_size, row_shape)\n\n return _apply_fn\n\n\ndef padded_batch_window(dataset, padded_shape, padding_value=None):\n \"\"\"Batches a window of tensors with padding.\n\n Args:\n dataset: the input dataset.\n padded_shape: (Optional.) `tf.TensorShape` or `tf.int64` vector tensor-like\n object representing the shape to which the input elements should be padded\n prior to batching. Any unknown dimensions (e.g. `tf.Dimension(None)` in a\n `tf.TensorShape` or `-1` in a tensor-like object) will be padded to the\n maximum size of that dimension in each batch.\n padding_value: (Optional.) A scalar-shaped `tf.Tensor`, representing the\n padding value to use. Defaults are `0` for numeric types and the empty\n string for string types. If `dataset` contains `tf.SparseTensor`, this\n value is ignored.\n\n Returns:\n A `Tensor` representing the batch of the entire input dataset.\n\n Raises:\n ValueError: if invalid arguments are provided.\n \"\"\"\n if not issubclass(dataset.output_classes,\n (ops.Tensor, sparse_tensor.SparseTensor)):\n raise TypeError(\"Input dataset expected to have a single tensor component\")\n if issubclass(dataset.output_classes, (ops.Tensor)):\n return _padded_batch_dense_window(dataset, padded_shape, padding_value)\n elif issubclass(dataset.output_classes, (sparse_tensor.SparseTensor)):\n if padding_value is not None:\n raise ValueError(\"Padding value not allowed for sparse tensors\")\n return _padded_batch_sparse_window(dataset, padded_shape)\n else:\n raise TypeError(\"Unsupported dataset type: %s\" % dataset.output_classes)\n\n\ndef _padded_batch_dense_window(dataset, padded_shape, padding_value=None):\n \"\"\"Batches a window of dense tensors with padding.\"\"\"\n\n padded_shape = math_ops.cast(\n convert.partial_shape_to_tensor(padded_shape), dtypes.int32)\n\n def key_fn(_):\n return np.int64(0)\n\n def max_init_fn(_):\n return padded_shape\n\n def max_reduce_fn(state, value):\n \"\"\"Computes the maximum shape to pad to.\"\"\"\n condition = math_ops.reduce_all(\n math_ops.logical_or(\n math_ops.less_equal(array_ops.shape(value), padded_shape),\n math_ops.equal(padded_shape, -1)))\n assert_op = control_flow_ops.Assert(condition, [\n \"Actual shape greater than padded shape: \",\n array_ops.shape(value), padded_shape\n ])\n with ops.control_dependencies([assert_op]):\n return math_ops.maximum(state, array_ops.shape(value))\n\n def finalize_fn(state):\n return state\n\n # Compute the padded shape.\n max_reducer = grouping.Reducer(max_init_fn, max_reduce_fn, finalize_fn)\n padded_shape = get_single_element.get_single_element(\n dataset.apply(grouping.group_by_reducer(key_fn, max_reducer)))\n\n if padding_value is None:\n if dataset.output_types == dtypes.string:\n padding_value = \"\"\n elif dataset.output_types == dtypes.bool:\n padding_value = False\n elif dataset.output_types == dtypes.variant:\n raise TypeError(\"Unable to create padding for field of type 'variant'\")\n else:\n padding_value = 0\n\n def batch_init_fn(_):\n batch_shape = array_ops.concat(\n [np.array([0], dtype=np.int32), padded_shape], 0)\n return gen_array_ops.empty(batch_shape, dtype=dataset.output_types)\n\n def batch_reduce_fn(state, value):\n return array_ops.concat([state, 
[value]], 0)\n\n def pad_fn(value):\n shape = array_ops.shape(value)\n left = array_ops.zeros_like(shape)\n right = padded_shape - shape\n return array_ops.pad(\n value, array_ops.stack([left, right], 1), constant_values=padding_value)\n\n batch_reducer = grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)\n return get_single_element.get_single_element(\n dataset.map(pad_fn).apply(\n grouping.group_by_reducer(key_fn, batch_reducer)))\n\n\ndef _padded_batch_sparse_window(dataset, padded_shape):\n \"\"\"Batches a window of sparse tensors with padding.\"\"\"\n\n def key_fn(_):\n return np.int64(0)\n\n def max_init_fn(_):\n return convert.partial_shape_to_tensor(padded_shape)\n\n def max_reduce_fn(state, value):\n \"\"\"Computes the maximum shape to pad to.\"\"\"\n condition = math_ops.reduce_all(\n math_ops.logical_or(\n math_ops.less_equal(value.dense_shape, padded_shape),\n math_ops.equal(padded_shape, -1)))\n assert_op = control_flow_ops.Assert(condition, [\n \"Actual shape greater than padded shape: \", value.dense_shape,\n padded_shape\n ])\n with ops.control_dependencies([assert_op]):\n return math_ops.maximum(state, value.dense_shape)\n\n def finalize_fn(state):\n return state\n\n # Compute the padded shape.\n max_reducer = grouping.Reducer(max_init_fn, max_reduce_fn, finalize_fn)\n padded_shape = get_single_element.get_single_element(\n dataset.apply(grouping.group_by_reducer(key_fn, max_reducer)))\n\n def batch_init_fn(_):\n indices_shape = array_ops.concat([[0], [array_ops.size(padded_shape) + 1]],\n 0)\n return sparse_tensor.SparseTensor(\n indices=gen_array_ops.empty(indices_shape, dtype=dtypes.int64),\n values=constant_op.constant([], shape=[0], dtype=dataset.output_types),\n dense_shape=array_ops.concat(\n [np.array([0], dtype=np.int64), padded_shape], 0))\n\n def batch_reduce_fn(state, value):\n padded_value = sparse_tensor.SparseTensor(\n indices=value.indices, values=value.values, dense_shape=padded_shape)\n reshaped_value = sparse_ops.sparse_reshape(\n padded_value,\n array_ops.concat(\n [np.array([1], dtype=np.int64), padded_value.dense_shape], 0))\n return sparse_ops.sparse_concat(0, [state, reshaped_value])\n\n reducer = grouping.Reducer(batch_init_fn, batch_reduce_fn, finalize_fn)\n return get_single_element.get_single_element(\n dataset.apply(grouping.group_by_reducer(key_fn, reducer)))\n\n\nclass _UnbatchDataset(dataset_ops.UnaryDataset):\n \"\"\"A dataset that splits the elements of its input into multiple elements.\"\"\"\n\n def __init__(self, input_dataset):\n \"\"\"See `unbatch()` for more details.\"\"\"\n super(_UnbatchDataset, self).__init__(input_dataset)\n flat_shapes = nest.flatten(input_dataset.output_shapes)\n if any(s.ndims == 0 for s in flat_shapes):\n raise ValueError(\"Cannot unbatch an input with scalar components.\")\n known_batch_dim = tensor_shape.Dimension(None)\n for s in flat_shapes:\n try:\n known_batch_dim = known_batch_dim.merge_with(s[0])\n except ValueError:\n raise ValueError(\"Cannot unbatch an input whose components have \"\n \"different batch sizes.\")\n self._input_dataset = input_dataset\n\n self._structure = structure.convert_legacy_structure(\n input_dataset.output_types,\n nest.map_structure(lambda s: s[1:], input_dataset.output_shapes),\n input_dataset.output_classes)\n\n def _as_variant_tensor(self):\n return ged_ops.experimental_unbatch_dataset(\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\n **dataset_ops.flat_structure(self))\n\n @property\n def _element_structure(self):\n return 
self._structure\n\n\n@tf_export(\"data.experimental.unbatch\")\ndef unbatch():\n \"\"\"Splits elements of a dataset into multiple elements on the batch dimension.\n\n For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,\n where `B` may vary for each input element, then for each element in the\n dataset, the unbatched dataset will contain `B` consecutive elements\n of shape `[a0, a1, ...]`.\n\n ```python\n # NOTE: The following example uses `{ ... }` to represent the contents\n # of a dataset.\n a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }\n\n a.apply(tf.data.experimental.unbatch()) == {\n 'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'}\n ```\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n `tf.data.Dataset.apply`.\n \"\"\"\n\n def _apply_fn(dataset):\n \"\"\"Function from `Dataset` to `Dataset` that applies the transformation.\"\"\"\n if not sparse.any_sparse(dataset.output_classes):\n return _UnbatchDataset(dataset)\n\n # NOTE(mrry): We must ensure that any SparseTensors in `dataset`\n # are normalized to the rank-1 dense representation, so that the\n # sparse-oblivious unbatching logic will slice them\n # appropriately. This leads to a somewhat inefficient re-encoding step\n # for all SparseTensor components.\n # TODO(mrry): Consider optimizing this in future\n # if it turns out to be a bottleneck.\n def normalize(arg, *rest):\n if rest:\n return sparse.serialize_many_sparse_tensors((arg,) + rest)\n else:\n return sparse.serialize_many_sparse_tensors(arg)\n\n normalized_dataset = dataset.map(normalize)\n\n # NOTE(mrry): Our `map()` has lost information about the sparseness\n # of any SparseTensor components, so re-apply the structure of the\n # original dataset.\n restructured_dataset = _RestructuredDataset(\n normalized_dataset,\n dataset.output_types,\n dataset.output_shapes,\n dataset.output_classes,\n allow_unsafe_cast=True)\n return _UnbatchDataset(restructured_dataset)\n\n return _apply_fn\n\n\nclass _DenseToSparseBatchDataset(dataset_ops.UnaryDataset):\n \"\"\"A `Dataset` that batches ragged dense elements into `tf.SparseTensor`s.\"\"\"\n\n def __init__(self, input_dataset, batch_size, row_shape):\n \"\"\"See `Dataset.dense_to_sparse_batch()` for more details.\"\"\"\n super(_DenseToSparseBatchDataset, self).__init__(input_dataset)\n if not isinstance(input_dataset.output_types, dtypes.DType):\n raise TypeError(\"DenseToSparseDataset requires an input whose elements \"\n \"have a single component, whereas the input has %r.\" %\n input_dataset.output_types)\n self._input_dataset = input_dataset\n self._batch_size = batch_size\n self._row_shape = row_shape\n self._structure = structure.SparseTensorStructure(\n input_dataset.output_types,\n tensor_shape.vector(None).concatenate(self._row_shape))\n\n def _as_variant_tensor(self):\n return ged_ops.experimental_dense_to_sparse_batch_dataset(\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\n self._batch_size,\n row_shape=convert.partial_shape_to_tensor(self._row_shape),\n **dataset_ops.flat_structure(self))\n\n @property\n def _element_structure(self):\n return self._structure\n\n\nclass _RestructuredDataset(dataset_ops.UnaryDataset):\n \"\"\"An internal helper for changing the structure and shape of a dataset.\"\"\"\n\n def __init__(self,\n dataset,\n output_types,\n output_shapes=None,\n output_classes=None,\n allow_unsafe_cast=False):\n \"\"\"Creates a new dataset with the given output types and shapes.\n\n The given `dataset` must have a structure 
that is convertible:\n * `dataset.output_types` must be the same as `output_types` module nesting.\n * Each shape in `dataset.output_shapes` must be compatible with each shape\n in `output_shapes` (if given).\n\n Note: This helper permits \"unsafe casts\" for shapes, equivalent to using\n `tf.Tensor.set_shape()` where domain-specific knowledge is available.\n\n Args:\n dataset: A `Dataset` object.\n output_types: A nested structure of `tf.DType` objects.\n output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects.\n If omitted, the shapes will be inherited from `dataset`.\n output_classes: (Optional.) A nested structure of class types.\n If omitted, the class types will be inherited from `dataset`.\n allow_unsafe_cast: (Optional.) If `True`, the caller may switch the\n reported output types and shapes of the restructured dataset, e.g. to\n switch a sparse tensor represented as `tf.variant` to its user-visible\n type and shape.\n\n Raises:\n ValueError: If either `output_types` or `output_shapes` is not compatible\n with the structure of `dataset`.\n \"\"\"\n super(_RestructuredDataset, self).__init__(dataset)\n self._input_dataset = dataset\n\n if not allow_unsafe_cast:\n # Validate that the types are compatible.\n output_types = nest.map_structure(dtypes.as_dtype, output_types)\n flat_original_types = nest.flatten(dataset.output_types)\n flat_new_types = nest.flatten(output_types)\n if flat_original_types != flat_new_types:\n raise ValueError(\n \"Dataset with output types %r cannot be restructured to have \"\n \"output types %r\" % (dataset.output_types, output_types))\n\n if output_shapes is None:\n # Inherit shapes from the original `dataset`.\n output_shapes = nest.pack_sequence_as(\n output_types, nest.flatten(dataset.output_shapes))\n else:\n if not allow_unsafe_cast:\n # Validate that the shapes are compatible.\n nest.assert_same_structure(output_types, output_shapes)\n flat_original_shapes = nest.flatten(dataset.output_shapes)\n flat_new_shapes = nest.flatten_up_to(output_types, output_shapes)\n\n for original_shape, new_shape in zip(flat_original_shapes,\n flat_new_shapes):\n if not original_shape.is_compatible_with(new_shape):\n raise ValueError(\n \"Dataset with output shapes %r cannot be restructured to have \"\n \"incompatible output shapes %r\" % (dataset.output_shapes,\n output_shapes))\n output_shapes = nest.map_structure_up_to(\n output_types, tensor_shape.as_shape, output_shapes)\n if output_classes is None:\n # Inherit class types from the original `dataset`.\n output_classes = nest.pack_sequence_as(\n output_types, nest.flatten(dataset.output_classes))\n\n self._structure = structure.convert_legacy_structure(\n output_types, output_shapes, output_classes)\n\n def _as_variant_tensor(self):\n return self._input_dataset._as_variant_tensor() # pylint: disable=protected-access\n\n @property\n def _element_structure(self):\n return self._structure\n\n\nclass _MapAndBatchDataset(dataset_ops.UnaryDataset):\n \"\"\"A `Dataset` that maps a function over a batch of elements.\"\"\"\n\n def __init__(self, input_dataset, map_func, batch_size, num_parallel_calls,\n drop_remainder):\n \"\"\"See `Dataset.map()` for details.\"\"\"\n super(_MapAndBatchDataset, self).__init__(input_dataset)\n self._input_dataset = input_dataset\n self._map_func = dataset_ops.StructuredFunctionWrapper(\n map_func, \"tf.data.experimental.map_and_batch()\", dataset=input_dataset)\n self._batch_size_t = ops.convert_to_tensor(\n batch_size, dtype=dtypes.int64, name=\"batch_size\")\n 
self._num_parallel_calls_t = ops.convert_to_tensor(\n num_parallel_calls, dtype=dtypes.int64, name=\"num_parallel_calls\")\n self._drop_remainder_t = ops.convert_to_tensor(\n drop_remainder, dtype=dtypes.bool, name=\"drop_remainder\")\n\n constant_drop_remainder = tensor_util.constant_value(self._drop_remainder_t)\n if constant_drop_remainder:\n # NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)\n # or `False` (explicitly retaining the remainder).\n self._structure = self._map_func.output_structure._batch( # pylint: disable=protected-access\n tensor_util.constant_value(self._batch_size_t))\n else:\n self._structure = self._map_func.output_structure._batch(None) # pylint: disable=protected-access\n\n def _functions(self):\n return [self._map_func]\n\n def _as_variant_tensor(self):\n # pylint: disable=protected-access\n return ged_ops.experimental_map_and_batch_dataset(\n self._input_dataset._as_variant_tensor(),\n self._map_func.function.captured_inputs,\n f=self._map_func.function,\n batch_size=self._batch_size_t,\n num_parallel_calls=self._num_parallel_calls_t,\n drop_remainder=self._drop_remainder_t,\n preserve_cardinality=True,\n **dataset_ops.flat_structure(self))\n\n @property\n def _element_structure(self):\n return self._structure\n\n\n@tf_export(\"data.experimental.map_and_batch\")\ndef map_and_batch(map_func,\n batch_size,\n num_parallel_batches=None,\n drop_remainder=False,\n num_parallel_calls=None):\n \"\"\"Fused implementation of `map` and `batch`.\n\n Maps `map_func` across `batch_size` consecutive elements of this dataset\n and then combines them into a batch. Functionally, it is equivalent to `map`\n followed by `batch`. However, by fusing the two transformations together, the\n implementation can be more efficient. Surfacing this transformation in the API\n is temporary. Once automatic input pipeline optimization is implemented,\n the fusing of `map` and `batch` will happen automatically and this API will be\n deprecated.\n\n Args:\n map_func: A function mapping a nested structure of tensors to another\n nested structure of tensors.\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n consecutive elements of this dataset to combine in a single batch.\n num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,\n representing the number of batches to create in parallel. On one hand,\n higher values can help mitigate the effect of stragglers. On the other\n hand, higher values can increase contention if CPU is scarce.\n drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing\n whether the last batch should be dropped in case its size is smaller than\n desired; the default behavior is not to drop the smaller batch.\n num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,\n representing the number of elements to process in parallel. If not\n specified, `batch_size * num_parallel_batches` elements will be processed\n in parallel. 
If the value `tf.data.experimental.AUTOTUNE` is used, then\n the number of parallel calls is set dynamically based on available CPU.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n `tf.data.Dataset.apply`.\n\n Raises:\n ValueError: If both `num_parallel_batches` and `num_parallel_calls` are\n specified.\n \"\"\"\n\n if num_parallel_batches is None and num_parallel_calls is None:\n num_parallel_calls = batch_size\n elif num_parallel_batches is not None and num_parallel_calls is None:\n num_parallel_calls = batch_size * num_parallel_batches\n elif num_parallel_batches is not None and num_parallel_calls is not None:\n raise ValueError(\"The `num_parallel_batches` and `num_parallel_calls` \"\n \"arguments are mutually exclusive.\")\n\n def _apply_fn(dataset):\n return _MapAndBatchDataset(dataset, map_func, batch_size,\n num_parallel_calls, drop_remainder)\n\n return _apply_fn\n"
] | [
[
"tensorflow.python.data.util.nest.map_structure_up_to",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.data.ops.dataset_ops.flat_structure",
"tensorflow.python.data.ops.dataset_ops.StructuredFunctionWrapper",
"tensorflow.python.ops.control_flow_ops.Assert",
"tensorflow.python.ops.sparse_ops.sparse_concat",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.data.experimental.ops.grouping.Reducer",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.data.util.nest.assert_same_structure",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.data.util.nest.flatten_up_to",
"tensorflow.python.ops.check_ops.assert_equal",
"numpy.int64",
"tensorflow.python.data.util.sparse.any_sparse",
"tensorflow.python.data.util.structure.convert_legacy_structure",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.data.util.nest.map_structure",
"tensorflow.python.data.util.nest.flatten",
"numpy.array",
"tensorflow.python.data.experimental.ops.grouping.group_by_reducer",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.math_ops.less_equal",
"tensorflow.python.data.util.convert.partial_shape_to_tensor",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.framework.tensor_shape.vector",
"tensorflow.python.ops.gen_array_ops.empty",
"tensorflow.python.data.util.sparse.serialize_many_sparse_tensors",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.12"
]
}
] |
jsonbruce/MTSAnomalyDetection | [
"94e1b3177f8260804a4f9079ce7358f984521471",
"94e1b3177f8260804a4f9079ce7358f984521471",
"94e1b3177f8260804a4f9079ce7358f984521471"
] | [
"test/prophet_model.py",
"tsbitmaps/test/test_tsbitmaps.py",
"ensemblation/model.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n\n# Created by max on 17-5-4.\n\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom fbprophet import Prophet\nfrom pandas import Series, DataFrame\n\n\nDATA_FILE = \"dataset/data0.csv\"\n\ndef main(args):\n data = pd.read_csv(DATA_FILE, parse_dates=True, index_col='timestamp')\n\n # Re-group data to fit for Prophet data format\n data['ds'] = data.index\n data = data.reindex(columns=['ds', 'v0', 'v1', 'result'])\n data = data.rename(columns={\"v0\": 'y'})\n\n model = Prophet()\n model.fit(data.ix[data.index[0:500]])\n\n future = model.make_future_dataframe(120, 'H')\n forecast = model.predict(future)\n\n model.plot(forecast)\n model.plot_components(forecast)\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n main(sys.argv)",
"# coding=utf-8\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport unittest\n\nsys.path.append(os.path.dirname(__file__))\n\nfrom tsbitmaps.tsbitmapper import TSBitMapper\nfrom tsbitmaps.bitmapviz import create_bitmap_grid\n\n\nclass TestBitmapAlgorithm(unittest.TestCase):\n def test_bitmap(self):\n bmp = TSBitMapper(feature_window_size=5, bins=8, level_size=2,\n lag_window_size=10, lead_window_size=10, q=95)\n x = np.random.rand(500)\n binned_x = bmp.discretize(x)\n\n self.assertEqual(len(binned_x), len(x))\n self.assertTrue(set(binned_x) == set('01234567'))\n\n symbol_seq = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '1', '2', '3') # '01234567890123'\n sample_bitmap = bmp.get_bitmap(symbol_seq)\n self.assertEqual(len(sample_bitmap), 10)\n self.assertTrue(('4', '5') in sample_bitmap.keys())\n self.assertTrue(('9', '0') in sample_bitmap.keys())\n self.assertEqual(sample_bitmap['0', '1'], 1)\n\n sample_bitmap_w = bmp.get_bitmap_with_feat_window(symbol_seq)\n self.assertEqual(len(sample_bitmap_w), 8)\n self.assertTrue(('4', '5') not in sample_bitmap_w.keys())\n self.assertTrue(('9', '0') not in sample_bitmap_w.keys())\n self.assertEqual(sample_bitmap_w[('0', '1')], 1)\n\n ypred = bmp.fit_predict(x)\n scores = bmp.get_ref_bitmap_scores()\n self.assertTrue((scores[0:bmp._lag_window_size] == 0.0).all())\n self.assertTrue((scores[bmp._lag_window_size:-bmp._lead_window_size] >= 0).all())\n self.assertTrue(0 < (ypred == -1).sum() <= 25)\n\n def test_anomaly_detection_ecg(self):\n ecg_norm = np.loadtxt('data/ecg_normal.txt')\n ecg_anom = np.loadtxt('data/ecg_anom.txt')\n\n bmp = TSBitMapper(feature_window_size=20, bins=5, level_size=3, lag_window_size=200, lead_window_size=40)\n ypred_unsupervised = bmp.fit_predict(ecg_anom)\n self.assertTrue(0 < (ypred_unsupervised == -1).sum() <= 3)\n\n bmp.fit(ecg_norm)\n ypred_supervised = bmp.predict(ecg_anom)\n self.assertTrue(0 < (ypred_supervised == -1).sum() <= 3)\n\n def test_anomaly_detection_pattern(self):\n pattern_norm = np.loadtxt('data/pattern_normal.txt')\n pattern_anom = pd.read_csv('data/pattern_anom.txt').iloc[:, 0]\n\n bmp = TSBitMapper(feature_window_size=50, bins=5, level_size=2, lag_window_size=200, lead_window_size=100)\n ypred_unsupervised = bmp.fit_predict(pattern_anom)\n self.assertTrue(0 < (ypred_unsupervised == -1).sum() <= 3)\n\n bmp.fit(pattern_norm)\n ypred_supervised = bmp.predict(pattern_anom)\n self.assertTrue(0 < (ypred_supervised == -1).sum() <= 3)\n\n # @unittest.skip(\"tmp\")\n def test_bitmapviz(self):\n bmp = TSBitMapper(feature_window_size=20, bins=12, level_size=3, lag_window_size=200, lead_window_size=40)\n ecg_anom = np.loadtxt('data/ecg_anom.txt')\n ecg_bitmap = bmp.get_tsbitmap(ecg_anom)\n bmp_grid = create_bitmap_grid(ecg_bitmap, n=4, num_bins=12, level_size=3)\n self.assertEqual((bmp_grid > 0).sum(), len(ecg_bitmap))\n self.assertEqual(bmp_grid.shape, (27, 64))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"#!/usr/bin/env python\n# coding=utf-8\n\n# Created by max on 17-10-10\n\nfrom __future__ import division # for divide operation in python 2\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport random\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nimport matplotlib\n\nmatplotlib.use('Agg')\n\nfrom keras.layers import LSTM\nfrom keras.layers import Dense, Activation, Dropout, Bidirectional\nfrom keras.models import Sequential, load_model\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\nfrom sklearn.base import BaseEstimator, ClassifierMixin\n\n\nclass BiLSTMClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Bidirectional LSTM Model for Binary Classification.\n\n \"\"\"\n\n def __init__(self, input_shape, hidden_layers,\n loss, optimizer,\n epochs, batch_size,\n verbose):\n \"\"\"Initializing the classifier\n\n :param input_shape: tuple.\n :param hidden_layers: tuple. (units, ) the ith units is the total units of ith hidden layer.\n All len(hidden_layers) hidden layers.\n :param loss: str.\n :param optimizer: str.\n :param epochs: int\n :param batch_size: int.\n :param verbose: int.\n \"\"\"\n self.input_shape = input_shape\n self.loss = loss\n self.optimizer = optimizer\n self.epochs = epochs\n self.batch_size = batch_size\n self.verbose = verbose\n self.hidden_layers = hidden_layers\n\n # Construct model\n self.model = Sequential()\n\n for i, units in enumerate(self.hidden_layers):\n if i == 0:\n self.model.add(Bidirectional(LSTM(units, return_sequences=True), input_shape=input_shape))\n elif i == len(self.hidden_layers) - 1:\n self.model.add(Bidirectional(LSTM(units, return_sequences=False)))\n else:\n self.model.add(Bidirectional(LSTM(units, return_sequences=True)))\n\n self.model.add(Dense(1, activation='tanh'))\n\n # Configures the learning process.\n self.model.compile(loss=self.loss, optimizer=self.optimizer)\n\n def fit(self, X, y):\n \"\"\"Fit classifier.\n\n :param X: {array-like, sparse matrix}, shape (n_samples, n_features)\n The input data.\n :param y: array-like, shape (n_samples,) or (n_samples, n_outputs)\n The target values (class labels in classification, real numbers in\n regression).\n :return: a trained LSTM model.\n \"\"\"\n train_x = X.reshape(X.shape[0], 1, X.shape[1])\n train_y = y\n\n filepath = \"weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5\"\n callbacks = [\n EarlyStopping(monitor='val_loss', patience=3, verbose=0),\n ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True, verbose=1),\n ]\n\n self.model.fit(train_x, train_y,\n epochs=self.epochs,\n batch_size=self.batch_size,\n validation_split=0.05, callbacks=callbacks,\n verbose=self.verbose)\n\n return self\n\n def predict(self, X):\n \"\"\"Predict using the trained model\n\n :param X: {array-like, sparse matrix}, shape (n_samples, n_features)\n The input data.\n :return: pred_y.\n \"\"\"\n test_x = X\n if len(X.shape) == 2:\n test_x = X.reshape(X.shape[0], 1, X.shape[1])\n\n pred_y = self.model.predict(test_x)\n\n pred_y = pred_y.round()\n pred_y = pred_y.ravel()\n pred_y = pred_y.astype('int64')\n\n return pred_y\n\n\nif __name__ == \"__main__\":\n pass"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.show"
],
[
"pandas.read_csv",
"numpy.random.rand",
"numpy.loadtxt"
],
[
"matplotlib.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FeryET/pytorch-lightning | [
"b1f8b111b5085373599758a4e155a482259cdbf0",
"b1f8b111b5085373599758a4e155a482259cdbf0"
] | [
"tests/lite/test_wrappers.py",
"tests/loggers/test_base.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom unittest.mock import ANY, Mock\n\nimport pytest\nimport torch\nfrom torch.utils.data.dataloader import DataLoader\n\nfrom pytorch_lightning.core.mixins import DeviceDtypeModuleMixin\nfrom pytorch_lightning.lite import LightningLite\nfrom pytorch_lightning.lite.wrappers import _LiteDataLoader, _LiteModule, _LiteOptimizer\nfrom tests.helpers.runif import RunIf\n\n\nclass EmptyLite(LightningLite):\n def run(self):\n pass\n\n\ndef test_lite_module_wraps():\n \"\"\"Test that the wrapped module is accessible via the property.\"\"\"\n module = Mock()\n assert _LiteModule(module, Mock()).module is module\n\n\n@RunIf(min_gpus=1)\[email protected](\n \"precision, input_type, expected_type\",\n [\n (32, torch.float16, torch.float32),\n (32, torch.float32, torch.float32),\n (32, torch.float64, torch.float32),\n (32, torch.int, torch.int),\n (16, torch.float32, torch.float16),\n (16, torch.float64, torch.float16),\n (16, torch.long, torch.long),\n pytest.param(\"bf16\", torch.float32, torch.bfloat16, marks=RunIf(min_torch=\"1.10\")),\n pytest.param(\"bf16\", torch.float64, torch.bfloat16, marks=RunIf(min_torch=\"1.10\")),\n pytest.param(\"bf16\", torch.bool, torch.bool, marks=RunIf(min_torch=\"1.10\")),\n ],\n)\ndef test_lite_module_forward_conversion(precision, input_type, expected_type):\n \"\"\"Test that the LiteModule performs autocasting on the input tensors and during forward().\"\"\"\n lite = EmptyLite(precision=precision, accelerator=\"gpu\", devices=1)\n device = torch.device(\"cuda\", 0)\n\n def check_autocast(forward_input):\n assert precision != 16 or torch.is_autocast_enabled()\n return forward_input\n\n module = Mock(wraps=torch.nn.Identity(), side_effect=check_autocast)\n lite_module = _LiteModule(module, lite._precision_plugin).to(device)\n out = lite_module(torch.tensor([1, 2, 3], dtype=input_type, device=device))\n assert module.call_args[0][0].dtype == expected_type\n assert out.dtype == input_type or out.dtype == torch.get_default_dtype()\n\n\[email protected](\n \"device\", [torch.device(\"cpu\"), pytest.param(torch.device(\"cuda\", 0), marks=RunIf(min_gpus=1))]\n)\[email protected](\"dtype\", [torch.float32, torch.float16])\ndef test_lite_module_device_dtype_propagation(device, dtype):\n \"\"\"Test that the LiteModule propagates device and dtype properties to its submodules (e.g. 
torchmetrics).\"\"\"\n\n class DeviceModule(DeviceDtypeModuleMixin):\n pass\n\n device_module = DeviceModule()\n lite_module = _LiteModule(device_module, Mock())\n lite_module.to(device)\n assert device_module.device == device\n assert lite_module.device == device\n\n lite_module.to(dtype)\n assert device_module.dtype == dtype\n assert lite_module.dtype == dtype\n\n\ndef test_lite_dataloader_iterator():\n \"\"\"Test that the iteration over a LiteDataLoader wraps the iterator of the underlying dataloader (no automatic\n device placement).\"\"\"\n dataloader = DataLoader(range(5), batch_size=2)\n lite_dataloader = _LiteDataLoader(dataloader)\n assert len(lite_dataloader) == len(dataloader) == 3\n\n iterator = iter(dataloader)\n lite_iterator = iter(lite_dataloader)\n\n assert torch.equal(next(iterator), next(lite_iterator))\n assert torch.equal(next(iterator), next(lite_iterator))\n assert torch.equal(next(iterator), next(lite_iterator))\n\n with pytest.raises(StopIteration):\n next(iterator)\n\n with pytest.raises(StopIteration):\n next(lite_iterator)\n\n\[email protected](\n \"src_device, dest_device\",\n [\n (torch.device(\"cpu\"), torch.device(\"cpu\")),\n pytest.param(torch.device(\"cpu\"), torch.device(\"cuda\", 0), marks=RunIf(min_gpus=1)),\n pytest.param(torch.device(\"cuda\", 0), torch.device(\"cpu\"), marks=RunIf(min_gpus=1)),\n ],\n)\ndef test_lite_dataloader_device_placement(src_device, dest_device):\n \"\"\"Test that the LiteDataLoader moves data to the device in its iterator.\"\"\"\n sample0 = torch.tensor(0, device=src_device)\n sample1 = torch.tensor(1, device=src_device)\n sample2 = {\"data\": torch.tensor(2, device=src_device)}\n sample3 = {\"data\": torch.tensor(3, device=src_device)}\n dataloader = DataLoader([sample0, sample1, sample2, sample3], batch_size=2)\n lite_dataloader = _LiteDataLoader(dataloader=dataloader, device=dest_device)\n iterator = iter(lite_dataloader)\n\n batch0 = next(iterator)\n assert torch.equal(batch0, torch.tensor([0, 1], device=dest_device))\n\n batch1 = next(iterator)\n assert torch.equal(batch1[\"data\"], torch.tensor([2, 3], device=dest_device))\n\n\ndef test_lite_optimizer_wraps():\n \"\"\"Test that the LiteOptimizer fully wraps the optimizer.\"\"\"\n optimizer_cls = torch.optim.SGD\n optimizer = Mock(spec=optimizer_cls)\n lite_optimizer = _LiteOptimizer(optimizer, Mock())\n assert lite_optimizer.optimizer is optimizer\n assert isinstance(lite_optimizer, optimizer_cls)\n\n\ndef test_lite_optimizer_state_dict():\n \"\"\"Test that the LiteOptimizer calls into the strategy to collect the state.\"\"\"\n optimizer = Mock()\n strategy = Mock()\n lite_optimizer = _LiteOptimizer(optimizer=optimizer, strategy=strategy)\n lite_optimizer.state_dict()\n strategy.optimizer_state.assert_called_with(optimizer)\n\n\ndef test_lite_optimizer_steps():\n \"\"\"Test that the LiteOptimizer forwards the step() and zero_grad() calls to the wrapped optimizer.\"\"\"\n optimizer = Mock()\n strategy = Mock()\n strategy.optimizer_step.return_value = 123\n lite_optimizer = _LiteOptimizer(optimizer=optimizer, strategy=strategy)\n step_output = lite_optimizer.step()\n assert step_output == 123\n strategy.optimizer_step.assert_called_once()\n strategy.optimizer_step.assert_called_with(optimizer, opt_idx=0, closure=ANY, model=strategy.model)\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pickle\nfrom argparse import Namespace\nfrom copy import deepcopy\nfrom typing import Any, Dict, Optional\nfrom unittest.mock import MagicMock, patch\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.loggers import LightningLoggerBase, LoggerCollection, TensorBoardLogger\nfrom pytorch_lightning.loggers.base import DummyExperiment, DummyLogger\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.logger import _convert_params, _sanitize_params\nfrom pytorch_lightning.utilities.rank_zero import rank_zero_only\nfrom tests.helpers.boring_model import BoringDataModule, BoringModel\n\n\ndef test_logger_collection():\n mock1 = MagicMock()\n mock2 = MagicMock()\n\n with pytest.deprecated_call(match=\"`LoggerCollection` is deprecated in v1.6\"):\n logger = LoggerCollection([mock1, mock2])\n\n assert logger[0] == mock1\n assert logger[1] == mock2\n\n assert logger.experiment[0] == mock1.experiment\n assert logger.experiment[1] == mock2.experiment\n\n assert logger.save_dir is None\n\n logger.update_agg_funcs({\"test\": np.mean}, np.sum)\n mock1.update_agg_funcs.assert_called_once_with({\"test\": np.mean}, np.sum)\n mock2.update_agg_funcs.assert_called_once_with({\"test\": np.mean}, np.sum)\n\n logger.log_metrics(metrics={\"test\": 2.0}, step=4)\n mock1.log_metrics.assert_called_once_with(metrics={\"test\": 2.0}, step=4)\n mock2.log_metrics.assert_called_once_with(metrics={\"test\": 2.0}, step=4)\n\n logger.finalize(\"success\")\n mock1.finalize.assert_called_once()\n mock2.finalize.assert_called_once()\n\n\ndef test_logger_collection_unique_names():\n unique_name = \"name1\"\n logger1 = CustomLogger(name=unique_name)\n logger2 = CustomLogger(name=unique_name)\n\n with pytest.deprecated_call(match=\"`LoggerCollection` is deprecated in v1.6\"):\n logger = LoggerCollection([logger1, logger2])\n\n assert logger.name == unique_name\n\n\ndef test_logger_collection_names_order():\n loggers = [CustomLogger(name=n) for n in (\"name1\", \"name2\", \"name1\", \"name3\")]\n with pytest.deprecated_call(match=\"`LoggerCollection` is deprecated in v1.6\"):\n logger = LoggerCollection(loggers)\n assert logger.name == f\"{loggers[0].name}_{loggers[1].name}_{loggers[3].name}\"\n\n\ndef test_logger_collection_unique_versions():\n unique_version = \"1\"\n logger1 = CustomLogger(version=unique_version)\n logger2 = CustomLogger(version=unique_version)\n\n with pytest.deprecated_call(match=\"`LoggerCollection` is deprecated in v1.6\"):\n logger = LoggerCollection([logger1, logger2])\n\n assert logger.version == unique_version\n\n\ndef test_logger_collection_versions_order():\n loggers = [CustomLogger(version=v) for v in (\"1\", \"2\", \"1\", \"3\")]\n with pytest.deprecated_call(match=\"`LoggerCollection` is deprecated in v1.6\"):\n logger = LoggerCollection(loggers)\n assert logger.version == 
f\"{loggers[0].version}_{loggers[1].version}_{loggers[3].version}\"\n\n\nclass CustomLogger(LightningLoggerBase):\n def __init__(self, experiment: str = \"test\", name: str = \"name\", version: str = \"1\"):\n super().__init__()\n self._experiment = experiment\n self._name = name\n self._version = version\n self.hparams_logged = None\n self.metrics_logged = {}\n self.finalized = False\n self.after_save_checkpoint_called = False\n\n @property\n def experiment(self):\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params):\n self.hparams_logged = params\n\n @rank_zero_only\n def log_metrics(self, metrics, step):\n self.metrics_logged = metrics\n\n @rank_zero_only\n def finalize(self, status):\n self.finalized_status = status\n\n @property\n def save_dir(self) -> Optional[str]:\n \"\"\"Return the root directory where experiment logs get saved, or `None` if the logger does not save data\n locally.\"\"\"\n return None\n\n @property\n def name(self):\n return self._name\n\n @property\n def version(self):\n return self._version\n\n def after_save_checkpoint(self, checkpoint_callback):\n self.after_save_checkpoint_called = True\n\n\ndef test_custom_logger(tmpdir):\n class CustomModel(BoringModel):\n def training_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n self.log(\"train_loss\", loss)\n return {\"loss\": loss}\n\n logger = CustomLogger()\n model = CustomModel()\n trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=logger, default_root_dir=tmpdir)\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert logger.metrics_logged != {}\n assert logger.after_save_checkpoint_called\n assert logger.finalized_status == \"success\"\n\n\ndef test_multiple_loggers(tmpdir):\n class CustomModel(BoringModel):\n def training_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n self.log(\"train_loss\", loss)\n return {\"loss\": loss}\n\n model = CustomModel()\n logger1 = CustomLogger()\n logger2 = CustomLogger()\n\n trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=[logger1, logger2], default_root_dir=tmpdir)\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n assert logger1.hparams_logged is None\n assert logger1.metrics_logged != {}\n assert logger1.finalized_status == \"success\"\n\n assert logger2.hparams_logged is None\n assert logger2.metrics_logged != {}\n assert logger2.finalized_status == \"success\"\n\n\ndef test_multiple_loggers_pickle(tmpdir):\n \"\"\"Verify that pickling trainer with multiple loggers works.\"\"\"\n\n logger1 = CustomLogger()\n logger2 = CustomLogger()\n\n trainer = Trainer(logger=[logger1, logger2])\n pkl_bytes = pickle.dumps(trainer)\n trainer2 = pickle.loads(pkl_bytes)\n for logger in trainer2.loggers:\n logger.log_metrics({\"acc\": 1.0}, 0)\n\n for logger in trainer2.loggers:\n assert logger.metrics_logged == {\"acc\": 1.0}\n\n\ndef test_adding_step_key(tmpdir):\n class CustomTensorBoardLogger(TensorBoardLogger):\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.logged_step = 0\n\n def log_metrics(self, metrics, step):\n if \"val_acc\" in metrics:\n assert step == self.logged_step\n\n super().log_metrics(metrics, step)\n\n class CustomModel(BoringModel):\n def training_epoch_end(self, outputs):\n self.logger.logged_step += 1\n self.log_dict({\"step\": self.logger.logged_step, \"train_acc\": self.logger.logged_step / 
10})\n\n def validation_epoch_end(self, outputs):\n self.logger.logged_step += 1\n self.log_dict({\"step\": self.logger.logged_step, \"val_acc\": self.logger.logged_step / 10})\n\n model = CustomModel()\n trainer = Trainer(\n max_epochs=3,\n logger=CustomTensorBoardLogger(save_dir=tmpdir),\n default_root_dir=tmpdir,\n limit_train_batches=0.1,\n limit_val_batches=0.1,\n num_sanity_val_steps=0,\n )\n trainer.fit(model)\n\n\ndef test_dummyexperiment_support_indexing():\n \"\"\"Test that the DummyExperiment can imitate indexing the experiment in a LoggerCollection.\"\"\"\n experiment = DummyExperiment()\n assert experiment[0] == experiment\n\n\ndef test_dummylogger_support_indexing():\n \"\"\"Test that the DummyLogger can imitate indexing of a LoggerCollection.\"\"\"\n logger = DummyLogger()\n assert logger[0] == logger\n\n\ndef test_dummylogger_empty_iterable():\n \"\"\"Test that DummyLogger represents an empty iterable.\"\"\"\n logger = DummyLogger()\n for _ in logger:\n assert False\n\n\ndef test_dummylogger_noop_method_calls():\n \"\"\"Test that the DummyLogger methods can be called with arbitrary arguments.\"\"\"\n logger = DummyLogger()\n logger.log_hyperparams(\"1\", 2, three=\"three\")\n logger.log_metrics(\"1\", 2, three=\"three\")\n\n\ndef test_dummyexperiment_support_item_assignment():\n \"\"\"Test that the DummyExperiment supports item assignment.\"\"\"\n experiment = DummyExperiment()\n experiment[\"variable\"] = \"value\"\n assert experiment[\"variable\"] != \"value\" # this is only a stateless mock experiment\n\n\ndef test_np_sanitization():\n class CustomParamsLogger(CustomLogger):\n def __init__(self):\n super().__init__()\n self.logged_params = None\n\n @rank_zero_only\n def log_hyperparams(self, params):\n params = _convert_params(params)\n params = _sanitize_params(params)\n self.logged_params = params\n\n logger = CustomParamsLogger()\n np_params = {\n \"np.bool_\": np.bool_(1),\n \"np.byte\": np.byte(2),\n \"np.intc\": np.intc(3),\n \"np.int_\": np.int_(4),\n \"np.longlong\": np.longlong(5),\n \"np.single\": np.single(6.0),\n \"np.double\": np.double(8.9),\n \"np.csingle\": np.csingle(7 + 2j),\n \"np.cdouble\": np.cdouble(9 + 4j),\n }\n sanitized_params = {\n \"np.bool_\": True,\n \"np.byte\": 2,\n \"np.intc\": 3,\n \"np.int_\": 4,\n \"np.longlong\": 5,\n \"np.single\": 6.0,\n \"np.double\": 8.9,\n \"np.csingle\": \"(7+2j)\",\n \"np.cdouble\": \"(9+4j)\",\n }\n logger.log_hyperparams(Namespace(**np_params))\n assert logger.logged_params == sanitized_params\n\n\[email protected](\"logger\", [True, False])\n@patch(\"pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_hyperparams\")\ndef test_log_hyperparams_being_called(log_hyperparams_mock, tmpdir, logger):\n class TestModel(BoringModel):\n def __init__(self, param_one, param_two):\n super().__init__()\n self.save_hyperparameters(logger=logger)\n\n model = TestModel(\"pytorch\", \"lightning\")\n trainer = Trainer(\n default_root_dir=tmpdir, max_epochs=1, limit_train_batches=0.1, limit_val_batches=0.1, num_sanity_val_steps=0\n )\n trainer.fit(model)\n\n if logger:\n log_hyperparams_mock.assert_called()\n else:\n log_hyperparams_mock.assert_not_called()\n\n\n@patch(\"pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_hyperparams\")\ndef test_log_hyperparams_key_collision(log_hyperparams_mock, tmpdir):\n class TestModel(BoringModel):\n def __init__(self, hparams: Dict[str, Any]) -> None:\n super().__init__()\n self.save_hyperparameters(hparams)\n\n class TestDataModule(BoringDataModule):\n def __init__(self, 
hparams: Dict[str, Any]) -> None:\n super().__init__()\n self.save_hyperparameters(hparams)\n\n class _Test:\n ...\n\n same_params = {1: 1, \"2\": 2, \"three\": 3.0, \"test\": _Test(), \"4\": torch.tensor(4)}\n model = TestModel(same_params)\n dm = TestDataModule(same_params)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=0.1,\n limit_val_batches=0.1,\n num_sanity_val_steps=0,\n enable_checkpointing=False,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n # there should be no exceptions raised for the same key/value pair in the hparams of both\n # the lightning module and data module\n trainer.fit(model)\n\n obj_params = deepcopy(same_params)\n obj_params[\"test\"] = _Test()\n model = TestModel(same_params)\n dm = TestDataModule(obj_params)\n trainer.fit(model)\n\n diff_params = deepcopy(same_params)\n diff_params.update({1: 0, \"test\": _Test()})\n model = TestModel(same_params)\n dm = TestDataModule(diff_params)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=0.1,\n limit_val_batches=0.1,\n num_sanity_val_steps=0,\n enable_checkpointing=False,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n with pytest.raises(MisconfigurationException, match=\"Error while merging hparams\"):\n trainer.fit(model, dm)\n\n tensor_params = deepcopy(same_params)\n tensor_params.update({\"4\": torch.tensor(3)})\n model = TestModel(same_params)\n dm = TestDataModule(tensor_params)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=0.1,\n limit_val_batches=0.1,\n num_sanity_val_steps=0,\n enable_checkpointing=False,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n with pytest.raises(MisconfigurationException, match=\"Error while merging hparams\"):\n trainer.fit(model, dm)\n"
] | [
[
"torch.tensor",
"torch.get_default_dtype",
"torch.is_autocast_enabled",
"torch.nn.Identity",
"torch.device",
"torch.utils.data.dataloader.DataLoader"
],
[
"numpy.intc",
"numpy.byte",
"numpy.single",
"numpy.int_",
"numpy.cdouble",
"torch.tensor",
"numpy.longlong",
"numpy.csingle",
"numpy.bool_",
"numpy.double"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zjzh/jax | [
"8372b98c4856b6b2363b7bb28abdb4579440a656",
"84dcfcd7e52471f1ac1955d108255467e7950820"
] | [
"jax/_src/device_array.py",
"jax/_src/lax/linalg.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# On-device arrays.\n\nfrom functools import partial, partialmethod\nimport operator\nfrom typing import (Any, List, Optional, Union)\nimport weakref\n\nimport numpy as np\n\nfrom jax import core\nfrom jax._src.config import config\nfrom jax._src import abstract_arrays\nfrom jax._src import dtypes\nfrom jax._src import profiler\nfrom jax._src.lib import xla_client as xc\nimport jax._src.util as util\n\n### device-persistent data\n\nxe = xc._xla\n\nDevice = xc.Device\nBuffer = xe.Buffer\n\n\ndef _forward_method(attrname, self, fun, *args):\n return fun(getattr(self, attrname), *args)\n_forward_to_value = partial(_forward_method, \"_value\")\n\n\n# The following is used for the type xc.Buffer or _DeviceArray.\nDeviceArrayProtocol = Any\nDeviceArray = xc.DeviceArrayBase\n\n\ndef make_device_array(\n aval: core.ShapedArray,\n device: Optional[Device],\n device_buffer: Buffer,\n) -> Union[Buffer, \"_DeviceArray\"]:\n \"\"\"Returns a DeviceArray implementation based on arguments.\n\n This is to be used only within JAX. It will return either a PythonDeviceArray\n or a C++ equivalent implementation.\n \"\"\"\n if isinstance(device_buffer, xc.Buffer):\n\n if device_buffer.aval == aval and device_buffer._device == device:\n return device_buffer\n device_buffer = device_buffer.clone()\n device_buffer._device = device\n device_buffer.aval = aval\n device_buffer.weak_type = aval.weak_type\n return device_buffer\n\n return _DeviceArray(aval, device, device_buffer)\n\n\ndef type_is_device_array(x):\n \"\"\"Returns `True` if `x` is a non-sharded DeviceArray.\n\n Use this function instead of `type(x) is Devicearray`.\n \"\"\"\n type_x = type(x)\n return type_x is _DeviceArray or type_x is xc.Buffer\n\n\ndef device_array_supports_weakrefs():\n try:\n weakref.ref(DeviceArray())\n return True\n except TypeError:\n return False\n\n\nclass _DeviceArray(DeviceArray): # type: ignore\n \"\"\"A DeviceArray is an ndarray backed by a single device memory buffer.\"\"\"\n # We don't subclass ndarray because that would open up a host of issues,\n # but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.\n __slots__ = [\n \"aval\", \"device_buffer\", \"_npy_value\", \"_device\", \"__weakref__\"\n ]\n __array_priority__ = 100\n\n # DeviceArray has methods that are dynamically populated in lax_numpy.py,\n # and this annotation is needed to make pytype happy.\n _HAS_DYNAMIC_ATTRIBUTES = True\n\n def __init__(self, aval: core.ShapedArray, device: Optional[Device],\n device_buffer: Buffer):\n \"\"\"Initializer.\n\n Args:\n aval: The abstract value associated to this array (shape+dtype+weak_type).\n device: The optional sticky device. 
See\n https://jax.readthedocs.io/en/latest/faq.html#controlling-data-and-computation-placement-on-devices\n device_buffer: The underlying buffer owning the on-device data.\n \"\"\"\n DeviceArray.__init__(self)\n self.aval = aval\n self.device_buffer = device_buffer\n self._device = device\n\n self._npy_value = None\n if config.jax_enable_checks:\n assert type(aval) is core.ShapedArray\n npy_value = self._value\n assert npy_value.dtype == aval.dtype and npy_value.shape == aval.shape, (\n aval, npy_value.shape, npy_value.dtype)\n assert (device is None) or device is device_buffer.device()\n\n def _check_if_deleted(self):\n if self.device_buffer is deleted_buffer:\n raise RuntimeError(\"DeviceArray has been deleted.\")\n\n @profiler.annotate_function\n def block_until_ready(self):\n \"\"\"Blocks the caller until the buffer's value has been computed on device.\n\n This method is mostly useful for timing microbenchmarks that wish to\n time how long a computation takes, without transferring the result back\n to the host.\n\n Returns the buffer object (`self`).\n \"\"\"\n self._check_if_deleted()\n self.device_buffer.block_host_until_ready() # pytype: disable=attribute-error\n return self\n\n @property\n def _value(self):\n self._check_if_deleted()\n if self._npy_value is None:\n self._npy_value = self.device_buffer.to_py() # pytype: disable=attribute-error # bind-properties\n self._npy_value.flags.writeable = False\n return self._npy_value\n\n @property\n def shape(self):\n return self.aval.shape\n\n @property\n def dtype(self):\n return self.aval.dtype\n\n @property\n def size(self):\n return util.prod(self.aval.shape)\n\n @property\n def ndim(self):\n return len(self.aval.shape)\n\n def device(self):\n self._check_if_deleted()\n return self.device_buffer.device() # pytype: disable=attribute-error\n\n def copy_to_host_async(self):\n \"\"\"Requests a copy of the buffer to the host.\"\"\"\n self._check_if_deleted()\n if self._npy_value is None:\n self.device_buffer.copy_to_host_async() # pytype: disable=attribute-error\n\n def delete(self):\n \"\"\"Deletes the device array and any cached copy on the host.\n\n It is an error to access the contents of a `DeviceArray` after it has\n been deleted.\n\n Use of this method is optional; device buffers will be reclaimed\n automatically by Python when a DeviceArray object is garbage collected.\n However, it is sometimes useful to have more explicit control over the\n time of deletion.\n \"\"\"\n self.device_buffer.delete() # pytype: disable=attribute-error\n self.device_buffer = deleted_buffer\n self._npy_value = None\n\n @property\n def __cuda_array_interface__(self):\n return self.device_buffer.__cuda_array_interface__ # pytype: disable=attribute-error # bind-properties\n\n\n# Adding methods dynamically to both _DeviceArray and xc.Buffer\n# pylint: disable=protected-access\nfor device_array in [DeviceArray]:\n\n\n def copy(self):\n \"\"\"Returns an ndarray (backed by host memory, not device memory).\"\"\"\n return np.asarray(self)\n setattr(device_array, \"copy\", copy)\n\n def __repr__(self):\n line_width = np.get_printoptions()[\"linewidth\"]\n prefix = '{}('.format(self.__class__.__name__.lstrip('_'))\n s = np.array2string(self._value, prefix=prefix, suffix=',',\n separator=', ', max_line_width=line_width)\n if self.aval is not None and self.aval.weak_type:\n dtype_str = f'dtype={self.dtype.name}, weak_type=True)'\n else:\n dtype_str = f'dtype={self.dtype.name})'\n last_line_len = len(s) - s.rfind('\\n') + 1\n sep = ' '\n if last_line_len + 
len(dtype_str) + 1 > line_width:\n sep = ' ' * len(prefix)\n return \"{}{},{}{}\".format(prefix, s, sep, dtype_str)\n\n setattr(device_array, \"__repr__\", __repr__)\n\n def item(self):\n if dtypes.issubdtype(self.dtype, np.complexfloating):\n return complex(self)\n elif dtypes.issubdtype(self.dtype, np.floating):\n return float(self)\n elif dtypes.issubdtype(self.dtype, np.integer):\n return int(self)\n elif dtypes.issubdtype(self.dtype, np.bool_):\n return bool(self)\n else:\n raise TypeError(self.dtype)\n\n setattr(device_array, \"item\", item)\n\n def __len__(self):\n try:\n return self.aval.shape[0]\n except IndexError as err:\n raise TypeError(\"len() of unsized object\") from err # same as numpy error\n\n setattr(device_array, \"__len__\", __len__)\n\n def __iter__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\") # same as numpy error\n else:\n return (sl for chunk in self._chunk_iter(100) for sl in chunk._unstack())\n\n setattr(device_array, \"__iter__\", __iter__)\n\n def __reversed__(self):\n return iter(self[::-1])\n\n setattr(device_array, \"__reversed__\", __reversed__)\n\n def __format__(self, format_spec):\n # Simulates behavior of https://github.com/numpy/numpy/pull/9883\n if self.ndim == 0:\n return format(self._value[()], format_spec)\n else:\n return format(self._value, format_spec)\n\n setattr(device_array, \"__format__\", __format__)\n\n def __array__(self, dtype=None, context=None):\n return np.asarray(self._value, dtype=dtype)\n\n setattr(device_array, \"__array__\", __array__)\n\n setattr(device_array, \"__str__\", partialmethod(_forward_to_value, str))\n setattr(device_array, \"__bool__\", partialmethod(_forward_to_value, bool))\n setattr(device_array, \"__nonzero__\", partialmethod(_forward_to_value, bool))\n setattr(device_array, \"__float__\", lambda self: self._value.__float__())\n setattr(device_array, \"__int__\", lambda self: self._value.__int__())\n setattr(device_array, \"__complex__\", lambda self: self._value.__complex__())\n setattr(device_array, \"__hex__\", partialmethod(_forward_to_value, hex))\n setattr(device_array, \"__oct__\", partialmethod(_forward_to_value, oct))\n setattr(device_array, \"__index__\", partialmethod(_forward_to_value,\n operator.index))\n to_bytes = lambda self, order=\"C\": self._value.tobytes(order)\n setattr(device_array, \"tobytes\", to_bytes)\n del to_bytes\n setattr(device_array, \"tolist\", lambda self: self._value.tolist())\n\n # pickle saves and loads just like an ndarray\n setattr(device_array, \"__reduce__\",\n partialmethod(_forward_to_value, operator.methodcaller(\"__reduce__\")))\n\n # explicitly set to be unhashable.\n setattr(device_array, \"__hash__\", None)\n\n # clobbered when jax.numpy is imported, but useful in tests\n setattr(device_array, \"__eq__\", lambda self, other: self._value == other)\n\n # The following methods are dynamically overridden in lax_numpy.py.\n def raise_not_implemented():\n raise NotImplementedError\n\n setattr(device_array, \"__getitem__\", lambda self, i: raise_not_implemented())\n# pylint: enable=protected-access\n\n\nclass DeletedBuffer(object): pass\ndeleted_buffer = DeletedBuffer()\n\n\ndevice_array_types: List[type] = [xc.Buffer, _DeviceArray]\nfor _device_array in device_array_types:\n core.literalable_types.add(_device_array)\n core.pytype_aval_mappings[device_array] = abstract_arrays.canonical_concrete_aval\n",
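The `device_array.py` source above attaches ndarray-like dunder methods to the device-array classes at runtime, each one forwarding to the cached host value via `partial`/`partialmethod`. Below is a minimal, self-contained sketch of that forwarding pattern; the `HostBackedArray` class is hypothetical (not part of jax) and only illustrates the mechanism:

from functools import partial, partialmethod
import operator

import numpy as np


def _forward_method(attrname, self, fun, *args):
    # Look up the named attribute (here the cached ndarray) and apply `fun`.
    return fun(getattr(self, attrname), *args)

# Pre-bind the attribute name, mirroring `_forward_to_value` above.
_forward_to_value = partial(_forward_method, "_value")


class HostBackedArray:
    """Hypothetical stand-in for an array with a cached host-side value."""

    def __init__(self, data):
        self._value = np.asarray(data)


# Methods are attached after the class body, as in the loop over
# `device_array` types above; `partialmethod` supplies `self`.
setattr(HostBackedArray, "__str__", partialmethod(_forward_to_value, str))
setattr(HostBackedArray, "__bool__", partialmethod(_forward_to_value, bool))
setattr(HostBackedArray, "__index__",
        partialmethod(_forward_to_value, operator.index))

x = HostBackedArray(3)
# str/bool/indexing all delegate to the cached numpy value.
assert str(x) == "3" and bool(x) and [10, 11, 12, 13][x] == 13

The payoff of this style, visible in the source above, is that one table of forwarders covers both the Python `_DeviceArray` class and the C++-backed buffer type without duplicating method bodies.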
"# coding=utf-8\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nfrom functools import partial\n\nimport numpy as np\n\nfrom jax._src.numpy import lax_numpy as jnp\nfrom jax._src.numpy.vectorize import vectorize\nfrom jax._src import ad_util\nfrom jax._src import api\nfrom jax import lax\nfrom jax._src import dtypes\nfrom jax.interpreters import xla\nfrom jax.interpreters import ad\nfrom jax.interpreters import batching\nfrom jax._src.util import prod\nfrom jax.core import Primitive, ShapedArray, raise_to_shaped\nfrom jax._src.lax.lax import (\n standard_primitive, standard_unop, naryop_dtype_rule, _float, _complex,\n _input_dtype, _broadcasting_select)\nfrom jax._src.lax import lax as lax_internal\nfrom jax._src.lib import lapack\n\nfrom jax._src.lib import cuda_linalg\nfrom jax._src.lib import cusolver\nfrom jax._src.lib import cusparse\nfrom jax._src.lib import rocsolver\n\nfrom jax._src.lib import xla_client\n\nxops = xla_client.ops\n\n\n# traceables\n\ndef cholesky(x, symmetrize_input: bool = True):\n \"\"\"Cholesky decomposition.\n\n Computes the Cholesky decomposition\n\n .. math::\n A = L . L^H\n\n of square matrices, :math:`A`, such that :math:`L`\n is lower triangular. The matrices of :math:`A` must be positive-definite and\n either Hermitian, if complex, or symmetric, if real.\n\n Args:\n x: A batch of square Hermitian (symmetric if real) positive-definite\n matrices with shape ``[..., n, n]``.\n symmetrize_input: If ``True``, the matrix is symmetrized before Cholesky\n decomposition by computing :math:`\\\\frac{1}{2}(x + x^H)`. If ``False``,\n only the lower triangle of ``x`` is used; the upper triangle is ignored\n and not accessed.\n\n Returns:\n The Cholesky decomposition as a matrix with the same dtype as ``x`` and\n shape ``[..., n, n]``. If Cholesky decomposition fails, returns a matrix\n full of NaNs. The behavior on failure may change in the future.\n \"\"\"\n if symmetrize_input:\n x = symmetrize(x)\n return jnp.tril(cholesky_p.bind(x))\n\ndef eig(x, compute_left_eigenvectors=True, compute_right_eigenvectors=True):\n \"\"\"Eigendecomposition of a general matrix.\n\n Nonsymmetric eigendecomposition is at present only implemented on CPU.\n \"\"\"\n return eig_p.bind(x, compute_left_eigenvectors=compute_left_eigenvectors,\n compute_right_eigenvectors=compute_right_eigenvectors)\n\ndef eigh(x, lower: bool = True, symmetrize_input: bool = True):\n \"\"\"Eigendecomposition of a Hermitian matrix.\n\n Computes the eigenvectors and eigenvalues of a complex Hermitian or real\n symmetric square matrix.\n\n Args:\n x: A batch of square complex Hermitian or real symmetric matrices with shape\n ``[..., n, n]``.\n lower: If ``symmetrize_input`` is ``False``, describes which triangle of the\n input matrix to use. 
If ``symmetrize_input`` is ``False``, only the\n triangle given by ``lower`` is accessed; the other triangle is ignored and\n not accessed.\n symmetrize_input: If ``True``, the matrix is symmetrized before the\n eigendecomposition by computing :math:`\\\\frac{1}{2}(x + x^H)`.\n\n Returns:\n A tuple ``(w, v)``.\n\n ``w`` is an array with the same dtype as ``x`` such that ``w[..., :, i]`` is\n the eigenvector corresponding to ``v[..., i]``.\n\n ``v`` is an array with the same dtype as ``x`` (or its real counterpart if\n complex) with shape ``[..., n]`` containing the eigenvalues of ``x``.\n \"\"\"\n if symmetrize_input:\n x = symmetrize(x)\n v, w = eigh_p.bind(x, lower=lower)\n return v, w\n\n\ndef lu_pivots_to_permutation(pivots, permutation_size: int):\n \"\"\"Converts the pivots (row swaps) returned by LU to a permutation.\n\n We build a permutation rather than applying `pivots` directly to the rows\n of a matrix because lax loops aren't differentiable.\n\n Args:\n pivots: an int32 array of shape (..., k) of row swaps to perform\n permutation_size: the size of the output permutation. Has to be >= k.\n\n Returns:\n An int32 array of shape (..., permutation_size).\n \"\"\"\n permutation = lu_pivots_to_permutation_p.bind(\n pivots, permutation_size=int(permutation_size))\n return permutation\n\n\ndef lu(x):\n \"\"\"LU decomposition with partial pivoting.\n\n Computes the matrix decomposition:\n\n .. math::\n P.A = L.U\n\n where :math:`P` is a permutation of the rows of :math:`A`, :math:`L` is a\n lower-triangular matrix with unit-diagonal elements, and :math:`U` is an\n upper-triangular matrix.\n\n Args:\n x: A batch of matrices with shape ``[..., m, n]``.\n\n Returns:\n A tuple ``(lu, pivots, permutation)``.\n\n ``lu`` is a batch of matrices with the same shape and dtype as ``x``\n containing the :math:`L` matrix in its lower triangle and the :math:`U`\n matrix in its upper triangle. The (unit) diagonal elements of :math:`L` are\n not represented explicitly.\n\n ``pivots`` is an int32 array with shape ``[..., min(m, n)]`` representing a\n sequence of row swaps that should be performed on :math:`A`.\n\n ``permutation`` is an alternative representation of the sequence of row\n swaps as a permutation, represented as an int32 array with shape\n ``[..., m]``.\n \"\"\"\n lu, pivots, permutation = lu_p.bind(x)\n return lu, pivots, permutation\n\ndef qr(x, full_matrices: bool = True):\n \"\"\"QR decomposition.\n\n Computes the QR decomposition\n\n .. math::\n A = Q . 
R\n\n of matrices :math:`A`, such that :math:`Q` is a unitary (orthogonal) matrix,\n and :math:`R` is an upper-triangular matrix.\n\n Args:\n x: A batch of matrices with shape ``[..., m, n]``.\n full_matrices: Determines if full or reduced matrices are returned; see\n below.\n\n Returns:\n A pair of arrays ``(q, r)``.\n\n Array ``q`` is a unitary (orthogonal) matrix,\n with shape ``[..., m, m]`` if ``full_matrices=True``, or\n ``[..., m, min(m, n)]`` if ``full_matrices=False``.\n\n Array ``r`` is an upper-triangular matrix with shape ``[..., m, n]`` if\n ``full_matrices=True``, or ``[..., min(m, n), n]`` if\n ``full_matrices=False``.\n \"\"\"\n q, r = qr_p.bind(x, full_matrices=full_matrices)\n return q, r\n\ndef svd(x, full_matrices=True, compute_uv=True):\n \"\"\"Singular value decomposition.\n\n Returns the singular values if compute_uv is False, otherwise returns a triple\n containing the left singular vectors, the singular values and the adjoint of\n the right singular vectors.\n \"\"\"\n result = svd_p.bind(x, full_matrices=full_matrices, compute_uv=compute_uv)\n if compute_uv:\n s, u, v = result\n return u, s, v\n else:\n s, = result\n return s\n\ndef triangular_solve(a, b, left_side: bool = False, lower: bool = False,\n transpose_a: bool = False, conjugate_a: bool = False,\n unit_diagonal: bool = False):\n r\"\"\"Triangular solve.\n\n Solves either the matrix equation\n\n .. math::\n \\mathit{op}(A) . X = B\n\n if ``left_side`` is ``True`` or\n\n .. math::\n X . \\mathit{op}(A) = B\n\n if ``left_side`` is ``False``.\n\n ``A`` must be a lower or upper triangular square matrix, and where\n :math:`\\mathit{op}(A)` may either transpose :math:`A` if ``transpose_a``\n is ``True`` and/or take its complex conjugate if ``conjugate_a`` is ``True``.\n\n Args:\n a: A batch of matrices with shape ``[..., m, m]``.\n b: A batch of matrices with shape ``[..., m, n]`` if ``left_side`` is\n ``True`` or shape ``[..., n, m]`` otherwise.\n left_side: describes which of the two matrix equations to solve; see above.\n lower: describes which triangle of ``a`` should be used. The other triangle\n is ignored.\n transpose_a: if ``True``, the value of ``a`` is transposed.\n conjugate_a: if ``True``, the complex conjugate of ``a`` is used in the\n solve. 
Has no effect if ``a`` is real.\n unit_diagonal: if ``True``, the diagonal of ``a`` is assumed to be unit\n (all 1s) and not accessed.\n\n Returns:\n A batch of matrices the same shape and dtype as ``b``.\n \"\"\"\n conjugate_a = conjugate_a and jnp.issubdtype(lax.dtype(a), jnp.complexfloating)\n singleton = jnp.ndim(b) == jnp.ndim(a) - 1\n if singleton:\n b = jnp.expand_dims(b, -1 if left_side else -2)\n out = triangular_solve_p.bind(\n a, b, left_side=left_side, lower=lower, transpose_a=transpose_a,\n conjugate_a=conjugate_a, unit_diagonal=unit_diagonal)\n if singleton:\n out = out[..., 0] if left_side else out[..., 0, :]\n return out\n\n\n# utilities\n@partial(vectorize, signature='(n,m),(m)->(n)')\ndef _matvec_multiply(a, b):\n return lax.dot(a, b, precision=lax.Precision.HIGHEST)\n\ndef _check_solve_shapes(a, b):\n if not (a.ndim >= 2 and b.ndim in [a.ndim, a.ndim - 1] and\n a.shape[-1] == a.shape[-2] == b.shape[a.ndim - 2]):\n raise ValueError(\n \"The arguments to solve must have shapes a=[..., m, m] and \"\n f\"b=[..., m, k] or b=[..., m]; got a={a.shape} and b={b.shape}\")\n\ndef _solve(a, b):\n _check_solve_shapes(a, b)\n\n # Broadcast leading dimensions of b to the shape of a, as is required by\n # custom_linear_solve.\n out_shape = tuple(d_a if d_b == 1 else d_b\n for d_a, d_b in zip(a.shape[:-1] + (1,), b.shape))\n b = jnp.broadcast_to(b, out_shape)\n\n # With custom_linear_solve, we can reuse the same factorization when\n # computing sensitivities. This is considerably faster.\n lu_, _, permutation = lu(lax.stop_gradient(a))\n custom_solve = partial(\n lax.custom_linear_solve,\n lambda x: _matvec_multiply(a, x),\n solve=lambda _, x: lu_solve(lu_, permutation, x, trans=0),\n transpose_solve=lambda _, x: lu_solve(lu_, permutation, x, trans=1))\n if a.ndim == b.ndim + 1:\n # b.shape == [..., m]\n return custom_solve(b)\n else:\n # b.shape == [..., m, k]\n return api.vmap(custom_solve, b.ndim - 1, max(a.ndim, b.ndim) - 1)(b)\n\ndef _T(x): return jnp.swapaxes(x, -1, -2)\ndef _H(x): return jnp.conj(_T(x))\ndef symmetrize(x): return (x + _H(x)) / 2\n\ndef _unpack_tuple(f, n):\n def g(c, *args, **kwargs):\n t = f(c, *args, **kwargs)\n return (xops.GetTupleElement(t, i) for i in range(n))\n return g\n\n# primitives\n\n_cpu_lapack_types = {np.dtype(np.float32), np.dtype(np.float64),\n np.dtype(np.complex64), np.dtype(np.complex128)}\n\n# Cholesky decomposition\n\ndef cholesky_jvp_rule(primals, tangents):\n x, = primals\n sigma_dot, = tangents\n L = jnp.tril(cholesky_p.bind(x))\n\n # Forward-mode rule from https://arxiv.org/pdf/1602.07527.pdf\n def phi(X):\n l = jnp.tril(X)\n return l / lax.expand_dims(jnp._constant_like(X, 1) + jnp.eye(X.shape[-1], dtype=X.dtype),\n range(l.ndim - 2))\n\n tmp = triangular_solve(L, sigma_dot, left_side=False, transpose_a=True,\n conjugate_a=True, lower=True)\n L_dot = lax.batch_matmul(L, phi(triangular_solve(\n L, tmp, left_side=True, transpose_a=False, lower=True)),\n precision=lax.Precision.HIGHEST)\n return L, L_dot\n\ndef cholesky_batching_rule(batched_args, batch_dims):\n x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n return cholesky(x), 0\n\ncholesky_p = standard_unop(_float | _complex, 'cholesky')\nad.primitive_jvps[cholesky_p] = cholesky_jvp_rule\nbatching.primitive_batchers[cholesky_p] = cholesky_batching_rule\n\ndef _nan_like(c, operand):\n shape = c.get_shape(operand)\n dtype = shape.element_type()\n if jnp.issubdtype(dtype, np.complexfloating):\n nan = xops.Constant(c, np.array(np.nan * (1. 
+ 1j), dtype=dtype))\n else:\n nan = xops.Constant(c, np.array(np.nan, dtype=dtype))\n return xops.Broadcast(nan, shape.dimensions())\n\ndef _cholesky_cpu_gpu_translation_rule(potrf_impl, ctx, avals_in, avals_out,\n operand):\n operand_aval, = avals_in\n c = ctx.builder\n batch_dims = operand_aval.shape[:-2]\n result, info = potrf_impl(c, operand, lower=True)\n ok = xops.Eq(info, xops.Constant(c, np.array(0, np.int32)))\n return [_broadcasting_select(c,\n xops.Reshape(ok, batch_dims + (1, 1)), result,\n _nan_like(c, result))]\n\nxla.register_translation(\n cholesky_p,\n partial(_cholesky_cpu_gpu_translation_rule, lapack.potrf),\n platform='cpu')\n\nif cusolver is not None:\n xla.register_translation(\n cholesky_p,\n partial(_cholesky_cpu_gpu_translation_rule, cusolver.potrf),\n platform='gpu')\n\nif rocsolver is not None:\n xla.register_translation(\n cholesky_p,\n partial(_cholesky_cpu_gpu_translation_rule, rocsolver.potrf),\n platform='gpu')\n\n# Asymmetric eigendecomposition\n\ndef eig_impl(operand, *, compute_left_eigenvectors, compute_right_eigenvectors):\n return (\n xla.apply_primitive(eig_p, operand,\n compute_left_eigenvectors=compute_left_eigenvectors,\n compute_right_eigenvectors=compute_right_eigenvectors))\n\ndef eig_translation_rule(ctx, avals_in, avals_out, operand, *,\n compute_left_eigenvectors, compute_right_eigenvectors):\n raise NotImplementedError(\n \"Nonsymmetric eigendecomposition is only implemented on the CPU backend\")\n\ndef eig_abstract_eval(operand, *, compute_left_eigenvectors,\n compute_right_eigenvectors):\n if isinstance(operand, ShapedArray):\n if operand.ndim < 2 or operand.shape[-2] != operand.shape[-1]:\n raise ValueError(\"Argument to nonsymmetric eigendecomposition must have \"\n \"shape [..., n, n], got shape {}\".format(operand.shape))\n\n batch_dims = operand.shape[:-2]\n n = operand.shape[-1]\n dtype = np.complex64 if dtypes.finfo(operand.dtype).bits == 32 else np.complex128\n dtype = dtypes.canonicalize_dtype(dtype)\n vl = vr = operand.update(shape=batch_dims + (n, n), dtype=dtype)\n w = operand.update(shape=batch_dims + (n,), dtype=dtype)\n else:\n raise NotImplementedError\n\n output = [w]\n if compute_left_eigenvectors:\n output.append(vl)\n if compute_right_eigenvectors:\n output.append(vr)\n\n return tuple(output)\n\n_cpu_geev = lapack.geev\n\ndef _eig_cpu_translation_rule(ctx, avals_in, avals_out, operand, *,\n compute_left_eigenvectors,\n compute_right_eigenvectors):\n operand_aval, = avals_in\n batch_dims = operand_aval.shape[:-2]\n c = ctx.builder\n\n w, vl, vr, info = _cpu_geev(c, operand, jobvl=compute_left_eigenvectors,\n jobvr=compute_right_eigenvectors)\n\n ok = xops.Eq(info, xops.Constant(c, np.array(0, np.int32)))\n w = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1,)), w,\n _nan_like(c, w))\n output = [w]\n\n if compute_left_eigenvectors:\n vl = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), vl,\n _nan_like(c, vl))\n output.append(vl)\n\n if compute_right_eigenvectors:\n vr = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), vr,\n _nan_like(c, vr))\n output.append(vr)\n\n return output\n\ndef eig_batching_rule(batched_args, batch_dims, *, compute_left_eigenvectors,\n compute_right_eigenvectors):\n x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n\n return (eig_p.bind(x, compute_left_eigenvectors=compute_left_eigenvectors,\n compute_right_eigenvectors=compute_right_eigenvectors),\n (0,) * (1 + compute_left_eigenvectors + compute_right_eigenvectors))\n\ndef 
eig_jvp_rule(primals, tangents, *, compute_left_eigenvectors,\n compute_right_eigenvectors):\n if compute_left_eigenvectors or compute_right_eigenvectors:\n raise NotImplementedError(\n 'The derivatives of eigenvectors are not implemented, only '\n 'eigenvalues. See '\n 'https://github.com/google/jax/issues/2748 for discussion.')\n # Formula for derivative of eigenvalues w.r.t. a is eqn 4.60 in\n # https://arxiv.org/abs/1701.00392\n a, = primals\n da, = tangents\n l, v = eig(a, compute_left_eigenvectors=False)\n return [l], [jnp.sum(_solve(v, da.astype(v.dtype)) * _T(v), -1)]\n\neig_p = Primitive('eig')\neig_p.multiple_results = True\neig_p.def_impl(eig_impl)\neig_p.def_abstract_eval(eig_abstract_eval)\nxla.register_translation(eig_p, eig_translation_rule)\nxla.register_translation(eig_p, _eig_cpu_translation_rule, platform='cpu')\nbatching.primitive_batchers[eig_p] = eig_batching_rule\nad.primitive_jvps[eig_p] = eig_jvp_rule\n\n\n# Symmetric/Hermitian eigendecomposition\n\ndef eigh_impl(operand, lower):\n v, w = xla.apply_primitive(eigh_p, operand, lower=lower)\n return v, w\n\ndef _eigh_translation_rule(ctx, avals_in, avals_out, operand, *, lower):\n operand_aval, = avals_in\n if operand_aval.shape[-1] == 0:\n return [operand, xops.Real(xops.Reshape(operand, operand_aval.shape[:-1]))]\n return xops.Eigh(operand, lower=lower)\n\ndef eigh_abstract_eval(operand, lower):\n if isinstance(operand, ShapedArray):\n if operand.ndim < 2 or operand.shape[-2] != operand.shape[-1]:\n raise ValueError(\n \"Argument to symmetric eigendecomposition must have shape [..., n, n],\"\n \"got shape {}\".format(operand.shape))\n\n batch_dims = operand.shape[:-2]\n n = operand.shape[-1]\n v = operand.update(shape=batch_dims + (n, n))\n w = operand.update(shape=batch_dims + (n,),\n dtype=lax_internal._complex_basetype(operand.dtype))\n else:\n v, w = operand, operand\n return v, w\n\ndef _eigh_cpu_gpu_translation_rule(syevd_impl, ctx, avals_in, avals_out,\n operand, *, lower):\n operand_aval, = avals_in\n batch_dims = operand_aval.shape[:-2]\n c = ctx.builder\n v, w, info = syevd_impl(c, operand, lower=lower)\n ok = xops.Eq(info, xops.Constant(c, np.array(0, np.int32)))\n v = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), v,\n _nan_like(c, v))\n w = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1,)), w,\n _nan_like(c, w))\n return [v, w]\n\ndef eigh_jvp_rule(primals, tangents, lower):\n # Derivative for eigh in the simplest case of distinct eigenvalues.\n # This is classic nondegenerate perurbation theory, but also see\n # https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf\n # The general solution treating the case of degenerate eigenvalues is\n # considerably more complicated. 
Ambitious readers may refer to the general\n # methods below or refer to degenerate perturbation theory in physics.\n # https://www.win.tue.nl/analysis/reports/rana06-33.pdf and\n # https://people.orie.cornell.edu/aslewis/publications/99-clarke.pdf\n a, = primals\n a_dot, = tangents\n\n v, w_real = eigh_p.bind(symmetrize(a), lower=lower)\n\n # for complex numbers we need eigenvalues to be full dtype of v, a:\n w = w_real.astype(a.dtype)\n eye_n = jnp.eye(a.shape[-1], dtype=a.dtype)\n # carefully build reciprocal delta-eigenvalue matrix, avoiding NaNs.\n Fmat = jnp.reciprocal(eye_n + w[..., jnp.newaxis, :] - w[..., jnp.newaxis]) - eye_n\n # eigh impl doesn't support batch dims, but future-proof the grad.\n dot = partial(lax.dot if a.ndim == 2 else lax.batch_matmul,\n precision=lax.Precision.HIGHEST)\n vdag_adot_v = dot(dot(_H(v), a_dot), v)\n dv = dot(v, jnp.multiply(Fmat, vdag_adot_v))\n dw = jnp.real(jnp.diagonal(vdag_adot_v, axis1=-2, axis2=-1))\n return (v, w_real), (dv, dw)\n\ndef eigh_batching_rule(batched_args, batch_dims, lower):\n x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n return eigh_p.bind(x, lower=lower), (0, 0)\n\neigh_p = Primitive('eigh')\neigh_p.multiple_results = True\neigh_p.def_impl(eigh_impl)\neigh_p.def_abstract_eval(eigh_abstract_eval)\nxla.register_translation(eigh_p, _eigh_translation_rule)\nad.primitive_jvps[eigh_p] = eigh_jvp_rule\nbatching.primitive_batchers[eigh_p] = eigh_batching_rule\n\n_cpu_syevd = lapack.syevd\n\nxla.register_translation(\n eigh_p, partial(_eigh_cpu_gpu_translation_rule, _cpu_syevd),\n platform='cpu')\n\nif cusolver is not None:\n xla.register_translation(\n eigh_p, partial(_eigh_cpu_gpu_translation_rule, cusolver.syevd),\n platform='gpu')\n\nif rocsolver is not None:\n xla.register_translation(\n eigh_p, partial(_eigh_cpu_gpu_translation_rule, rocsolver.syevd),\n platform='gpu')\n\n\ntriangular_solve_dtype_rule = partial(\n naryop_dtype_rule, _input_dtype, (_float | _complex, _float | _complex),\n 'triangular_solve')\n\ndef triangular_solve_shape_rule(a, b, left_side=False, **unused_kwargs):\n if a.ndim < 2:\n msg = \"triangular_solve requires a.ndim to be at least 2, got {}.\"\n raise TypeError(msg.format(a.ndim))\n if b.ndim < 2:\n msg = \"triangular_solve requires b.ndim to be at least 2, got {}.\"\n raise TypeError(msg.format(b.ndim))\n if a.shape[-1] != a.shape[-2]:\n msg = (\"triangular_solve requires the last two dimensions of a to be equal \"\n \"in size, got a.shape of {}.\")\n raise TypeError(msg.format(a.shape))\n if a.shape[:-2] != b.shape[:-2]:\n msg = (\"triangular_solve requires both arguments to have the same number \"\n \"of dimensions and equal batch dimensions, got {} and {}.\")\n raise TypeError(msg.format(a.shape, b.shape))\n common_dim = -2 if left_side else -1\n if a.shape[-1] != b.shape[common_dim]:\n msg = \"Incompatible shapes for arguments to triangular_solve: {} and {}.\"\n raise TypeError(msg.format(a.shape, b.shape))\n return b.shape\n\ndef triangular_solve_jvp_rule_a(\n g_a, ans, a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal):\n m, n = b.shape[-2:]\n k = 1 if unit_diagonal else 0\n g_a = jnp.tril(g_a, k=-k) if lower else jnp.triu(g_a, k=k)\n g_a = lax.neg(g_a)\n g_a = jnp.swapaxes(g_a, -1, -2) if transpose_a else g_a\n g_a = jnp.conj(g_a) if conjugate_a else g_a\n dot = partial(lax.dot if g_a.ndim == 2 else lax.batch_matmul,\n precision=lax.Precision.HIGHEST)\n\n def a_inverse(rhs):\n return triangular_solve(a, rhs, left_side, lower, transpose_a, conjugate_a,\n 
unit_diagonal)\n\n # triangular_solve is about the same cost as matrix multplication (~n^2 FLOPs\n # for matrix/vector inputs). Order these operations in whichever order is\n # cheaper.\n if left_side:\n assert g_a.shape[-2:] == a.shape[-2:] == (m, m) and ans.shape[-2:] == (m, n)\n if m > n:\n return a_inverse(dot(g_a, ans)) # A^{-1} (∂A X)\n else:\n return dot(a_inverse(g_a), ans) # (A^{-1} ∂A) X\n else:\n assert g_a.shape[-2:] == a.shape[-2:] == (n, n) and ans.shape[-2:] == (m, n)\n if m < n:\n return a_inverse(dot(ans, g_a)) # (X ∂A) A^{-1}\n else:\n return dot(ans, a_inverse(g_a)) # X (∂A A^{-1})\n\ndef triangular_solve_transpose_rule(\n cotangent, a, b, left_side, lower, transpose_a, conjugate_a,\n unit_diagonal):\n # Triangular solve is nonlinear in its first argument and linear in its second\n # argument, analogous to `div` but swapped.\n assert not ad.is_undefined_primal(a) and ad.is_undefined_primal(b)\n if type(cotangent) is ad_util.Zero:\n cotangent_b = ad_util.Zero(b.aval)\n else:\n cotangent_b = triangular_solve(a, cotangent, left_side, lower,\n not transpose_a, conjugate_a, unit_diagonal)\n return [None, cotangent_b]\n\n\ndef triangular_solve_batching_rule(batched_args, batch_dims, left_side,\n lower, transpose_a, conjugate_a,\n unit_diagonal):\n x, y = batched_args\n bx, by = batch_dims\n if bx is batching.not_mapped:\n if left_side:\n y = batching.moveaxis(y, by, -1)\n y_flat = y.reshape(y.shape[:-2] + (y.shape[-2] * y.shape[-1],))\n bdim_out = y.ndim - 1\n else:\n y = batching.moveaxis(y, by, -2)\n y_flat = y.reshape(y.shape[:-3] + (y.shape[-3] * y.shape[-2], y.shape[-1]))\n bdim_out = y.ndim - 2\n out_flat = triangular_solve(\n x, y_flat, left_side=left_side, lower=lower,\n transpose_a=transpose_a, conjugate_a=conjugate_a,\n unit_diagonal=unit_diagonal)\n return out_flat.reshape(y.shape), bdim_out\n else:\n size = next(t.shape[i] for t, i in zip(batched_args, batch_dims)\n if i is not None)\n x = batching.bdim_at_front(x, bx, size)\n y = batching.bdim_at_front(y, by, size)\n return triangular_solve(x, y, left_side=left_side, lower=lower,\n transpose_a=transpose_a, conjugate_a=conjugate_a,\n unit_diagonal=unit_diagonal), 0\n\ndef _triangular_solve_translation_rule(\n ctx, avals_in, avals_out, a, b, *, left_side, lower, transpose_a,\n conjugate_a, unit_diagonal):\n if conjugate_a and not transpose_a:\n a = xops.Conj(a)\n conjugate_a = False\n if not transpose_a:\n transpose = xops.TriangularSolveOptions_Transpose.NO_TRANSPOSE\n else:\n transpose = (xops.TriangularSolveOptions_Transpose.ADJOINT if conjugate_a\n else xops.TriangularSolveOptions_Transpose.TRANSPOSE)\n return [\n xops.TriangularSolve(a, b, left_side, lower, unit_diagonal, transpose)]\n\ntriangular_solve_p = standard_primitive(\n triangular_solve_shape_rule, triangular_solve_dtype_rule,\n 'triangular_solve', translation_rule=_triangular_solve_translation_rule)\nad.defjvp2(triangular_solve_p,\n triangular_solve_jvp_rule_a,\n lambda g_b, _, a, b, **kws: triangular_solve(a, g_b, **kws))\nad.primitive_transposes[triangular_solve_p] = triangular_solve_transpose_rule\nbatching.primitive_batchers[triangular_solve_p] = triangular_solve_batching_rule\n\n\ndef _triangular_solve_cpu_translation_rule(\n ctx, avals_in, avals_out, a, b, *, left_side, lower, transpose_a,\n conjugate_a, unit_diagonal):\n a_aval, _ = avals_in\n c = ctx.builder\n\n if conjugate_a and not transpose_a:\n a = xops.Conj(a)\n conjugate_a = False\n if len(a_aval.shape) == 2 and np.dtype(a_aval.dtype) in _cpu_lapack_types:\n return [lapack.jax_trsm(\n 
c, xops.Constant(c, np.array(1, dtype=a_aval.dtype)),\n a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal)]\n else:\n # Fall back to the HLO implementation for unsupported types or batching.\n # TODO: Consider swapping XLA for LAPACK in batched case\n if not transpose_a:\n transpose = xops.TriangularSolveOptions_Transpose.NO_TRANSPOSE\n else:\n transpose = (xops.TriangularSolveOptions_Transpose.ADJOINT if conjugate_a\n else xops.TriangularSolveOptions_Transpose.TRANSPOSE)\n return [xops.TriangularSolve(a, b, left_side, lower, unit_diagonal,\n transpose)]\n\nxla.register_translation(triangular_solve_p,\n _triangular_solve_cpu_translation_rule,\n platform='cpu')\n\ndef _triangular_solve_gpu_translation_rule(\n trsm_impl, ctx, avals_in, avals_out, a, b, *, left_side, lower, transpose_a,\n conjugate_a, unit_diagonal):\n c = ctx.builder\n a_aval, _ = avals_in\n m, n = a_aval.shape[-2:]\n batch = prod(a_aval.shape[:-2])\n if conjugate_a and not transpose_a:\n a = xops.Conj(a)\n conjugate_a = False\n if batch > 1 and m <= 256 and n <= 256:\n return [trsm_impl(c, a, b, left_side, lower, transpose_a,\n conjugate_a, unit_diagonal)]\n else:\n # Use the XLA implementation for unbatched triangular_solve.\n if not transpose_a:\n transpose = xops.TriangularSolveOptions_Transpose.NO_TRANSPOSE\n else:\n transpose = (xops.TriangularSolveOptions_Transpose.ADJOINT if conjugate_a\n else xops.TriangularSolveOptions_Transpose.TRANSPOSE)\n return [xops.TriangularSolve(a, b, left_side, lower, unit_diagonal,\n transpose)]\n\nif cusolver is not None:\n xla.register_translation(\n triangular_solve_p,\n partial(_triangular_solve_gpu_translation_rule, cusolver.trsm),\n platform='gpu')\n\nif rocsolver is not None:\n xla.register_translation(\n triangular_solve_p,\n partial(_triangular_solve_gpu_translation_rule, rocsolver.trsm),\n platform='gpu')\n\n# Support operation for LU decomposition: Transformation of the pivots returned\n# by LU decomposition into permutations.\n\n\n# Define this outside lu_pivots_to_permutation to ensure fori_loop cache hits\ndef _lu_pivots_body_fn(i, permutation_and_swaps):\n permutation, swaps = permutation_and_swaps\n batch_dims = swaps.shape[:-1]\n j = swaps[..., i]\n iotas = jnp.ix_(*(lax.iota(jnp.int32, b) for b in batch_dims))\n x = permutation[..., i]\n y = permutation[iotas + (j,)]\n permutation = permutation.at[..., i].set(y)\n return permutation.at[iotas + (j,)].set(x), swaps\n\n\ndef _generic_lu_pivots_to_permutation(swaps, permutation_size):\n \"\"\"Converts the pivots (row swaps) returned by LU to a permutation.\n\n We build a permutation rather than applying `swaps` directly to the rows\n of a matrix because lax loops aren't differentiable.\n\n Args:\n swaps: an array of shape (..., k) of row swaps to perform\n permutation_size: the size of the output permutation. 
Should be >= k.\n Returns:\n An int32 array of shape (..., m).\n \"\"\"\n assert len(swaps.shape) >= 1\n batch_dims = swaps.shape[:-1]\n k = swaps.shape[-1]\n m = permutation_size\n\n permutation = lax.broadcasted_iota(jnp.int32, batch_dims + (m,),\n len(batch_dims))\n if m == 0:\n return permutation\n result, _ = lax.fori_loop(np.array(0, np.int32), np.array(k, np.int32),\n _lu_pivots_body_fn, (permutation, swaps))\n return result\n\n\ndef _lu_pivots_to_permutation_abstract_eval(pivots, *, permutation_size):\n pivots = raise_to_shaped(pivots)\n if isinstance(pivots, ShapedArray):\n if pivots.ndim < 1 or pivots.dtype != np.dtype(np.int32):\n raise ValueError(\n 'Argument to lu_pivots_to_permutation must have rank >= 1 and dtype '\n 'int32. Got shape={} and dtype={}'.format(pivots.shape, pivots.dtype))\n\n if permutation_size < pivots.shape[-1]:\n raise ValueError(\n 'Output permutation size {} has to exceed the trailing dimension of '\n 'the pivots. Got shape {}'.format(permutation_size, pivots.shape))\n\n batch_dims = pivots.shape[:-1]\n permutations = pivots.update(shape=batch_dims + (permutation_size,))\n else:\n permutations = pivots\n\n return permutations\n\n\ndef _lu_pivots_to_permutation_batching_rule(batched_args, batch_dims, *,\n permutation_size):\n x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n return lu_pivots_to_permutation_p.bind(\n x, permutation_size=permutation_size), 0\n\ndef _lu_pivots_to_permutation_gpu(ctx, avals_in, avals_out, pivots, *,\n permutation_size):\n return [cuda_linalg.lu_pivots_to_permutation(\n ctx.builder, pivots, permutation_size=permutation_size)]\n\nlu_pivots_to_permutation_p = Primitive('lu_pivots_to_permutation')\nlu_pivots_to_permutation_p.multiple_results = False\nlu_pivots_to_permutation_p.def_impl(\n partial(xla.apply_primitive, lu_pivots_to_permutation_p))\nlu_pivots_to_permutation_p.def_abstract_eval(\n _lu_pivots_to_permutation_abstract_eval)\nbatching.primitive_batchers[lu_pivots_to_permutation_p] = (\n _lu_pivots_to_permutation_batching_rule)\nxla.register_translation(\n lu_pivots_to_permutation_p,\n xla.lower_fun(_generic_lu_pivots_to_permutation, multiple_results=False,\n new_style=True))\n\nif cuda_linalg:\n xla.register_translation(lu_pivots_to_permutation_p,\n _lu_pivots_to_permutation_gpu,\n platform='gpu')\n\n# LU decomposition\n\n# Computes a pivoted LU decomposition such that\n# PA = LU\n# In the style of LAPACK, LU are stored in the same matrix.\n\ndef _lu_unblocked(a):\n \"\"\"Unblocked LU decomposition, as a rolled loop.\"\"\"\n m, n = a.shape\n def body(k, state):\n pivot, perm, a = state\n m_idx = jnp.arange(m)\n n_idx = jnp.arange(n)\n\n if jnp.issubdtype(a.dtype, jnp.complexfloating):\n t = a[:, k]\n magnitude = jnp.abs(jnp.real(t)) + jnp.abs(jnp.imag(t))\n else:\n magnitude = jnp.abs(a[:, k])\n i = jnp.argmax(jnp.where(m_idx >= k, magnitude, -jnp.inf))\n pivot = pivot.at[k].set(i)\n a = a.at[[k, i],].set(a[[i, k],])\n perm = perm.at[[i, k],].set(perm[[k, i],])\n\n # a[k+1:, k] /= a[k, k], adapted for loop-invariant shapes\n x = a[k, k]\n a = a.at[:, k].set(jnp.where(m_idx > k, a[:, k] / x, a[:, k]))\n\n # a[k+1:, k+1:] -= jnp.outer(a[k+1:, k], a[k, k+1:])\n a = a - jnp.where((m_idx[:, None] > k) & (n_idx > k),\n jnp.outer(a[:, k], a[k, :]), jnp.array(0, dtype=a.dtype))\n return pivot, perm, a\n\n pivot = jnp.zeros((min(m, n),), dtype=jnp.int32)\n perm = jnp.arange(m, dtype=jnp.int32)\n if m == 0 and n == 0:\n # If the array is empty, the loop body never executes but tracing it to a\n # jaxpr fails 
because the indexing cannot succeed.\n return (pivot, perm, a)\n return lax.fori_loop(0, min(m, n), body, (pivot, perm, a))\n\n\ndef _lu_blocked(a, block_size=128):\n \"\"\"Blocked LU decomposition, as an unrolled loop.\"\"\"\n m, n = a.shape\n r = min(m, n)\n pivot = jnp.zeros((r,), dtype=jnp.int32)\n perm = jnp.arange(m, dtype=jnp.int32)\n for k in range(0, r, block_size):\n b = min(r - k, block_size)\n block_pivot, block_perm, lu_block = _lu_unblocked(a[k:, k:k+b])\n\n pivot = pivot.at[k:k+b].set(block_pivot + k)\n perm = perm.at[k:].set(perm[block_perm + k])\n a = a.at[k:, :].set(a[block_perm + k, :])\n a = a.at[k:, k:k+b].set(lu_block)\n\n if k + b < n:\n a = a.at[k:k+b, k+b:].set(\n triangular_solve(a[k:k+b, k:k+b], a[k:k+b, k+b:], left_side=True,\n lower=True, unit_diagonal=True))\n a = a.at[k+b:, k+b:].add(-lax.dot(a[k+b:, k:k+b], a[k:k+b, k+b:],\n precision=lax.Precision.HIGHEST))\n return a, pivot, perm\n\ndef _lu_python(x):\n \"\"\"Default LU decomposition in Python, where no better version exists.\"\"\"\n m, n = x.shape[-2:]\n batch_dims = x.shape[:-2]\n if len(batch_dims) > 0:\n batch_size = np.prod(batch_dims, dtype=np.int64)\n lu, pivot, perm = api.vmap(_lu_blocked)(lax.reshape(x, (batch_size, m, n)))\n lu = lax.reshape(lu, batch_dims + (m, n))\n pivot = lax.reshape(pivot, batch_dims + (min(m, n),))\n perm = lax.reshape(perm, batch_dims + (m,))\n else:\n lu, pivot, perm = _lu_blocked(x)\n return lu, pivot, perm\n\ndef _lu_impl(operand):\n lu, pivot, perm = xla.apply_primitive(lu_p, operand)\n return lu, pivot, perm\n\ndef _lu_abstract_eval(operand):\n operand = raise_to_shaped(operand)\n if isinstance(operand, ShapedArray):\n if operand.ndim < 2:\n raise ValueError(\"Argument to LU decomposition must have ndims >= 2\")\n\n batch_dims = operand.shape[:-2]\n m = operand.shape[-2]\n n = operand.shape[-1]\n pivot = operand.update(shape=batch_dims + (min(m, n),), dtype=jnp.int32)\n perm = operand.update(shape=batch_dims + (m,), dtype=jnp.int32)\n else:\n pivot = operand\n perm = operand\n return operand, pivot, perm\n\ndef _lu_jvp_rule(primals, tangents):\n a, = primals\n a_dot, = tangents\n lu, pivots, permutation = lu_p.bind(a)\n\n a_shape = jnp.shape(a)\n m, n = a_shape[-2:]\n dtype = lax.dtype(a)\n k = min(m, n)\n\n batch_dims = a_shape[:-2]\n iotas = jnp.ix_(*(lax.iota(jnp.int32, b) for b in batch_dims + (1,)))\n x = a_dot[iotas[:-1] + (permutation, slice(None))]\n\n # Differentiation of Matrix Functionals Using Triangular Factorization\n # F. R. De Hoog, R. S. Anderssen, and M. A. Lukas\n #\n # LU = A\n # ==> L'U + LU' = A'\n # ==> inv(L) . L' + U' . inv(U) = inv(L) A' inv(U)\n # ==> L' = L . tril(inv(L) . A' . inv(U), -1)\n # U' = triu(inv(L) . A' . inv(U)) . 
U\n\n ndims = len(a_shape)\n l_padding = [(0, 0, 0)] * ndims\n l_padding[-1] = (0, m - k, 0)\n zero = jnp._constant_like(lu, 0)\n l = lax.pad(jnp.tril(lu[..., :, :k], -1), zero, l_padding)\n l = l + lax.expand_dims(jnp.eye(m, m, dtype=dtype), range(l.ndim - 2))\n\n u_eye = lax.pad(jnp.eye(n - k, n - k, dtype=dtype), zero,\n ((k, 0, 0), (k, 0, 0)))\n u_padding = [(0, 0, 0)] * ndims\n u_padding[-2] = (0, n - k, 0)\n u = lax.pad(jnp.triu(lu[..., :k, :]), zero, u_padding) + lax.expand_dims(u_eye, range(lu.ndim - 2))\n\n la = triangular_solve(l, x, left_side=True, transpose_a=False, lower=True,\n unit_diagonal=True)\n lau = triangular_solve(u, la, left_side=False, transpose_a=False,\n lower=False)\n\n l_dot = jnp.matmul(l, jnp.tril(lau, -1))\n u_dot = jnp.matmul(jnp.triu(lau), u)\n lu_dot = l_dot + u_dot\n return (lu, pivots, permutation), (lu_dot, ad_util.Zero.from_value(pivots),\n ad_util.Zero.from_value(permutation))\n\n\ndef _lu_batching_rule(batched_args, batch_dims):\n x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n return lu_p.bind(x), (0, 0, 0)\n\ndef _lu_cpu_gpu_translation_rule(getrf_impl, ctx, avals_in, avals_out, operand):\n operand_aval, = avals_in\n c = ctx.builder\n batch_dims = operand_aval.shape[:-2]\n m = operand_aval.shape[-2]\n lu, pivot, info = getrf_impl(c, operand)\n # Subtract 1 from the pivot to get 0-based indices.\n pivot = xops.Sub(pivot, xops.Constant(c, np.array(1, np.int32)))\n ok = xops.Ge(info, xops.Constant(c, np.array(0, np.int32)))\n lu = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), lu,\n _nan_like(c, lu))\n perm = xla.lower_fun(lambda x: lu_pivots_to_permutation(x, m),\n multiple_results=False, backend=ctx.platform)(c, pivot)\n return [lu, pivot, perm]\n\n\ndef _lu_tpu_translation_rule(ctx, avals_in, avals_out, operand):\n return xops.LU(operand)\n\n\nlu_p = Primitive('lu')\nlu_p.multiple_results = True\nlu_p.def_impl(_lu_impl)\nlu_p.def_abstract_eval(_lu_abstract_eval)\nxla.register_translation(lu_p, xla.lower_fun(_lu_python, multiple_results=True,\n new_style=True))\nad.primitive_jvps[lu_p] = _lu_jvp_rule\nbatching.primitive_batchers[lu_p] = _lu_batching_rule\n\nxla.register_translation(lu_p,\n partial(_lu_cpu_gpu_translation_rule, lapack.getrf),\n platform='cpu')\n\nif cusolver is not None:\n xla.register_translation(\n lu_p, partial(_lu_cpu_gpu_translation_rule, cusolver.getrf),\n platform='gpu')\n\nif rocsolver is not None:\n xla.register_translation(\n lu_p, partial(_lu_cpu_gpu_translation_rule, rocsolver.getrf),\n platform='gpu')\n\nxla.register_translation(lu_p, _lu_tpu_translation_rule, platform='tpu')\n\n\n@partial(vectorize, excluded={3}, signature='(n,n),(n),(n,k)->(n,k)')\ndef _lu_solve_core(lu, permutation, b, trans):\n m = lu.shape[0]\n x = jnp.reshape(b, (m, np.prod(b.shape[1:])))\n if trans == 0:\n x = x[permutation, :]\n x = triangular_solve(lu, x, left_side=True, lower=True, unit_diagonal=True)\n x = triangular_solve(lu, x, left_side=True, lower=False)\n elif trans == 1 or trans == 2:\n conj = trans == 2\n x = triangular_solve(lu, x, left_side=True, lower=False, transpose_a=True,\n conjugate_a=conj)\n x = triangular_solve(lu, x, left_side=True, lower=True, unit_diagonal=True,\n transpose_a=True, conjugate_a=conj)\n x = x[jnp.argsort(permutation), :]\n else:\n raise ValueError(\"'trans' value must be 0, 1, or 2, got {}\".format(trans))\n return lax.reshape(x, b.shape)\n\n\n@partial(api.jit, static_argnums=(3,))\ndef _lu_solve(lu, permutation, b, trans):\n if len(lu.shape) < 2 or lu.shape[-1] != 
lu.shape[-2]:\n raise ValueError(\"last two dimensions of LU decomposition must be equal, \"\n \"got shape {}\".format(lu.shape))\n if len(b.shape) < 1:\n raise ValueError(\"b matrix must have rank >= 1, got shape {}\"\n .format(b.shape))\n # Broadcasting follows NumPy's convention for linalg.solve: the RHS is\n # treated as a (batched) vector if the number of dimensions differ by 1.\n # Otherwise, broadcasting rules apply.\n rhs_vector = lu.ndim == b.ndim + 1\n if rhs_vector:\n if b.shape[-1] != lu.shape[-1]:\n raise ValueError(\"When LU decomposition matrix and b have the same \"\n \"number of dimensions, last axis of LU decomposition \"\n \"matrix (shape {}) and b array (shape {}) must match\"\n .format(lu.shape, b.shape))\n b = b[..., jnp.newaxis]\n else:\n if b.shape[-2] != lu.shape[-1]:\n raise ValueError(\"When LU decomposition matrix and b different \"\n \"numbers of dimensions, last axis of LU decomposition \"\n \"matrix (shape {}) and second to last axis of b array \"\n \"(shape {}) must match\"\n .format(lu.shape, b.shape))\n x = _lu_solve_core(lu, permutation, b, trans)\n return x[..., 0] if rhs_vector else x\n\n\ndef lu_solve(lu, permutation, b, trans=0):\n \"\"\"LU solve with broadcasting.\"\"\"\n return _lu_solve(lu, permutation, b, trans)\n\n\n# QR decomposition\n\ndef qr_impl(operand, full_matrices):\n q, r = xla.apply_primitive(qr_p, operand, full_matrices=full_matrices)\n return q, r\n\ndef _qr_translation_rule(ctx, avals_in, avals_out, operand, *, full_matrices):\n return xops.QR(operand, full_matrices)\n\ndef qr_abstract_eval(operand, full_matrices):\n if isinstance(operand, ShapedArray):\n if operand.ndim < 2:\n raise ValueError(\"Argument to QR decomposition must have ndims >= 2\")\n batch_dims = operand.shape[:-2]\n m = operand.shape[-2]\n n = operand.shape[-1]\n k = m if full_matrices else min(m, n)\n q = operand.update(shape=batch_dims + (m, k))\n r = operand.update(shape=batch_dims + (k, n))\n else:\n q = operand\n r = operand\n return q, r\n\ndef qr_jvp_rule(primals, tangents, full_matrices):\n # See j-towns.github.io/papers/qr-derivative.pdf for a terse derivation.\n x, = primals\n dx, = tangents\n q, r = qr_p.bind(x, full_matrices=False)\n *_, m, n = x.shape\n if full_matrices or m < n:\n raise NotImplementedError(\n \"Unimplemented case of QR decomposition derivative\")\n dx_rinv = triangular_solve(r, dx) # Right side solve by default\n qt_dx_rinv = jnp.matmul(_H(q), dx_rinv)\n qt_dx_rinv_lower = jnp.tril(qt_dx_rinv, -1)\n do = qt_dx_rinv_lower - _H(qt_dx_rinv_lower) # This is skew-symmetric\n # The following correction is necessary for complex inputs\n I = lax.expand_dims(jnp.eye(n, dtype=do.dtype), range(qt_dx_rinv.ndim - 2))\n do = do + I * (qt_dx_rinv - jnp.real(qt_dx_rinv))\n dq = jnp.matmul(q, do - qt_dx_rinv) + dx_rinv\n dr = jnp.matmul(qt_dx_rinv - do, r)\n return (q, r), (dq, dr)\n\ndef qr_batching_rule(batched_args, batch_dims, full_matrices):\n x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n return qr_p.bind(x, full_matrices=full_matrices), (0, 0)\n\ndef _qr_cpu_gpu_translation_rule(geqrf_impl, orgqr_impl, ctx, avals_in,\n avals_out, operand, *, full_matrices):\n c = ctx.builder\n operand_aval, = avals_in\n dims = operand_aval.shape\n m, n = dims[-2:]\n batch_dims = dims[:-2]\n r, tau, info_geqrf = geqrf_impl(c, operand)\n if m < n:\n q = xops.Slice(r, [0] * len(dims), list(batch_dims) + [m, m],\n [1] * len(dims))\n q, info_orgqr = orgqr_impl(c, q, tau)\n elif not full_matrices:\n q, info_orgqr = orgqr_impl(c, r, tau)\n r = 
xops.Slice(r, [0] * len(dims), list(batch_dims) + [n, n],\n [1] * len(dims))\n else:\n padding_config = [(0, 0, 0)] * len(dims)\n padding_config[-1] = (0, m - n, 0)\n q = xops.Pad(r, xops.Constant(c, np.array(0, dtype=operand_aval.dtype)),\n xla_client.make_padding_config(padding_config))\n q, info_orgqr = orgqr_impl(c, q, tau)\n if info_geqrf is not None:\n ok = xops.And(\n xops.Eq(info_geqrf, xops.Constant(c, np.array(0, np.int32))),\n xops.Eq(info_orgqr, xops.Constant(c, np.array(0, np.int32))))\n q = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), q,\n _nan_like(c, q))\n r = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), r,\n _nan_like(c, r))\n else:\n pass # rocsolver does not return info\n\n r = xla.lower_fun(jnp.triu, multiple_results=False,\n backend=ctx.platform)(c, r)\n return [q, r]\n\nqr_p = Primitive('qr')\nqr_p.multiple_results = True\nqr_p.def_impl(qr_impl)\nqr_p.def_abstract_eval(qr_abstract_eval)\nxla.register_translation(qr_p, _qr_translation_rule)\nad.primitive_jvps[qr_p] = qr_jvp_rule\nbatching.primitive_batchers[qr_p] = qr_batching_rule\n\nxla.register_translation(\n qr_p, partial(_qr_cpu_gpu_translation_rule, lapack.geqrf, lapack.orgqr),\n platform='cpu')\n\nif cusolver is not None:\n xla.register_translation(\n qr_p,\n partial(_qr_cpu_gpu_translation_rule, cusolver.geqrf, cusolver.orgqr),\n platform='gpu')\n\nif rocsolver is not None:\n xla.register_translation(\n qr_p,\n partial(_qr_cpu_gpu_translation_rule, rocsolver.geqrf, rocsolver.orgqr),\n platform='gpu')\n\n\n# Singular value decomposition\n\ndef svd_impl(operand, full_matrices, compute_uv):\n return xla.apply_primitive(svd_p, operand, full_matrices=full_matrices,\n compute_uv=compute_uv)\n\ndef _svd_translation_rule(ctx, avals_in, avals_out, operand, *, full_matrices,\n compute_uv):\n operand_aval, = avals_in\n shape = operand_aval.shape\n m, n = shape[-2:]\n if m == 0 or n == 0:\n return xla.lower_fun(_empty_svd, multiple_results=True, new_style=True)(\n ctx, avals_in, avals_out, operand, full_matrices=full_matrices,\n compute_uv=compute_uv)\n\n u, s, v = xops.SVD(operand)\n permutation = list(range(len(shape)))\n permutation[-1], permutation[-2] = permutation[-2], permutation[-1]\n vt = xops.Transpose(v, permutation)\n if not full_matrices and m != n:\n u = xops.SliceInDim(u, 0, min(m, n), stride=1, dimno=len(shape) - 1)\n vt = xops.SliceInDim(vt, 0, min(m, n), stride=1, dimno=len(shape) - 2)\n\n if not compute_uv:\n return [s]\n else:\n return [s, u, vt]\n\n\ndef svd_abstract_eval(operand, full_matrices, compute_uv):\n if isinstance(operand, ShapedArray):\n if operand.ndim < 2:\n raise ValueError(\"Argument to singular value decomposition must have ndims >= 2\")\n\n batch_dims = operand.shape[:-2]\n m = operand.shape[-2]\n n = operand.shape[-1]\n s = operand.update(shape=batch_dims + (min(m, n),),\n dtype=lax_internal._complex_basetype(operand.dtype))\n if compute_uv:\n u = operand.update(shape=batch_dims + (m, m if full_matrices else min(m, n)))\n vt = operand.update(shape=batch_dims + (n if full_matrices else min(m, n), n))\n return s, u, vt\n else:\n return s,\n else:\n raise NotImplementedError\n\ndef svd_jvp_rule(primals, tangents, full_matrices, compute_uv):\n A, = primals\n dA, = tangents\n s, U, Vt = svd_p.bind(A, full_matrices=False, compute_uv=True)\n\n if compute_uv and full_matrices:\n # TODO: implement full matrices case, documented here: https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf\n raise NotImplementedError(\n \"Singular value decomposition JVP not 
implemented for full matrices\")\n\n Ut, V = _H(U), _H(Vt)\n s_dim = s[..., None, :]\n dS = jnp.matmul(jnp.matmul(Ut, dA), V)\n ds = jnp.real(jnp.diagonal(dS, 0, -2, -1))\n\n if not compute_uv:\n return (s,), (ds,)\n\n s_diffs = (s_dim + _T(s_dim)) * (s_dim - _T(s_dim))\n s_diffs_zeros = jnp.eye(s.shape[-1], dtype=s.dtype) # jnp.ones((), dtype=A.dtype) * (s_diffs == 0.) # is 1. where s_diffs is 0. and is 0. everywhere else\n s_diffs_zeros = lax.expand_dims(s_diffs_zeros, range(s_diffs.ndim - 2))\n F = 1 / (s_diffs + s_diffs_zeros) - s_diffs_zeros\n dSS = s_dim * dS # dS.dot(jnp.diag(s))\n SdS = _T(s_dim) * dS # jnp.diag(s).dot(dS)\n\n s_zeros = jnp.ones((), dtype=A.dtype) * (s == 0.)\n s_inv = 1 / (s + s_zeros) - s_zeros\n s_inv_mat = jnp.vectorize(jnp.diag, signature='(k)->(k,k)')(s_inv)\n dUdV_diag = .5 * (dS - _H(dS)) * s_inv_mat\n dU = jnp.matmul(U, F * (dSS + _H(dSS)) + dUdV_diag)\n dV = jnp.matmul(V, F * (SdS + _H(SdS)))\n\n m, n = A.shape[-2:]\n if m > n:\n I = lax.expand_dims(jnp.eye(m, dtype=A.dtype), range(U.ndim - 2))\n dU = dU + jnp.matmul(I - jnp.matmul(U, Ut), jnp.matmul(dA, V)) / s_dim\n if n > m:\n I = lax.expand_dims(jnp.eye(n, dtype=A.dtype), range(V.ndim - 2))\n dV = dV + jnp.matmul(I - jnp.matmul(V, Vt), jnp.matmul(_H(dA), U)) / s_dim\n\n return (s, U, Vt), (ds, dU, _H(dV))\n\ndef _empty_svd(a, *, full_matrices, compute_uv):\n batch_shape = a.shape[:-2]\n m, n = a.shape[-2:]\n s = jnp.empty(batch_shape + (0,), dtype=lax_internal._complex_basetype(a.dtype))\n if not compute_uv:\n return (s,)\n if full_matrices:\n size = max(m, n)\n u = jnp.broadcast_to(jnp.eye(size, dtype=a.dtype), batch_shape + (size, size))\n else:\n u = jnp.empty(batch_shape + (m, n), dtype=a.dtype)\n v = jnp.empty(batch_shape + (0, 0), dtype=a.dtype)\n if m < n:\n u, v = v, u\n return s, u, v\n\ndef _svd_cpu_gpu_translation_rule(gesvd_impl, ctx, avals_in, avals_out, operand,\n *, full_matrices, compute_uv):\n operand_aval, = avals_in\n m, n = operand_aval.shape[-2:]\n batch_dims = operand_aval.shape[:-2]\n c = ctx.builder\n\n if m == 0 or n == 0:\n return xla.lower_fun(_empty_svd, multiple_results=True, new_style=True)(\n ctx, avals_in, avals_out, operand, full_matrices=full_matrices,\n compute_uv=compute_uv)\n\n s, u, vt, info = gesvd_impl(c, operand,\n full_matrices=full_matrices,\n compute_uv=compute_uv)\n ok = xops.Eq(info, xops.Constant(c, np.array(0, np.int32)))\n s = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1,)), s,\n _nan_like(c, s))\n\n result = [s]\n\n if compute_uv:\n u = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), u,\n _nan_like(c, u))\n vt = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), vt,\n _nan_like(c, vt))\n result += [u, vt]\n\n return result\n\ndef svd_batching_rule(batched_args, batch_dims, full_matrices, compute_uv):\n x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n outs = svd_p.bind(x, full_matrices=full_matrices, compute_uv=compute_uv)\n\n if compute_uv:\n return outs, (0, 0, 0)\n else:\n return outs, (0,)\n\nsvd_p = Primitive('svd')\nsvd_p.multiple_results = True\nsvd_p.def_impl(svd_impl)\nsvd_p.def_abstract_eval(svd_abstract_eval)\nad.primitive_jvps[svd_p] = svd_jvp_rule\nbatching.primitive_batchers[svd_p] = svd_batching_rule\nxla.register_translation(svd_p, _svd_translation_rule)\n\nxla.register_translation(\n svd_p, partial(_svd_cpu_gpu_translation_rule, lapack.gesdd),\n platform='cpu')\n\nif cusolver is not None:\n xla.register_translation(\n svd_p, partial(_svd_cpu_gpu_translation_rule, 
cusolver.gesvd),\n platform='gpu')\n\nif rocsolver is not None:\n xla.register_translation(\n svd_p, partial(_svd_cpu_gpu_translation_rule, rocsolver.gesvd),\n platform='gpu')\n\n\ndef _tridiagonal_solve_gpu_translation_rule(ctx, avals_in, avals_out, dl, d, du,\n b, *, m, n, ldb, t):\n return [cusparse.gtsv2(ctx.builder, dl, d, du, b, m=m, n=n, ldb=ldb, t=t)]\n\ntridiagonal_solve_p = Primitive('tridiagonal_solve')\ntridiagonal_solve_p.multiple_results = False\ntridiagonal_solve_p.def_impl(\n functools.partial(xla.apply_primitive, tridiagonal_solve_p))\ntridiagonal_solve_p.def_abstract_eval(lambda dl, d, du, b, *, m, n, ldb, t: b)\n# TODO(tomhennigan): Consider AD rules using lax.custom_linear_solve?\nif cusparse is not None and hasattr(cusparse, \"gtsv2\"):\n xla.register_translation(tridiagonal_solve_p,\n _tridiagonal_solve_gpu_translation_rule,\n platform='gpu')\n\ndef _tridiagonal_solve_jax(dl, d, du, b, **kw):\n \"\"\"Pure JAX implementation of `tridiagonal_solve`.\"\"\"\n prepend_zero = lambda x: jnp.append(jnp.zeros([1], dtype=x.dtype), x[:-1])\n fwd1 = lambda tu_, x: x[1] / (x[0] - x[2] * tu_)\n fwd2 = lambda b_, x: (x[0] - x[3] * b_) / (x[1] - x[3] * x[2])\n bwd1 = lambda x_, x: x[0] - x[1] * x_\n double = lambda f, args: (f(*args), f(*args))\n\n # Forward pass.\n _, tu_ = lax.scan(lambda tu_, x: double(fwd1, (tu_, x)),\n du[0] / d[0],\n (d, du, dl),\n unroll=32)\n\n _, b_ = lax.scan(lambda b_, x: double(fwd2, (b_, x)),\n b[0] / d[0],\n (b, d, prepend_zero(tu_), dl),\n unroll=32)\n\n # Backsubstitution.\n _, x_ = lax.scan(lambda x_, x: double(bwd1, (x_, x)),\n b_[-1],\n (b_[::-1], tu_[::-1]),\n unroll=32)\n\n return x_[::-1]\n\n\nxla.register_translation(tridiagonal_solve_p, xla.lower_fun(\n _tridiagonal_solve_jax, multiple_results=False, new_style=True))\n\n\ndef tridiagonal_solve(dl, d, du, b):\n r\"\"\"Computes the solution of a tridiagonal linear system.\n\n This function computes the solution of a tridiagonal linear system::\n\n .. math::\n A . 
X = B\n\n Args:\n dl: The lower diagonal of A: ``dl[i] := A[i, i-1]`` for i in ``[0,m)``.\n Note that ``dl[0] = 0``.\n d: The middle diagonal of A: ``d[i] := A[i, i]`` for i in ``[0,m)``.\n du: The upper diagonal of A: ``du[i] := A[i, i+1]`` for i in ``[0,m)``.\n Note that ``du[m - 1] = 0``.\n b: Right hand side matrix.\n\n Returns:\n Solution ``X`` of tridiagonal system.\n \"\"\"\n if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1:\n raise ValueError('dl, d and du must be vectors')\n\n if dl.shape != d.shape or d.shape != du.shape:\n raise ValueError(\n f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`')\n\n if b.ndim != 2:\n raise ValueError(f'b={b.shape} must be a matrix')\n\n m, = dl.shape\n if m < 3:\n raise ValueError(f'm ({m}) must be >= 3')\n\n ldb, n = b.shape\n if ldb < max(1, m):\n raise ValueError(f'Leading dimension of b={ldb} must be ≥ max(1, {m})')\n\n if dl.dtype != d.dtype or d.dtype != du.dtype or du.dtype != b.dtype:\n raise ValueError(f'dl={dl.dtype}, d={d.dtype}, du={du.dtype} and '\n f'b={b.dtype} must be the same dtype.')\n\n t = dl.dtype\n if t not in (np.float32, np.float64):\n raise ValueError(f'Only f32/f64 are supported, got {t}')\n\n return tridiagonal_solve_p.bind(dl, d, du, b, m=m, n=n, ldb=ldb, t=t)\n\n\n# Schur Decomposition\n\n\ndef schur(x,\n compute_schur_vectors=True,\n sort_eig_vals=False,\n select_callable=None):\n return schur_p.bind(\n x,\n compute_schur_vectors=compute_schur_vectors,\n sort_eig_vals=sort_eig_vals,\n select_callable=select_callable)\n\n\ndef _schur_impl(operand, *, compute_schur_vectors, sort_eig_vals,\n select_callable):\n return xla.apply_primitive(\n schur_p,\n operand,\n compute_schur_vectors=compute_schur_vectors,\n sort_eig_vals=sort_eig_vals,\n select_callable=select_callable)\n\n\ndef _schur_translation_rule(ctx, avals_in, avals_out, operand, *,\n compute_schur_vectors, sort_eig_vals):\n raise NotImplementedError(\n \"Schur decomposition is only implemented on the CPU backend.\")\n\n\ndef _schur_abstract_eval(operand, *, compute_schur_vectors, sort_eig_vals,\n select_callable):\n\n if operand.ndim < 2 or operand.shape[-2] != operand.shape[-1]:\n raise ValueError(\"Argument to Schur decomposition must have \"\n \"shape [..., n, n], got shape {}\".format(operand.shape))\n\n batch_dims = operand.shape[:-2]\n n = operand.shape[-1]\n dtype = operand.dtype\n dtype = dtypes.canonicalize_dtype(dtype)\n T = operand.update(shape=batch_dims + (n, n), dtype=dtype)\n vs = operand.update(shape=batch_dims + (n, n), dtype=dtype)\n\n return (T, vs) if compute_schur_vectors else (T,)\n\n\ndef _schur_cpu_translation_rule(ctx, avals_in, avals_out, operand, *,\n compute_schur_vectors, sort_eig_vals,\n select_callable):\n operand_aval, = avals_in\n batch_dims = operand_aval.shape[:-2]\n c = ctx.builder\n\n _cpu_gees = lapack.gees\n\n if sort_eig_vals:\n T, vs, sdim, info = _cpu_gees(\n c,\n operand,\n jobvs=compute_schur_vectors,\n sort=sort_eig_vals,\n select=select_callable)\n else:\n T, vs, info = _cpu_gees(\n c,\n operand,\n jobvs=compute_schur_vectors,\n sort=sort_eig_vals,\n select=select_callable)\n\n ok = xops.Eq(info, xops.Constant(c, np.array(0, np.int32)))\n T = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), T,\n _nan_like(c, T))\n output = [T]\n if compute_schur_vectors:\n vs = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), vs,\n _nan_like(c, vs))\n\n output.append(vs)\n\n return output\n\n\ndef _schur_batching_rule(batched_args, batch_dims, *, compute_schur_vectors,\n sort_eig_vals, 
select_callable):\n x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n\n return schur_p.bind(\n x,\n compute_schur_vectors=compute_schur_vectors,\n sort_eig_vals=sort_eig_vals,\n select_callable=select_callable), (0,) * (1 + compute_schur_vectors)\n\n\ndef _schur_jvp_rule(primals, tangents, *, compute_schur_vectors, sort_eig_vals):\n raise NotImplementedError(\n 'The differentiation rules for the Schur factorization have not been implemented.'\n )\n\n\nschur_p = Primitive('schur')\nschur_p.multiple_results = True\nschur_p.def_impl(_schur_impl)\nschur_p.def_abstract_eval(_schur_abstract_eval)\nxla.register_translation(schur_p, _schur_translation_rule)\nxla.register_translation(schur_p, _schur_cpu_translation_rule, platform='cpu')\nbatching.primitive_batchers[schur_p] = _schur_batching_rule\nad.primitive_jvps[schur_p] = _schur_jvp_rule\n"
] | [
[
"numpy.asarray",
"numpy.array2string",
"numpy.get_printoptions"
],
[
"numpy.prod",
"numpy.array",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
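The `_tridiagonal_solve_jax` fallback in the code above expresses the classic Thomas algorithm as three `lax.scan` passes (forward elimination of the upper diagonal and of the right-hand side, then back-substitution); `unroll=32` trades larger compiled code for fewer sequential scan steps. Below is a minimal NumPy sketch of the same recurrences, written purely for illustration (it is not code from the repository) and cross-checked against a dense solve:

```python
import numpy as np

def thomas_solve(dl, d, du, b):
    """Solve A @ x = b for tridiagonal A given as three diagonals.

    dl[i] = A[i, i-1] (dl[0] unused), d[i] = A[i, i],
    du[i] = A[i, i+1] (du[m-1] unused); b may carry trailing batch dims.
    """
    m = d.shape[0]
    tu = np.zeros(m)                      # eliminated upper diagonal
    y = np.zeros((m,) + b.shape[1:])      # eliminated right-hand side
    tu[0] = du[0] / d[0]
    y[0] = b[0] / d[0]
    for i in range(1, m):                 # forward elimination
        denom = d[i] - dl[i] * tu[i - 1]
        tu[i] = du[i] / denom
        y[i] = (b[i] - dl[i] * y[i - 1]) / denom
    x = y.copy()                          # x[m-1] = y[m-1]
    for i in range(m - 2, -1, -1):        # back-substitution
        x[i] = y[i] - tu[i] * x[i + 1]
    return x

# Cross-check on a random, diagonally dominant system.
rng = np.random.default_rng(0)
m = 8
dl = np.r_[0.0, rng.uniform(size=m - 1)]
du = np.r_[rng.uniform(size=m - 1), 0.0]
d = 4.0 + rng.uniform(size=m)
b = rng.uniform(size=(m, 2))
A = np.diag(d) + np.diag(dl[1:], -1) + np.diag(du[:-1], 1)
assert np.allclose(A @ thomas_solve(dl, d, du, b), b)
```

Each system costs O(m) in this formulation, versus O(m³) for a dense factorization, which is why the GPU path above dispatches to cuSPARSE's `gtsv2` instead of a general solver.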
983632847/video_analyst | [
"01b7ad278b828a3f7ff7a0488c5ca8f055240192",
"01b7ad278b828a3f7ff7a0488c5ca8f055240192"
] | [
"videoanalyst/model/task_model/taskmodel_impl/siamese_track.py",
"videoanalyst/evaluation/got_benchmark/experiments/got10k.py"
] | [
"# -*- coding: utf-8 -*\n\nimport numpy as np\nfrom loguru import logger\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom videoanalyst.model.common_opr.common_block import (conv_bn_relu,\n xcorr_depthwise)\nfrom videoanalyst.model.module_base import ModuleBase\nfrom videoanalyst.model.task_model.taskmodel_base import (TRACK_TASKMODELS,\n VOS_TASKMODELS)\n\ntorch.set_printoptions(precision=8)\n\n\n@TRACK_TASKMODELS.register\n@VOS_TASKMODELS.register\nclass SiamTrack(ModuleBase):\n r\"\"\"\n SiamTrack model for tracking\n\n Hyper-Parameters\n ----------------\n pretrain_model_path: string\n path to parameter to be loaded into module\n head_width: int\n feature width in head structure\n \"\"\"\n\n default_hyper_params = dict(pretrain_model_path=\"\",\n head_width=256,\n conv_weight_std=0.01,\n neck_conv_bias=[True, True, True, True],\n corr_fea_output=False,\n trt_mode=False,\n trt_fea_model_path=\"\",\n trt_track_model_path=\"\")\n\n support_phases = [\"train\", \"feature\", \"track\", \"freeze_track_fea\"]\n\n def __init__(self, backbone, head, loss=None):\n super(SiamTrack, self).__init__()\n self.basemodel = backbone\n self.head = head\n self.loss = loss\n self.trt_fea_model = None\n self.trt_track_model = None\n self._phase = \"train\"\n\n @property\n def phase(self):\n return self._phase\n\n @phase.setter\n def phase(self, p):\n assert p in self.support_phases\n self._phase = p\n\n def forward(self, *args, phase=None):\n r\"\"\"\n Perform tracking process for different phases (e.g. train / init / track)\n\n Arguments\n ---------\n target_img: torch.Tensor\n target template image patch\n search_img: torch.Tensor\n search region image patch\n\n Returns\n -------\n fcos_score_final: torch.Tensor\n predicted score for bboxes, shape=(B, HW, 1)\n fcos_bbox_final: torch.Tensor\n predicted bbox in the crop, shape=(B, HW, 4)\n fcos_cls_prob_final: torch.Tensor\n classification score, shape=(B, HW, 1)\n fcos_ctr_prob_final: torch.Tensor\n center-ness score, shape=(B, HW, 1)\n \"\"\"\n if phase is None:\n phase = self._phase\n # used during training\n if phase == 'train':\n # resolve training data\n training_data = args[0]\n target_img = training_data[\"im_z\"]\n search_img = training_data[\"im_x\"]\n # backbone feature\n f_z = self.basemodel(target_img)\n f_x = self.basemodel(search_img)\n # feature adjustment\n c_z_k = self.c_z_k(f_z)\n r_z_k = self.r_z_k(f_z)\n c_x = self.c_x(f_x)\n r_x = self.r_x(f_x)\n # feature matching\n r_out = xcorr_depthwise(r_x, r_z_k)\n c_out = xcorr_depthwise(c_x, c_z_k)\n # head\n fcos_cls_score_final, fcos_ctr_score_final, fcos_bbox_final, corr_fea = self.head(\n c_out, r_out)\n predict_data = dict(\n cls_pred=fcos_cls_score_final,\n ctr_pred=fcos_ctr_score_final,\n box_pred=fcos_bbox_final,\n )\n if self._hyper_params[\"corr_fea_output\"]:\n predict_data[\"corr_fea\"] = corr_fea\n return predict_data\n # used for template feature extraction (normal mode)\n elif phase == 'feature':\n target_img, = args\n if self._hyper_params[\"trt_mode\"]:\n # extract feature with trt model\n out_list = self.trt_fea_model(target_img)\n else:\n # backbone feature\n f_z = self.basemodel(target_img)\n # template as kernel\n c_z_k = self.c_z_k(f_z)\n r_z_k = self.r_z_k(f_z)\n # output\n out_list = [c_z_k, r_z_k]\n # used for template feature extraction (trt mode)\n elif phase == \"freeze_track_fea\":\n search_img, = args\n # backbone feature\n f_x = self.basemodel(search_img)\n # feature adjustment\n c_x = self.c_x(f_x)\n r_x = self.r_x(f_x)\n # head\n 
return [c_x, r_x]\n # [Broken] used for template feature extraction (trt mode)\n # currently broken due to the following issue of the \"torch2trt\" package\n # c.f. https://github.com/NVIDIA-AI-IOT/torch2trt/issues/251\n elif phase == \"freeze_track_head\":\n c_out, r_out = args\n # head\n outputs = self.head(c_out, r_out, 0, True)\n return outputs\n # used for tracking one frame during test\n elif phase == 'track':\n if len(args) == 3:\n search_img, c_z_k, r_z_k = args\n if self._hyper_params[\"trt_mode\"]:\n c_x, r_x = self.trt_track_model(search_img)\n else:\n # backbone feature\n f_x = self.basemodel(search_img)\n # feature adjustment\n c_x = self.c_x(f_x)\n r_x = self.r_x(f_x)\n elif len(args) == 4:\n # c_x, r_x already computed\n c_z_k, r_z_k, c_x, r_x = args\n else:\n raise ValueError(\"Illegal args length: %d\" % len(args))\n\n # feature matching\n r_out = xcorr_depthwise(r_x, r_z_k)\n c_out = xcorr_depthwise(c_x, c_z_k)\n # head\n fcos_cls_score_final, fcos_ctr_score_final, fcos_bbox_final, corr_fea = self.head(\n c_out, r_out, search_img.size(-1))\n # apply sigmoid\n fcos_cls_prob_final = torch.sigmoid(fcos_cls_score_final)\n fcos_ctr_prob_final = torch.sigmoid(fcos_ctr_score_final)\n # apply centerness correction\n fcos_score_final = fcos_cls_prob_final * fcos_ctr_prob_final\n # register extra output\n extra = dict(c_x=c_x, r_x=r_x, corr_fea=corr_fea)\n # output\n out_list = fcos_score_final, fcos_bbox_final, fcos_cls_prob_final, fcos_ctr_prob_final, extra\n else:\n raise ValueError(\"Phase not implemented.\")\n\n return out_list\n\n def update_params(self):\n r\"\"\"\n Load model parameters\n \"\"\"\n self._make_convs()\n self._initialize_conv()\n super().update_params()\n if self._hyper_params[\"trt_mode\"]:\n logger.info(\"trt mode enabled\")\n from torch2trt import TRTModule\n self.trt_fea_model = TRTModule()\n self.trt_fea_model.load_state_dict(\n torch.load(self._hyper_params[\"trt_fea_model_path\"]))\n self.trt_track_model = TRTModule()\n self.trt_track_model.load_state_dict(\n torch.load(self._hyper_params[\"trt_track_model_path\"]))\n logger.info(\"loaded trt model successfully\")\n\n def _make_convs(self):\n head_width = self._hyper_params['head_width']\n\n # feature adjustment\n self.r_z_k = conv_bn_relu(head_width,\n head_width,\n 1,\n 3,\n 0,\n has_relu=False)\n self.c_z_k = conv_bn_relu(head_width,\n head_width,\n 1,\n 3,\n 0,\n has_relu=False)\n self.r_x = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)\n self.c_x = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)\n\n def _initialize_conv(self, ):\n conv_weight_std = self._hyper_params['conv_weight_std']\n conv_list = [\n self.r_z_k.conv, self.c_z_k.conv, self.r_x.conv, self.c_x.conv\n ]\n for ith in range(len(conv_list)):\n conv = conv_list[ith]\n torch.nn.init.normal_(conv.weight,\n std=conv_weight_std) # conv_weight_std=0.01\n\n def set_device(self, dev):\n if not isinstance(dev, torch.device):\n dev = torch.device(dev)\n self.to(dev)\n if self.loss is not None:\n for loss_name in self.loss:\n self.loss[loss_name].to(dev)\n",
"from __future__ import absolute_import, division, print_function\n\nimport os\nimport numpy as np\nimport glob\nimport ast\nimport json\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom PIL import Image\nimport cv2\n\nfrom ..datasets import GOT10k\nfrom ..utils.metrics import rect_iou\nfrom ..utils.viz import show_frame\nfrom ..utils.ioutils import compress\n\n\nclass ExperimentGOT10k(object):\n r\"\"\"Experiment pipeline and evaluation toolkit for GOT-10k dataset.\n \n Args:\n root_dir (string): Root directory of GOT-10k dataset where\n ``train``, ``val`` and ``test`` folders exist.\n subset (string): Specify ``train``, ``val`` or ``test``\n subset of GOT-10k.\n list_file (string, optional): If provided, only run experiments on\n sequences specified by this file.\n result_dir (string, optional): Directory for storing tracking\n results. Default is ``./results``.\n report_dir (string, optional): Directory for storing performance\n evaluation results. Default is ``./reports``.\n \"\"\"\n def __init__(self,\n root_dir,\n subset='val',\n list_file=None,\n result_dir='results',\n report_dir='reports',\n use_dataset=True):\n super(ExperimentGOT10k, self).__init__()\n assert subset in ['val', 'test']\n self.subset = subset\n if use_dataset:\n self.dataset = GOT10k(root_dir, subset=subset, list_file=list_file)\n self.result_dir = os.path.join(result_dir, 'GOT-10k')\n self.report_dir = os.path.join(report_dir, 'GOT-10k')\n self.nbins_iou = 101\n self.repetitions = 3\n\n def run(self,\n tracker,\n visualize=False,\n save_video=False,\n overwrite_result=True,\n slicing_quantile=(0.0, 1.0)):\n if self.subset == 'test':\n print('\\033[93m[WARNING]:\\n' \\\n 'The groundtruths of GOT-10k\\'s test set is withholded.\\n' \\\n 'You will have to submit your results to\\n' \\\n '[http://got-10k.aitestunion.com/]' \\\n '\\nto access the performance.\\033[0m')\n time.sleep(2)\n\n print('Running tracker %s on GOT-10k...' 
% tracker.name)\n self.dataset.return_meta = False\n\n start_quantile, end_quantile = slicing_quantile\n len_dataset = len(self.dataset)\n start_idx = int(len_dataset * start_quantile)\n end_idx = int(len_dataset * end_quantile)\n\n # loop over the complete dataset\n # for s, (img_files, anno) in enumerate(self.dataset):\n for s in range(start_idx, end_idx):\n img_files, anno = self.dataset[s]\n seq_name = self.dataset.seq_names[s]\n print('--Sequence %d/%d: %s' % (s + 1, len(self.dataset), seq_name))\n\n # run multiple repetitions for each sequence\n for r in range(self.repetitions):\n # check if the tracker is deterministic\n if r > 0 and tracker.is_deterministic:\n break\n elif r == 3 and self._check_deterministic(\n tracker.name, seq_name):\n print(' Detected a deterministic tracker, ' +\n 'skipping remaining trials.')\n break\n print(' Repetition: %d' % (r + 1))\n\n # skip if results exist\n record_file = os.path.join(self.result_dir, tracker.name,\n seq_name,\n '%s_%03d.txt' % (seq_name, r + 1))\n if os.path.exists(record_file) and not overwrite_result:\n print(' Found results, skipping', seq_name)\n continue\n\n # tracking loop\n boxes, times = tracker.track(img_files,\n anno[0, :],\n visualize=visualize)\n\n # record results\n self._record(record_file, boxes, times)\n\n # save videos\n if save_video:\n video_dir = os.path.join(\n os.path.dirname(os.path.dirname(self.result_dir)), 'videos',\n 'GOT-10k', tracker.name)\n video_file = os.path.join(video_dir, '%s.avi' % seq_name)\n\n if not os.path.isdir(video_dir):\n os.makedirs(video_dir)\n image = Image.open(img_files[0])\n img_W, img_H = image.size\n out_video = cv2.VideoWriter(video_file,\n cv2.VideoWriter_fourcc(*'MJPG'), 10,\n (img_W, img_H))\n for ith, (img_file, pred) in enumerate(zip(img_files, boxes)):\n image = Image.open(img_file)\n if not image.mode == 'RGB':\n image = image.convert('RGB')\n img = np.array(image)[:, :, ::-1].copy()\n pred = pred.astype(int)\n cv2.rectangle(img, (pred[0], pred[1]),\n (pred[0] + pred[2], pred[1] + pred[3]),\n self.color['pred'], 2)\n if ith < anno.shape[0]:\n gt = anno[ith].astype(int)\n cv2.rectangle(img, (gt[0], gt[1]),\n (gt[0] + gt[2], gt[1] + gt[3]),\n self.color['gt'], 2)\n out_video.write(img)\n out_video.release()\n print(' Videos saved at', video_file)\n\n def report(self, tracker_names, plot_curves=True):\n assert isinstance(tracker_names, (list, tuple))\n\n if self.subset == 'test':\n pwd = os.getcwd()\n\n # generate compressed submission file for each tracker\n for tracker_name in tracker_names:\n # compress all tracking results\n result_dir = os.path.join(self.result_dir, tracker_name)\n os.chdir(result_dir)\n save_file = '../%s' % tracker_name\n compress('.', save_file)\n print('Records saved at', os.path.realpath(save_file + '.zip'))\n\n # print submission guides\n print('\\033[93mLogin and follow instructions on')\n print('http://got-10k.aitestunion.com/submit_instructions')\n print('to upload and evaluate your tracking results\\033[0m')\n\n # switch back to previous working directory\n os.chdir(pwd)\n\n return None\n elif self.subset == 'val':\n # meta information is useful when evaluation\n self.dataset.return_meta = True\n\n # assume tracker_names[0] is your tracker\n report_dir = os.path.join(self.report_dir, tracker_names[0])\n if not os.path.exists(report_dir):\n os.makedirs(report_dir)\n report_file = os.path.join(report_dir, 'performance.json')\n\n # visible ratios of all sequences\n seq_names = self.dataset.seq_names\n covers = {s: self.dataset[s][2]['cover'][1:] for 
s in seq_names}\n\n performance = {}\n for name in tracker_names:\n print('Evaluating', name)\n ious = {}\n times = {}\n performance.update({name: {'overall': {}, 'seq_wise': {}}})\n\n for s, (_, anno, meta) in enumerate(self.dataset):\n seq_name = self.dataset.seq_names[s]\n record_files = glob.glob(\n os.path.join(self.result_dir, name, seq_name,\n '%s_[0-9]*.txt' % seq_name))\n if len(record_files) == 0:\n raise Exception('Results for sequence %s not found.' %\n seq_name)\n\n # read results of all repetitions\n boxes = [np.loadtxt(f, delimiter=',') for f in record_files]\n assert all([b.shape == anno.shape for b in boxes])\n\n # calculate and stack all ious\n bound = ast.literal_eval(meta['resolution'])\n seq_ious = [\n rect_iou(b[1:], anno[1:], bound=bound) for b in boxes\n ]\n # only consider valid frames where targets are visible\n seq_ious = [t[covers[seq_name] > 0] for t in seq_ious]\n seq_ious = np.concatenate(seq_ious)\n ious[seq_name] = seq_ious\n\n # stack all tracking times\n times[seq_name] = []\n time_file = os.path.join(self.result_dir, name, seq_name,\n '%s_time.txt' % seq_name)\n if os.path.exists(time_file):\n seq_times = np.loadtxt(time_file, delimiter=',')\n seq_times = seq_times[~np.isnan(seq_times)]\n seq_times = seq_times[seq_times > 0]\n if len(seq_times) > 0:\n times[seq_name] = seq_times\n\n # store sequence-wise performance\n ao, sr, speed, _ = self._evaluate(seq_ious, seq_times)\n performance[name]['seq_wise'].update({\n seq_name: {\n 'ao': ao,\n 'sr': sr,\n 'speed_fps': speed,\n 'length': len(anno) - 1\n }\n })\n\n ious = np.concatenate(list(ious.values()))\n times = np.concatenate(list(times.values()))\n\n # store overall performance\n ao, sr, speed, succ_curve = self._evaluate(ious, times)\n performance[name].update({\n 'overall': {\n 'ao': ao,\n 'sr': sr,\n 'speed_fps': speed,\n 'succ_curve': succ_curve.tolist()\n }\n })\n\n # save performance\n with open(report_file, 'w') as f:\n json.dump(performance, f, indent=4)\n # plot success curves\n if plot_curves:\n self.plot_curves([report_file], tracker_names)\n\n return performance\n\n def show(self, tracker_names, seq_names=None, play_speed=1):\n if seq_names is None:\n seq_names = self.dataset.seq_names\n elif isinstance(seq_names, str):\n seq_names = [seq_names]\n assert isinstance(tracker_names, (list, tuple))\n assert isinstance(seq_names, (list, tuple))\n\n play_speed = int(round(play_speed))\n assert play_speed > 0\n self.dataset.return_meta = False\n\n for s, seq_name in enumerate(seq_names):\n print('[%d/%d] Showing results on %s...' 
%\n (s + 1, len(seq_names), seq_name))\n\n # load all tracking results\n records = {}\n for name in tracker_names:\n record_file = os.path.join(self.result_dir, name, seq_name,\n '%s_001.txt' % seq_name)\n records[name] = np.loadtxt(record_file, delimiter=',')\n\n # loop over the sequence and display results\n img_files, anno = self.dataset[seq_name]\n for f, img_file in enumerate(img_files):\n if not f % play_speed == 0:\n continue\n image = Image.open(img_file)\n boxes = [anno[f]] + [records[name][f] for name in tracker_names]\n show_frame(image,\n boxes,\n legends=['GroundTruth'] + tracker_names,\n colors=[\n 'w', 'r', 'g', 'b', 'c', 'm', 'y', 'orange',\n 'purple', 'brown', 'pink'\n ])\n\n def _record(self, record_file, boxes, times):\n # record bounding boxes\n record_dir = os.path.dirname(record_file)\n if not os.path.isdir(record_dir):\n os.makedirs(record_dir)\n np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')\n while not os.path.exists(record_file):\n print('warning: recording failed, retrying...')\n np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')\n print(' Results recorded at', record_file)\n\n # record running times\n time_file = record_file[:record_file.rfind('_')] + '_time.txt'\n times = times[:, np.newaxis]\n if os.path.exists(time_file):\n exist_times = np.loadtxt(time_file, delimiter=',')\n if exist_times.ndim == 1:\n exist_times = exist_times[:, np.newaxis]\n times = np.concatenate((exist_times, times), axis=1)\n np.savetxt(time_file, times, fmt='%.8f', delimiter=',')\n\n def _check_deterministic(self, tracker_name, seq_name):\n record_dir = os.path.join(self.result_dir, tracker_name, seq_name)\n record_files = sorted(\n glob.glob(os.path.join(record_dir, '%s_[0-9]*.txt' % seq_name)))\n\n if len(record_files) < 3:\n return False\n\n records = []\n for record_file in record_files:\n with open(record_file, 'r') as f:\n records.append(f.read())\n\n return len(set(records)) == 1\n\n def _evaluate(self, ious, times):\n # AO, SR and tracking speed\n ao = np.mean(ious)\n sr = np.mean(ious > 0.5)\n if len(times) > 0:\n # times has to be an array of positive values\n speed_fps = np.mean(1. 
/ times)\n else:\n speed_fps = -1\n\n # success curve\n # thr_iou = np.linspace(0, 1, 101)\n thr_iou = np.linspace(0, 1, self.nbins_iou)\n bin_iou = np.greater(ious[:, None], thr_iou[None, :])\n succ_curve = np.mean(bin_iou, axis=0)\n\n return ao, sr, speed_fps, succ_curve\n\n def plot_curves(self, report_files, tracker_names, extension='.png'):\n assert isinstance(report_files, list), \\\n 'Expected \"report_files\" to be a list, ' \\\n 'but got %s instead' % type(report_files)\n\n # assume tracker_names[0] is your tracker\n report_dir = os.path.join(self.report_dir, tracker_names[0])\n if not os.path.exists(report_dir):\n os.makedirs(report_dir)\n\n performance = {}\n for report_file in report_files:\n with open(report_file) as f:\n performance.update(json.load(f))\n\n succ_file = os.path.join(report_dir, 'success_plot' + extension)\n key = 'overall'\n\n # filter performance by tracker_names\n performance = {\n k: v\n for k, v in performance.items() if k in tracker_names\n }\n\n # sort trackers by AO\n tracker_names = list(performance.keys())\n aos = [t[key]['ao'] for t in performance.values()]\n inds = np.argsort(aos)[::-1]\n tracker_names = [tracker_names[i] for i in inds]\n\n # markers\n markers = ['-', '--', '-.']\n markers = [c + m for m in markers for c in [''] * 10]\n\n # plot success curves\n thr_iou = np.linspace(0, 1, self.nbins_iou)\n fig, ax = plt.subplots()\n lines = []\n legends = []\n for i, name in enumerate(tracker_names):\n line, = ax.plot(thr_iou, performance[name][key]['succ_curve'],\n markers[i % len(markers)])\n lines.append(line)\n legends.append('%s: [%.3f]' % (name, performance[name][key]['ao']))\n matplotlib.rcParams.update({'font.size': 7.4})\n legend = ax.legend(lines,\n legends,\n loc='lower left',\n bbox_to_anchor=(0., 0.))\n\n matplotlib.rcParams.update({'font.size': 9})\n ax.set(xlabel='Overlap threshold',\n ylabel='Success rate',\n xlim=(0, 1),\n ylim=(0, 1),\n title='Success plots on GOT-10k')\n ax.grid(True)\n fig.tight_layout()\n\n # control ratio\n # ax.set_aspect('equal', 'box')\n\n print('Saving success plots to', succ_file)\n fig.savefig(succ_file,\n bbox_extra_artists=(legend, ),\n bbox_inches='tight',\n dpi=300)\n"
] | [
[
"torch.sigmoid",
"torch.load",
"torch.set_printoptions",
"torch.nn.init.normal_",
"torch.device"
],
[
"numpy.greater",
"numpy.linspace",
"numpy.isnan",
"matplotlib.pyplot.subplots",
"numpy.concatenate",
"numpy.mean",
"matplotlib.rcParams.update",
"numpy.savetxt",
"numpy.argsort",
"numpy.array",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
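`ExperimentGOT10k._evaluate` in the row above reduces per-frame IoUs to three statistics: AO (the mean IoU), SR (the fraction of frames with IoU above 0.5), and a 101-point success curve. A self-contained sketch with synthetic IoUs (illustrative only; a real run reads the IoUs back from the recorded result files):

```python
import numpy as np

rng = np.random.default_rng(0)
ious = rng.uniform(0.0, 1.0, size=1000)   # stand-in for per-frame overlaps

ao = np.mean(ious)                         # average overlap (AO)
sr = np.mean(ious > 0.5)                   # success rate (SR) at IoU > 0.5
thr_iou = np.linspace(0, 1, 101)           # the nbins_iou = 101 thresholds
succ_curve = np.mean(ious[:, None] > thr_iou[None, :], axis=0)

# The curve is non-increasing in the threshold and equals SR at 0.5.
assert abs(succ_curve[50] - sr) < 1e-12
print('AO=%.3f, SR=%.3f' % (ao, sr))
```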
ysglh/DeepVideoAnalytics | [
"e401b3273782409b2604657514bec293d6aa75b0"
] | [
"repos/tf_ctpn_cpu/lib/utils/setup.py"
] | [
"from Cython.Build import cythonize\nimport numpy as np\nfrom distutils.core import setup\n\n\nsetup(ext_modules=cythonize([\"bbox.pyx\",\"cython_nms.pyx\"],include_path=[np.get_include()]\n ))\n\n"
] | [
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
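One detail worth noting in the `setup.py` above: `np.get_include()` is passed to `cythonize` as `include_path`, which Cython only consults when resolving `.pxd`/`.pxi` files, while the generated C code needs the NumPy headers on the C compiler's include path. A sketch of the more conventional spelling (an illustrative variant, not the repository's file):

```python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy as np

# Put numpy/arrayobject.h on the C include path of each extension,
# so .pyx modules that `cimport numpy` compile cleanly.
extensions = [
    Extension(name, [name + ".pyx"], include_dirs=[np.get_include()])
    for name in ("bbox", "cython_nms")
]

setup(ext_modules=cythonize(extensions))
```

Building in place would then be the usual `python setup.py build_ext --inplace`.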
constracktor/testing-python-exercise | [
"70b15a9d8e193fc518e46996cbc3e9f52cb1336d"
] | [
"tests/unit/test_diffusion2d_functions.py"
] | [
"\"\"\"\nTests for functions in class SolveDiffusion2D\n\"\"\"\nimport numpy as np\n#import pytest\nfrom diffusion2d import SolveDiffusion2D\n\nfrom unittest import TestCase\n\n\nclass TestOperations(TestCase):\n \"\"\"\n Test suite for mathematical operations functions.\n \"\"\"\n def setUp(self):\n # Fixture\n self.w = 12.\n self.h = 20.\n self.dx = 0.4\n self.dy = 0.2\n self.D = 0.5\n self.T_cold = 300.\n self.T_hot = 700.\n\n def test_initialize_domain(self):\n \"\"\"\n Check function SolveDiffusion2D.initialize_domain\n \"\"\"\n solver = SolveDiffusion2D()\n\n expected_nx = 30 #int(self.w / self.dx)\n expected_ny = 100 #int(self.h / self.dy)\n\n solver.initialize_domain(self.w,self.h,self.dx,self.dy)\n\n self.assertEqual(solver.nx, expected_nx)\n self.assertEqual(solver.ny, expected_ny)\n\n def test_initialize_physical_parameters(self):\n \"\"\"\n Checks function SolveDiffusion2D.initialize_domain\n \"\"\"\n solver = SolveDiffusion2D()\n solver.dx = self.dx\n solver.dy = self.dy\n\n #dx**2 * dy**2 / (2 * d * (dx**2 + dy**2))\n expected_dt = 0.032\n\n solver.initialize_physical_parameters(self.D)\n\n self.assertAlmostEqual(solver.dt, expected_dt, 6)\n\n def test_get_initial_condition(self):\n \"\"\"\n Checks function SolveDiffusion2D.get_initial_function\n \"\"\"\n solver = SolveDiffusion2D()\n solver.T_cold = self.T_cold\n solver.T_hot = self.T_hot\n solver.initialize_domain(self.w,self.h,self.dx,self.dy)\n\n expected_u = self.T_cold * np.ones((solver.nx, solver.ny))\n\n # Initial conditions - circle of radius r centred at (cx,cy) (mm)\n r, cx, cy = 2, 5, 5\n r2 = r ** 2\n for i in range(solver.nx):\n for j in range(solver.ny):\n p2 = (i * solver.dx - cx) ** 2 + (j * solver.dy - cy) ** 2\n if p2 < r2:\n expected_u[i, j] = self.T_hot\n\n actual_u = solver.get_initial_condition()\n\n for i in range(solver.nx):\n for j in range(solver.ny):\n self.assertEqual(actual_u[i,j], expected_u[i,j])\n\n# def test_initialize_domain():\n# \"\"\"\n# Check function SolveDiffusion2D.initialize_domain\n# \"\"\"\n# solver = SolveDiffusion2D()\n#\n# w = 12.\n# h = 20.\n# dx = 0.4\n# dy = 0.2\n# expected_nx = 30 #int(w / dx)\n# expected_ny = 100 #int(h / dy)\n#\n# solver.initialize_domain(w,h,dx,dy)\n#\n# assert solver.nx == expected_nx\n# assert solver.ny == expected_ny\n#\n# def test_initialize_physical_parameters():\n# \"\"\"\n# Checks function SolveDiffusion2D.initialize_domain\n# \"\"\"\n# solver = SolveDiffusion2D()\n# solver.dx = 0.2\n# solver.dy = 0.4\n# d=5.\n#\n# #dx**2 * dy**2 / (2 * d * (dx**2 + dy**2))\n# expected_dt = pytest.approx(0.0032, abs=0.000001)\n#\n# solver.initialize_physical_parameters(d)\n#\n# assert solver.dt == expected_dt\n#\n# def test_get_initial_condition():\n# \"\"\"\n# Checks function SolveDiffusion2D.get_initial_function\n# \"\"\"\n# solver = SolveDiffusion2D()\n# solver.T_cold = 300.\n# solver.T_hot = 700.\n# solver.dx = 0.1\n# solver.dy = 0.2\n# solver.nx = 100\n# solver.ny = 50\n#\n# expected_u = solver.T_cold * np.ones((solver.nx, solver.ny))\n#\n# # Initial conditions - circle of radius r centred at (cx,cy) (mm)\n# r, cx, cy = 2, 5, 5\n# r2 = r ** 2\n# for i in range(solver.nx):\n# for j in range(solver.ny):\n# p2 = (i * solver.dx - cx) ** 2 + (j * solver.dy - cy) ** 2\n# if p2 < r2:\n# expected_u[i, j] = solver.T_hot\n#\n# actual_u = solver.get_initial_condition()\n#\n# assert np.all(actual_u == expected_u)\n"
] | [
[
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
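The hard-coded expectations in the test row above follow directly from the fixture values; a quick arithmetic check (plain Python, independent of the `SolveDiffusion2D` class under test):

```python
# Fixture values from setUp above.
w, h, dx, dy, D = 12., 20., 0.4, 0.2, 0.5

assert int(w / dx) == 30 and int(h / dy) == 100   # expected_nx, expected_ny

# Stability limit quoted in the test's comment:
dt = dx**2 * dy**2 / (2 * D * (dx**2 + dy**2))
assert abs(dt - 0.032) < 1e-9   # 0.0064 / 0.2 == 0.032
```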
nmardirossian/pyscf | [
"57c8912dcfcc1157a822feede63df54ed1067115",
"57c8912dcfcc1157a822feede63df54ed1067115",
"57c8912dcfcc1157a822feede63df54ed1067115",
"57c8912dcfcc1157a822feede63df54ed1067115",
"57c8912dcfcc1157a822feede63df54ed1067115",
"57c8912dcfcc1157a822feede63df54ed1067115",
"57c8912dcfcc1157a822feede63df54ed1067115",
"57c8912dcfcc1157a822feede63df54ed1067115",
"57c8912dcfcc1157a822feede63df54ed1067115"
] | [
"examples/gto/20-ao_integrals.py",
"pyscf/gto/moleintor.py",
"pyscf/ci/ucisd.py",
"pyscf/cc/test/test_h2o.py",
"pyscf/symm/test/test_cg.py",
"pyscf/scf/jk.py",
"pyscf/cc/ccsd_lambda_incore.py",
"pyscf/dft/xcfun.py",
"pyscf/pbc/df/ft_ao.py"
] | [
"#!/usr/bin/env python\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nAccess AO integrals\n\nMole.intor and Mole.intor_by_shell functions can generate AO integrals.\nCalling Mole.intor with the integral function name returns a integral matrix\nfor all basis functions defined in Mole. If the integral operator has many\ncompenents eg gradients, keyword argument comp=* needs to be specified to\ntell the function how many components the integrals have.\nMole.intor_by_shell function generates the integrals for the given shell\nindices. Keyword argument comp=* is also required when the integral operator\nhas multiple components.\n\nSee pyscf/gto/moleintor.py file for the complete list of supported integrals.\n'''\n\nimport numpy\nfrom pyscf import gto, scf\n\nmol = gto.M(\n verbose = 0,\n atom = 'C 0 0 0; O 0 0 1.5',\n basis = 'ccpvdz'\n)\nmf = scf.RHF(mol)\nmf.kernel()\ndm = mf.make_rdm1()\n\n# Overlap, kinetic, nuclear attraction\ns = mol.intor('cint1e_ovlp_sph')\nt = mol.intor('cint1e_kin_sph')\nv = mol.intor('cint1e_nuc_sph')\n# Overlap, kinetic, nuclear attraction gradients (against electron coordinates)\ns1 = mol.intor('cint1e_ipovlp_sph', comp=3)\nt1 = mol.intor('cint1e_ipkin_sph' , comp=3)\nv1 = mol.intor('cint1e_ipnuc_sph' , comp=3)\n\nprint('Dipole %s' % numpy.einsum('xij,ij->x',\n mol.intor('cint1e_r_sph', comp=3), dm))\n\n#\n# AO overlap between two molecules\n#\nmol1 = gto.M(\n verbose = 0,\n atom = 'H 0 1 0; H 1 0 0',\n basis = 'ccpvdz'\n)\ns = gto.intor_cross('cint1e_ovlp_sph', mol, mol1)\nprint('overlap shape (%d, %d)' % s.shape)\n\n#\n# 2e integrals. Keyword aosym is to specify the permutation symmetry in the\n# AO integral matrix. s8 means 8-fold symmetry, s2kl means 2-fold symmetry\n# for the symmetry between kl in (ij|kl)\n#\neri = mol.intor('cint2e_sph', aosym='s8')\n#\n# 2e gradient integrals on first atom only\n#\neri = mol.intor('cint2e_ip1_sph', aosym='s2kl')\n\n#\n# 2e integral gradients on certain atom\n#\natm_id = 1 # second atom\nbas_start, bas_end, ao_start, ao_end = mol.aoslice_by_atom()[atm_id]\ntot_bra = ao_end - ao_start\nnao = mol.nao_nr()\neri1 = numpy.empty((3,tot_bra,nao,nao,nao))\npi = 0\nfor i in range(mol.nbas):\n if mol.bas_atom(i) == atm_id:\n pj = 0\n for j in range(mol.nbas):\n pk = 0\n for k in range(mol.nbas):\n pl = 0\n for l in range(mol.nbas):\n shls = (i, j, k, l)\n buf = mol.intor_by_shell('cint2e_ip1_sph', shls, comp=3)\n di, dj, dk, dl = buf.shape[1:]\n eri1[:,pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf\n pl += dl\n pk += dk\n pj += dj\n pi += di\nprint('integral shape %s' % str(eri1.shape))\n\n#\n# Generate a sub-block of AO integrals. 
The sub-block (ij|kl) contains the\n# shells 2:5 for basis i, 0:2 for j, 0:4 for k and 1:3 for l\n#\nsub_eri = mol.intor('int2e_sph', shls_slice=(2,5,0,2,0,4,1,3))\n# This statement is equivalent to\ndims = []\nfor i in range(mol.nbas):\n l = mol.bas_angular(i)\n nc = mol.bas_nctr(i)\n dims.append((l * 2 + 1) * nc)\nnao_i = sum(dims[2:5])\nnao_j = sum(dims[0:2])\nnao_k = sum(dims[0:4])\nnao_l = sum(dims[1:3])\nsub_eri = numpy.empty((nao_i,nao_j,nao_k,nao_l))\npi = 0\nfor i in range(2,5):\n pj = 0\n for j in range(0,2):\n pk = 0\n for k in range(0,4):\n pl = 0\n for l in range(1,3):\n shls = (i, j, k, l)\n buf = mol.intor_by_shell('int2e_sph', shls)\n di, dj, dk, dl = buf.shape\n sub_eri[pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf\n pl += dl\n pk += dk\n pj += dj\n pi += di\nsub_eri = sub_eri.reshape(nao_i*nao_j,nao_k*nao_l)\n\n#\n# Generate all AO integrals for a sub-system.\n#\nmol = gto.M(atom=[['H', 0,0,i] for i in range(10)])\natom_idx = [0,2,4] # The disjoint atoms\nsub_mol = mol.copy()\nsub_mol._bas = mol._bas[atom_idx]\nsub_eri = sub_mol.intor('int2e_sph', aosym='s1')\n\n# This statement is equivalent to\nsub_nao = 0\nfor i in range(mol.nbas):\n if mol.bas_atom(i) in atom_idx:\n l = mol.bas_angular(i)\n nc = mol.bas_nctr(i)\n sub_nao += (l * 2 + 1) * nc\nsub_eri = numpy.empty((sub_nao,sub_nao,sub_nao,sub_nao))\npi = 0\nfor i in range(mol.nbas):\n if mol.bas_atom(i) in atom_idx:\n pj = 0\n for j in range(mol.nbas):\n if mol.bas_atom(j) in atom_idx:\n pk = 0\n for k in range(mol.nbas):\n if mol.bas_atom(k) in atom_idx:\n pl = 0\n for l in range(mol.nbas):\n if mol.bas_atom(l) in atom_idx:\n shls = (i, j, k, l)\n buf = mol.intor_by_shell('int2e_sph', shls)\n di, dj, dk, dl = buf.shape\n sub_eri[pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf\n pl += dl\n pk += dk\n pj += dj\n pi += di\nsub_eri = sub_eri.reshape(sub_nao**2,sub_nao**2)\n",
"#!/usr/bin/env python\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport ctypes\nimport numpy\nfrom pyscf import lib\n\nlibcgto = lib.load_library('libcgto')\n\nANG_OF = 1\nNPRIM_OF = 2\nNCTR_OF = 3\nKAPPA_OF = 4\nPTR_EXP = 5\nPTR_COEFF = 6\nBAS_SLOTS = 8\n\ndef getints(intor_name, atm, bas, env, shls_slice=None, comp=1, hermi=0,\n aosym='s1', ao_loc=None, cintopt=None, out=None):\n r'''1e and 2e integral generator.\n\n Args:\n intor_name : str\n\n ================================ =============\n Function Expression\n ================================ =============\n \"int1e_ovlp_sph\" ( \\| \\)\n \"int1e_nuc_sph\" ( \\| nuc \\| \\)\n \"int1e_kin_sph\" (.5 \\| p dot p\\)\n \"int1e_ia01p_sph\" (#C(0 1) \\| nabla-rinv \\| cross p\\)\n \"int1e_giao_irjxp_sph\" (#C(0 1) \\| r cross p\\)\n \"int1e_cg_irxp_sph\" (#C(0 1) \\| rc cross p\\)\n \"int1e_giao_a11part_sph\" (-.5 \\| nabla-rinv \\| r\\)\n \"int1e_cg_a11part_sph\" (-.5 \\| nabla-rinv \\| rc\\)\n \"int1e_a01gp_sph\" (g \\| nabla-rinv cross p \\|\\)\n \"int1e_igkin_sph\" (#C(0 .5) g \\| p dot p\\)\n \"int1e_igovlp_sph\" (#C(0 1) g \\|\\)\n \"int1e_ignuc_sph\" (#C(0 1) g \\| nuc \\|\\)\n \"int1e_z_sph\" ( \\| zc \\| \\)\n \"int1e_zz_sph\" ( \\| zc zc \\| \\)\n \"int1e_r_sph\" ( \\| rc \\| \\)\n \"int1e_r2_sph\" ( \\| rc dot rc \\| \\)\n \"int1e_rr_sph\" ( \\| rc rc \\| \\)\n \"int1e_pnucp_sph\" (p* \\| nuc dot p \\| \\)\n \"int1e_prinvxp_sph\" (p* \\| rinv cross p \\| \\)\n \"int1e_ovlp_spinor\" ( \\| \\)\n \"int1e_nuc_spinor\" ( \\| nuc \\|\\)\n \"int1e_srsr_spinor\" (sigma dot r \\| sigma dot r\\)\n \"int1e_sr_spinor\" (sigma dot r \\|\\)\n \"int1e_srsp_spinor\" (sigma dot r \\| sigma dot p\\)\n \"int1e_spsp_spinor\" (sigma dot p \\| sigma dot p\\)\n \"int1e_sp_spinor\" (sigma dot p \\|\\)\n \"int1e_spnucsp_spinor\" (sigma dot p \\| nuc \\| sigma dot p\\)\n \"int1e_srnucsr_spinor\" (sigma dot r \\| nuc \\| sigma dot r\\)\n \"int1e_govlp_spinor\" (g \\|\\)\n \"int1e_gnuc_spinor\" (g \\| nuc \\|\\)\n \"int1e_cg_sa10sa01_spinor\" (.5 sigma cross rc \\| sigma cross nabla-rinv \\|\\)\n \"int1e_cg_sa10sp_spinor\" (.5 rc cross sigma \\| sigma dot p\\)\n \"int1e_cg_sa10nucsp_spinor\" (.5 rc cross sigma \\| nuc \\| sigma dot p\\)\n \"int1e_giao_sa10sa01_spinor\" (.5 sigma cross r \\| sigma cross nabla-rinv \\|\\)\n \"int1e_giao_sa10sp_spinor\" (.5 r cross sigma \\| sigma dot p\\)\n \"int1e_giao_sa10nucsp_spinor\" (.5 r cross sigma \\| nuc \\| sigma dot p\\)\n \"int1e_sa01sp_spinor\" (\\| nabla-rinv cross sigma \\| sigma dot p\\)\n \"int1e_spgsp_spinor\" (g sigma dot p \\| sigma dot p\\)\n \"int1e_spgnucsp_spinor\" (g sigma dot p \\| nuc \\| sigma dot p\\)\n \"int1e_spgsa01_spinor\" (g sigma dot p \\| nabla-rinv cross sigma \\|\\)\n \"int1e_spspsp_spinor\" (sigma dot p \\| sigma dot p sigma dot p\\)\n \"int1e_spnuc_spinor\" (sigma dot p \\| nuc \\|\\)\n \"int1e_ovlp_cart\" ( \\| \\)\n \"int1e_nuc_cart\" ( \\| nuc \\| \\)\n \"int1e_kin_cart\" (.5 \\| p dot p\\)\n \"int1e_ia01p_cart\" (#C(0 1) \\| nabla-rinv \\| cross p\\)\n \"int1e_giao_irjxp_cart\" (#C(0 1) \\| r cross p\\)\n \"int1e_cg_irxp_cart\" (#C(0 1) \\| rc cross p\\)\n \"int1e_giao_a11part_cart\" (-.5 \\| nabla-rinv \\| r\\)\n \"int1e_cg_a11part_cart\" (-.5 \\| nabla-rinv \\| rc\\)\n \"int1e_a01gp_cart\" (g \\| nabla-rinv cross p \\|\\)\n \"int1e_igkin_cart\" (#C(0 .5) g \\| p dot p\\)\n \"int1e_igovlp_cart\" (#C(0 1) g \\|\\)\n \"int1e_ignuc_cart\" (#C(0 1) g \\| nuc \\|\\)\n \"int1e_ipovlp_sph\" (nabla \\|\\)\n \"int1e_ipkin_sph\" (.5 nabla \\| p dot p\\)\n \"int1e_ipnuc_sph\" 
(nabla \\| nuc \\|\\)\n \"int1e_iprinv_sph\" (nabla \\| rinv \\|\\)\n \"int1e_rinv_sph\" (\\| rinv \\|\\)\n \"int1e_ipovlp_spinor\" (nabla \\|\\)\n \"int1e_ipkin_spinor\" (.5 nabla \\| p dot p\\)\n \"int1e_ipnuc_spinor\" (nabla \\| nuc \\|\\)\n \"int1e_iprinv_spinor\" (nabla \\| rinv \\|\\)\n \"int1e_ipspnucsp_spinor\" (nabla sigma dot p \\| nuc \\| sigma dot p\\)\n \"int1e_ipsprinvsp_spinor\" (nabla sigma dot p \\| rinv \\| sigma dot p\\)\n \"int1e_ipovlp_cart\" (nabla \\|\\)\n \"int1e_ipkin_cart\" (.5 nabla \\| p dot p\\)\n \"int1e_ipnuc_cart\" (nabla \\| nuc \\|\\)\n \"int1e_iprinv_cart\" (nabla \\| rinv \\|\\)\n \"int1e_rinv_cart\" (\\| rinv \\|\\)\n \"int2e_p1vxp1_sph\" ( p* \\, cross p \\| \\, \\) ; SSO\n \"int2e_sph\" ( \\, \\| \\, \\)\n \"int2e_ig1_sph\" (#C(0 1) g \\, \\| \\, \\)\n \"int2e_spinor\" (, \\| \\, \\)\n \"int2e_spsp1_spinor\" (sigma dot p \\, sigma dot p \\| \\, \\)\n \"int2e_spsp1spsp2_spinor\" (sigma dot p \\, sigma dot p \\| sigma dot p \\, sigma dot p \\)\n \"int2e_srsr1_spinor\" (sigma dot r \\, sigma dot r \\| \\,\\)\n \"int2e_srsr1srsr2_spinor\" (sigma dot r \\, sigma dot r \\| sigma dot r \\, sigma dot r\\)\n \"int2e_cg_sa10sp1_spinor\" (.5 rc cross sigma \\, sigma dot p \\| \\,\\)\n \"int2e_cg_sa10sp1spsp2_spinor\" (.5 rc cross sigma \\, sigma dot p \\| sigma dot p \\, sigma dot p \\)\n \"int2e_giao_sa10sp1_spinor\" (.5 r cross sigma \\, sigma dot p \\| \\,\\)\n \"int2e_giao_sa10sp1spsp2_spinor\" (.5 r cross sigma \\, sigma dot p \\| sigma dot p \\, sigma dot p \\)\n \"int2e_g1_spinor\" (g \\, \\| \\,\\)\n \"int2e_spgsp1_spinor\" (g sigma dot p \\, sigma dot p \\| \\,\\)\n \"int2e_g1spsp2_spinor\" (g \\, \\| sigma dot p \\, sigma dot p\\)\n \"int2e_spgsp1spsp2_spinor\" (g sigma dot p \\, sigma dot p \\| sigma dot p \\, sigma dot p\\)\n \"int2e_spv1_spinor\" (sigma dot p \\, \\| \\,\\)\n \"int2e_vsp1_spinor\" (\\, sigma dot p \\| \\,\\)\n \"int2e_spsp2_spinor\" (\\, \\| sigma dot p \\, sigma dot p\\)\n \"int2e_spv1spv2_spinor\" (sigma dot p \\, \\| sigma dot p \\,\\)\n \"int2e_vsp1spv2_spinor\" (\\, sigma dot p \\| sigma dot p \\,\\)\n \"int2e_spv1vsp2_spinor\" (sigma dot p \\, \\| \\, sigma dot p\\)\n \"int2e_vsp1vsp2_spinor\" (\\, sigma dot p \\| \\, sigma dot p\\)\n \"int2e_spv1spsp2_spinor\" (sigma dot p \\, \\| sigma dot p \\, sigma dot p\\)\n \"int2e_vsp1spsp2_spinor\" (\\, sigma dot p \\| sigma dot p \\, sigma dot p\\)\n \"int2e_ig1_cart\" (#C(0 1) g \\, \\| \\, \\)\n \"int2e_ip1_sph\" (nabla \\, \\| \\,\\)\n \"int2e_ip1_spinor\" (nabla \\, \\| \\,\\)\n \"int2e_ipspsp1_spinor\" (nabla sigma dot p \\, sigma dot p \\| \\,\\)\n \"int2e_ip1spsp2_spinor\" (nabla \\, \\| sigma dot p \\, sigma dot p\\)\n \"int2e_ipspsp1spsp2_spinor\" (nabla sigma dot p \\, sigma dot p \\| sigma dot p \\, sigma dot p\\)\n \"int2e_ipsrsr1_spinor\" (nabla sigma dot r \\, sigma dot r \\| \\,\\)\n \"int2e_ip1srsr2_spinor\" (nabla \\, \\| sigma dot r \\, sigma dot r\\)\n \"int2e_ipsrsr1srsr2_spinor\" (nabla sigma dot r \\, sigma dot r \\| sigma dot r \\, sigma dot r\\)\n \"int2e_ip1_cart\" (nabla \\, \\| \\,\\)\n \"int2e_ssp1ssp2_spinor\" ( \\, sigma dot p \\| gaunt \\| \\, sigma dot p\\)\n \"int2e_cg_ssa10ssp2_spinor\" (rc cross sigma \\, \\| gaunt \\| \\, sigma dot p\\)\n \"int2e_giao_ssa10ssp2_spinor\" (r cross sigma \\, \\| gaunt \\| \\, sigma dot p\\)\n \"int2e_gssp1ssp2_spinor\" (g \\, sigma dot p \\| gaunt \\| \\, sigma dot p\\)\n \"int2e_ipip1_sph\" ( nabla nabla \\, \\| \\, \\)\n \"int2e_ipvip1_sph\" ( nabla \\, nabla \\| \\, \\)\n \"int2e_ip1ip2_sph\" ( nabla \\, \\| nabla 
\\, \\)\n \"int3c2e_ip1_sph\" (nabla \\, \\| \\)\n \"int3c2e_ip2_sph\" ( \\, \\| nabla\\)\n \"int2c2e_ip1_sph\" (nabla \\| r12 \\| \\)\n \"int3c2e_spinor\" (nabla \\, \\| \\)\n \"int3c2e_spsp1_spinor\" (nabla \\, \\| \\)\n \"int3c2e_ip1_spinor\" (nabla \\, \\| \\)\n \"int3c2e_ip2_spinor\" ( \\, \\| nabla\\)\n \"int3c2e_ipspsp1_spinor\" (nabla sigma dot p \\, sigma dot p \\| \\)\n \"int3c2e_spsp1ip2_spinor\" (sigma dot p \\, sigma dot p \\| nabla \\)\n ================================ =============\n\n atm : int32 ndarray\n libcint integral function argument\n bas : int32 ndarray\n libcint integral function argument\n env : float64 ndarray\n libcint integral function argument\n\n Kwargs:\n shls_slice : 8-element list\n (ish_start, ish_end, jsh_start, jsh_end, ksh_start, ksh_end, lsh_start, lsh_end)\n comp : int\n Components of the integrals, e.g. int1e_ipovlp has 3 components.\n hermi : int (1e integral only)\n Symmetry of the 1e integrals\n\n | 0 : no symmetry assumed (default)\n | 1 : hermitian\n | 2 : anti-hermitian\n\n aosym : str (2e integral only)\n Symmetry of the 2e integrals\n\n | 4 or '4' or 's4': 4-fold symmetry (default)\n | '2ij' or 's2ij' : symmetry between i, j in (ij|kl)\n | '2kl' or 's2kl' : symmetry between k, l in (ij|kl)\n | 1 or '1' or 's1': no symmetry\n\n out : ndarray (2e integral only)\n array to store the 2e AO integrals\n\n Returns:\n ndarray of 1-electron integrals, can be either 2-dim or 3-dim, depending on comp\n\n Examples:\n\n >>> mol.build(atom='H 0 0 0; H 0 0 1.1', basis='sto-3g')\n >>> gto.getints('int1e_ipnuc_sph', mol._atm, mol._bas, mol._env, comp=3) # <nabla i | V_nuc | j>\n [[[ 0. 0. ]\n [ 0. 0. ]]\n [[ 0. 0. ]\n [ 0. 0. ]]\n [[ 0.10289944 0.48176097]\n [-0.48176097 -0.10289944]]]\n '''\n intor_name = ascint3(intor_name)\n if (intor_name.startswith('int1e') or\n intor_name.startswith('ECP') or\n intor_name.startswith('int2c2e')):\n return getints2c(intor_name, atm, bas, env, shls_slice, comp,\n hermi, ao_loc, cintopt, out)\n elif intor_name.startswith('int2e') or intor_name.startswith('int4c1e'):\n return getints4c(intor_name, atm, bas, env, shls_slice, comp,\n aosym, ao_loc, cintopt, out)\n elif intor_name.startswith('int3c'):\n return getints3c(intor_name, atm, bas, env, shls_slice, comp,\n aosym, ao_loc, cintopt, out)\n else:\n raise RuntimeError('Unknown intor %s' % intor_name)\n\ndef getints2c(intor_name, atm, bas, env, shls_slice=None, comp=1, hermi=0,\n ao_loc=None, cintopt=None, out=None):\n atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = atm.shape[0]\n nbas = bas.shape[0]\n if shls_slice is None:\n shls_slice = (0, nbas, 0, nbas)\n else:\n assert(shls_slice[1] <= nbas and shls_slice[3] <= nbas)\n if ao_loc is None:\n ao_loc = make_loc(bas, intor_name)\n\n i0, i1, j0, j1 = shls_slice[:4]\n naoi = ao_loc[i1] - ao_loc[i0]\n naoj = ao_loc[j1] - ao_loc[j0]\n if intor_name.endswith('_cart') or intor_name.endswith('_sph'):\n mat = numpy.ndarray((naoi,naoj,comp), numpy.double, out, order='F')\n drv_name = 'GTOint2c'\n else:\n mat = numpy.ndarray((naoi,naoj,comp), numpy.complex, out, order='F')\n if '2c2e' in intor_name:\n assert(hermi != lib.HERMITIAN and\n hermi != lib.ANTIHERMI)\n drv_name = 'GTOint2c_spinor'\n\n if cintopt is None:\n cintopt = make_cintopt(atm, bas, env, intor_name)\n# cintopt = lib.c_null_ptr()\n\n fn = getattr(libcgto, drv_name)\n fn(getattr(libcgto, intor_name), 
mat.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(comp), ctypes.c_int(hermi),\n (ctypes.c_int*4)(*(shls_slice[:4])),\n ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt,\n atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm),\n bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas),\n env.ctypes.data_as(ctypes.c_void_p))\n\n mat = mat.transpose(2,0,1)\n if comp == 1:\n mat = mat[0]\n return mat\n\ndef getints3c(intor_name, atm, bas, env, shls_slice=None, comp=1,\n aosym='s1', ao_loc=None, cintopt=None, out=None):\n atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = atm.shape[0]\n nbas = bas.shape[0]\n if shls_slice is None:\n shls_slice = (0, nbas, 0, nbas, 0, nbas)\n else:\n assert(shls_slice[1] <= nbas and\n shls_slice[3] <= nbas and\n shls_slice[5] <= nbas)\n\n i0, i1, j0, j1, k0, k1 = shls_slice[:6]\n if ao_loc is None:\n ao_loc = make_loc(bas, intor_name)\n if k0 > j1 and k0 > i1:\n if 'ssc' in intor_name:\n ao_loc[k0-1:] = ao_loc[k0] + make_loc(bas[k0:], 'cart')\n elif 'spinor' in intor_name:\n ao_loc[k0-1:] = ao_loc[k0] + make_loc(bas[k0:], intor_name)\n\n naok = ao_loc[k1] - ao_loc[k0]\n\n if aosym in ('s1',):\n naoi = ao_loc[i1] - ao_loc[i0]\n naoj = ao_loc[j1] - ao_loc[j0]\n shape = (naoi, naoj, naok, comp)\n else:\n aosym = 's2ij'\n nij = ao_loc[i1]*(ao_loc[i1]+1)//2 - ao_loc[i0]*(ao_loc[i0]+1)//2\n shape = (nij, naok, comp)\n\n if 'spinor' in intor_name:\n mat = numpy.ndarray(shape, numpy.complex, out, order='F')\n drv = libcgto.GTOr3c_drv\n fill = getattr(libcgto, 'GTOr3c_fill_'+aosym)\n else:\n mat = numpy.ndarray(shape, numpy.double, out, order='F')\n drv = libcgto.GTOnr3c_drv\n fill = getattr(libcgto, 'GTOnr3c_fill_'+aosym)\n\n if cintopt is None:\n cintopt = make_cintopt(atm, bas, env, intor_name)\n\n drv(getattr(libcgto, intor_name), fill,\n mat.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(comp),\n (ctypes.c_int*6)(*(shls_slice[:6])),\n ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt,\n atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm),\n bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas),\n env.ctypes.data_as(ctypes.c_void_p))\n\n mat = numpy.rollaxis(mat, -1, 0)\n if comp == 1:\n mat = mat[0]\n return mat\n\ndef getints4c(intor_name, atm, bas, env, shls_slice=None, comp=1,\n aosym='s1', ao_loc=None, cintopt=None, out=None):\n aosym = _stand_sym_code(aosym)\n\n atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n env = numpy.asarray(env, dtype=numpy.double, order='C')\n c_atm = atm.ctypes.data_as(ctypes.c_void_p)\n c_bas = bas.ctypes.data_as(ctypes.c_void_p)\n c_env = env.ctypes.data_as(ctypes.c_void_p)\n natm = atm.shape[0]\n nbas = bas.shape[0]\n\n ao_loc = make_loc(bas, intor_name)\n if cintopt is None:\n cintopt = make_cintopt(atm, bas, env, intor_name)\n\n if aosym == 's8':\n assert('_spinor' not in intor_name)\n assert(shls_slice is None)\n from pyscf.scf import _vhf\n nao = ao_loc[-1]\n nao_pair = nao*(nao+1)//2\n out = numpy.ndarray((nao_pair*(nao_pair+1)//2), buffer=out)\n drv = _vhf.libcvhf.GTO2e_cart_or_sph\n drv(getattr(libcgto, intor_name), cintopt,\n out.ctypes.data_as(ctypes.c_void_p),\n ao_loc.ctypes.data_as(ctypes.c_void_p),\n c_atm, ctypes.c_int(natm), c_bas, ctypes.c_int(nbas), c_env)\n return out\n\n else:\n if shls_slice is None:\n shls_slice = (0, nbas, 0, nbas, 0, nbas, 0, nbas)\n elif len(shls_slice) == 4:\n shls_slice = shls_slice + (0, nbas, 0, 
nbas)\n else:\n assert(shls_slice[1] <= nbas and shls_slice[3] <= nbas and\n shls_slice[5] <= nbas and shls_slice[7] <= nbas)\n i0, i1, j0, j1, k0, k1, l0, l1 = shls_slice\n naoi = ao_loc[i1] - ao_loc[i0]\n naoj = ao_loc[j1] - ao_loc[j0]\n naok = ao_loc[k1] - ao_loc[k0]\n naol = ao_loc[l1] - ao_loc[l0]\n if aosym in ('s4', 's2ij'):\n nij = naoi * (naoi + 1) // 2\n assert(numpy.all(ao_loc[i0:i1]-ao_loc[i0] == ao_loc[j0:j1]-ao_loc[j0]))\n else:\n nij = naoi * naoj\n if aosym in ('s4', 's2kl'):\n nkl = naok * (naok + 1) // 2\n assert(numpy.all(ao_loc[k0:k1]-ao_loc[k0] == ao_loc[l0:l1]-ao_loc[l0]))\n else:\n nkl = naok * naol\n if comp == 1:\n out = numpy.ndarray((nij,nkl), buffer=out)\n else:\n out = numpy.ndarray((comp,nij,nkl), buffer=out)\n\n prescreen = lib.c_null_ptr()\n drv = libcgto.GTOnr2e_fill_drv\n drv(getattr(libcgto, intor_name),\n getattr(libcgto, 'GTOnr2e_fill_'+aosym), prescreen,\n out.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(comp),\n (ctypes.c_int*8)(*shls_slice),\n ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt,\n c_atm, ctypes.c_int(natm), c_bas, ctypes.c_int(nbas), c_env)\n return out\n\ndef getints_by_shell(intor_name, shls, atm, bas, env, comp=1):\n r'''For given 2, 3 or 4 shells, interface for libcint to get 1e, 2e,\n 2-center-2e or 3-center-2e integrals\n\n Args:\n intor_name : str\n See also :func:`getints` for the supported intor_name\n shls : list of int\n The AO shell-ids of the integrals\n atm : int32 ndarray\n libcint integral function argument\n bas : int32 ndarray\n libcint integral function argument\n env : float64 ndarray\n libcint integral function argument\n\n Kwargs:\n comp : int\n Components of the integrals, e.g. int1e_ipovlp has 3 components.\n\n Returns:\n ndarray of 2-dim to 5-dim, depending on the integral type (1e,\n 2e, 3c-2e, 2c2e) and the value of comp\n\n Examples:\n The gradients of the spherical 2e integrals\n\n >>> mol.build(atom='H 0 0 0; H 0 0 1.1', basis='sto-3g')\n >>> gto.getints_by_shell('int2e_ip1_sph', (0,1,0,1), mol._atm, mol._bas, mol._env, comp=3)\n [[[[[-0. ]]]]\n [[[[-0. 
]]]]\n [[[[-0.08760462]]]]]\n '''\n intor_name = ascint3(intor_name)\n atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(atm.shape[0])\n nbas = ctypes.c_int(bas.shape[0])\n if intor_name.endswith('_cart'):\n dtype = numpy.double\n def num_cgto_of(basid):\n l = bas[basid,ANG_OF]\n return (l+1)*(l+2)//2 * bas[basid,NCTR_OF]\n elif intor_name.endswith('_sph'):\n dtype = numpy.double\n def num_cgto_of(basid):\n l = bas[basid,ANG_OF]\n return (l*2+1) * bas[basid,NCTR_OF]\n else:\n from pyscf.gto.mole import len_spinor\n dtype = numpy.complex\n def num_cgto_of(basid):\n l = bas[basid,ANG_OF]\n k = bas[basid,KAPPA_OF]\n return len_spinor(l,k) * bas[basid,NCTR_OF]\n\n null = lib.c_null_ptr()\n if intor_name.startswith('int3c'):\n assert(len(shls) == 3)\n di = num_cgto_of(shls[0])\n dj = num_cgto_of(shls[1])\n l = bas[shls[2],ANG_OF]\n if intor_name.endswith('_ssc'): # mixed spherical-cartesian\n dk = (l+1)*(l+2)//2 * bas[shls[2],NCTR_OF]\n else:\n dk = (l*2+1) * bas[shls[2],NCTR_OF]\n buf = numpy.empty((di,dj,dk,comp), dtype, order='F')\n fintor = getattr(libcgto, intor_name)\n fintor(buf.ctypes.data_as(ctypes.c_void_p),\n null, (ctypes.c_int*3)(*shls),\n atm.ctypes.data_as(ctypes.c_void_p), natm,\n bas.ctypes.data_as(ctypes.c_void_p), nbas,\n env.ctypes.data_as(ctypes.c_void_p), null, null)\n if comp == 1:\n return buf.reshape(di,dj,dk)\n else:\n return buf.transpose(3,0,1,2)\n\n elif intor_name.startswith('int2e') or intor_name.startswith('int4c'):\n assert(len(shls) == 4)\n di, dj, dk, dl = [num_cgto_of(x) for x in shls]\n buf = numpy.empty((di,dj,dk,dl,comp), dtype, order='F')\n fintor = getattr(libcgto, intor_name)\n fintor(buf.ctypes.data_as(ctypes.c_void_p),\n null, (ctypes.c_int*4)(*shls),\n atm.ctypes.data_as(ctypes.c_void_p), natm,\n bas.ctypes.data_as(ctypes.c_void_p), nbas,\n env.ctypes.data_as(ctypes.c_void_p), null, null)\n if comp == 1:\n return buf.reshape(di,dj,dk,dl)\n else:\n return buf.transpose(4,0,1,2,3)\n\n elif (intor_name.startswith('int2c') or '1e' in intor_name or\n 'ECP' in intor_name):\n assert(len(shls) == 2)\n di = num_cgto_of(shls[0])\n dj = num_cgto_of(shls[1])\n buf = numpy.empty((di,dj,comp), dtype, order='F')\n fintor = getattr(libcgto, intor_name)\n fintor(buf.ctypes.data_as(ctypes.c_void_p),\n null, (ctypes.c_int*2)(*shls),\n atm.ctypes.data_as(ctypes.c_void_p), natm,\n bas.ctypes.data_as(ctypes.c_void_p), nbas,\n env.ctypes.data_as(ctypes.c_void_p), null, null)\n if comp == 1:\n return buf.reshape(di,dj)\n else:\n return buf.transpose(2,0,1)\n\n else:\n raise RuntimeError('Unknown intor %s' % intor_name)\n\n\ndef make_loc(bas, key):\n if 'cart' in key:\n l = bas[:,ANG_OF]\n dims = (l+1)*(l+2)//2 * bas[:,NCTR_OF]\n elif 'sph' in key:\n dims = (bas[:,ANG_OF]*2+1) * bas[:,NCTR_OF]\n else: # spinor\n l = bas[:,ANG_OF]\n k = bas[:,KAPPA_OF]\n dims = (l*4+2) * bas[:,NCTR_OF]\n dims[k<0] = (l[k<0] * 2 + 2) * bas[k<0,NCTR_OF]\n dims[k>0] = (l[k>0] * 2 ) * bas[k>0,NCTR_OF]\n\n ao_loc = numpy.empty(len(dims)+1, dtype=numpy.int32)\n ao_loc[0] = 0\n dims.cumsum(dtype=numpy.int32, out=ao_loc[1:])\n return ao_loc\n\ndef make_cintopt(atm, bas, env, intor):\n intor = intor.replace('_sph','').replace('_cart','').replace('_spinor','')\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = c_atm.shape[0]\n 
nbas = c_bas.shape[0]\n cintopt = lib.c_null_ptr()\n foptinit = getattr(libcgto, intor+'_optimizer')\n foptinit(ctypes.byref(cintopt),\n c_atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm),\n c_bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas),\n c_env.ctypes.data_as(ctypes.c_void_p))\n return ctypes.cast(cintopt, _cintoptHandler)\nclass _cintoptHandler(ctypes.c_void_p):\n def __del__(self):\n libcgto.CINTdel_optimizer(ctypes.byref(self))\n\ndef _stand_sym_code(sym):\n if isinstance(sym, int):\n return 's%d' % sym\n elif sym[0] in 'sS':\n return sym.lower()\n else:\n return 's' + sym.lower()\n\ndef ascint3(intor_name):\n '''convert cint2 function name to cint3 function name'''\n if intor_name.startswith('cint'):\n intor_name = intor_name[1:]\n if not (intor_name.endswith('_cart') or\n intor_name.endswith('_sph') or\n intor_name.endswith('_spinor')):\n intor_name = intor_name + '_spinor'\n return intor_name\n\n\nif __name__ == '__main__':\n from pyscf import gto\n mol = gto.Mole()\n mol.verbose = 0\n mol.output = None\n\n mol.atom.extend([\n [\"H\", (0, 0, 0 )],\n [\"H\", (0, 0, 1 )],\n ])\n mol.basis = {\"H\": 'cc-pvdz'}\n mol.build()\n mol.set_rinv_origin(mol.atom_coord(0))\n for i in range(mol.nbas):\n for j in range(mol.nbas):\n print(i, j, getints_by_shell('int1e_prinvxp_sph', (i,j),\n mol._atm, mol._bas, mol._env, 3))\n",
"#!/usr/bin/env python\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nUnrestricted CISD\n'''\n\nimport time\nfrom functools import reduce\nimport tempfile\nimport numpy\nimport h5py\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf import ao2mo\nfrom pyscf.cc import uccsd\nfrom pyscf.ci import cisd\nfrom pyscf.cc.rccsd import _unpack_4fold, _mem_usage\nfrom pyscf.ci.ucisd_slow import from_fci, to_fci\nfrom pyscf.ci.ucisd_slow import make_rdm1, make_rdm2\n\neinsum = lib.einsum\n\ndef kernel(myci, eris, ci0=None, max_cycle=50, tol=1e-8,\n verbose=logger.INFO):\n mol = myci.mol\n diag = myci.make_diagonal(eris)\n ehf = diag[0]\n diag -= ehf\n\n if ci0 is None:\n ci0 = myci.get_init_guess(eris)[1]\n\n def op(xs):\n return [myci.contract(x, eris) for x in xs]\n\n def precond(x, e, *args):\n diagd = diag - (e-myci.level_shift)\n diagd[abs(diagd)<1e-8] = 1e-8\n return x / diagd\n\n conv, ecisd, ci = lib.davidson1(op, ci0, precond, tol=tol,\n max_cycle=max_cycle, max_space=myci.max_space,\n lindep=myci.lindep, nroots=myci.nroots,\n verbose=verbose)\n if myci.nroots == 1:\n conv = conv[0]\n ecisd = ecisd[0]\n ci = ci[0]\n return conv, ecisd, ci\n\n\ndef make_diagonal(myci, eris):\n nocca = eris.nocca\n noccb = eris.noccb\n nmoa = eris.focka.shape[0]\n nmob = eris.focka.shape[1]\n nvira = nmoa - nocca\n nvirb = nmob - noccb\n jdiag_aa = numpy.zeros((nmoa,nmoa))\n jdiag_ab = numpy.zeros((nmoa,nmob))\n jdiag_bb = numpy.zeros((nmob,nmob))\n jdiag_aa[:nocca,:nocca] = numpy.einsum('iijj->ij', eris.oooo)\n jdiag_aa[:nocca,nocca:] = numpy.einsum('iijj->ji', eris.vvoo)\n jdiag_aa[nocca:,:nocca] = jdiag_aa[:nocca,nocca:].T\n jdiag_ab[:nocca,:noccb] = numpy.einsum('iijj->ij', eris.ooOO)\n jdiag_ab[:nocca,noccb:] = numpy.einsum('iijj->ji', eris.VVoo)\n jdiag_ab[nocca:,:noccb] = numpy.einsum('iijj->ij', eris.vvOO)\n jdiag_bb[:noccb,:noccb] = numpy.einsum('iijj->ij', eris.OOOO)\n jdiag_bb[:noccb,noccb:] = numpy.einsum('iijj->ji', eris.VVOO)\n jdiag_bb[noccb:,:noccb] = jdiag_bb[:noccb,noccb:].T\n\n kdiag_aa = numpy.zeros((nmoa,nmoa))\n kdiag_bb = numpy.zeros((nmob,nmob))\n kdiag_aa[:nocca,:nocca] = numpy.einsum('ijji->ij', eris.oooo)\n kdiag_aa[:nocca,nocca:] = numpy.einsum('ijji->ji', eris.voov)\n kdiag_aa[nocca:,:nocca] = kdiag_aa[:nocca,nocca:].T\n kdiag_bb[:noccb,:noccb] = numpy.einsum('ijji->ij', eris.OOOO)\n kdiag_bb[:noccb,noccb:] = numpy.einsum('ijji->ji', eris.VOOV)\n kdiag_bb[noccb:,:noccb] = kdiag_bb[:noccb,noccb:].T\n\n# if eris.vvvv is not None and eris.vvVV is not None and eris.VVVV is not None:\n# def diag_idx(n):\n# idx = numpy.arange(n)\n# return idx * (idx + 1) // 2 + idx\n# jdiag_aa[nocca:,nocca:] = eris.vvvv[diag_idx(nvira)[:,None],diag_idx(nvira)]\n# jdiag_ab[nocca:,noccb:] = eris.vvVV[diag_idx(nvira)[:,None],diag_idx(nvirb)]\n# jdiag_bb[noccb:,noccb:] = eris.VVVV[diag_idx(nvirb)[:,None],diag_idx(nvirb)]\n# kdiag_aa[nocca:,nocca:] = lib.unpack_tril(eris.vvvv.diagonal())\n# kdiag_bb[noccb:,noccb:] = lib.unpack_tril(eris.VVVV.diagonal())\n\n jkdiag_aa = jdiag_aa - kdiag_aa\n jkdiag_bb = jdiag_bb - kdiag_bb\n\n mo_ea = eris.focka.diagonal()\n mo_eb = eris.fockb.diagonal()\n ehf = (mo_ea[:nocca].sum() + mo_eb[:noccb].sum()\n - jkdiag_aa[:nocca,:nocca].sum() * .5\n - jdiag_ab[:nocca,:noccb].sum()\n - jkdiag_bb[:noccb,:noccb].sum() * .5)\n\n dia_a = lib.direct_sum('a-i->ia', mo_ea[nocca:], mo_ea[:nocca])\n dia_a -= jkdiag_aa[:nocca,nocca:]\n dia_b = lib.direct_sum('a-i->ia', mo_eb[noccb:], mo_eb[:noccb])\n dia_b -= jkdiag_bb[:noccb,noccb:]\n e1diag_a = dia_a + ehf\n e1diag_b 
= dia_b + ehf\n\n e2diag_aa = lib.direct_sum('ia+jb->ijab', dia_a, dia_a)\n e2diag_aa += ehf\n e2diag_aa += jkdiag_aa[:nocca,:nocca].reshape(nocca,nocca,1,1)\n e2diag_aa -= jkdiag_aa[:nocca,nocca:].reshape(nocca,1,1,nvira)\n e2diag_aa -= jkdiag_aa[:nocca,nocca:].reshape(1,nocca,nvira,1)\n e2diag_aa += jkdiag_aa[nocca:,nocca:].reshape(1,1,nvira,nvira)\n\n e2diag_ab = lib.direct_sum('ia+jb->ijab', dia_a, dia_b)\n e2diag_ab += ehf\n e2diag_ab += jdiag_ab[:nocca,:noccb].reshape(nocca,noccb,1,1)\n e2diag_ab += jdiag_ab[nocca:,noccb:].reshape(1,1,nvira,nvirb)\n e2diag_ab -= jdiag_ab[:nocca,noccb:].reshape(nocca,1,1,nvirb)\n e2diag_ab -= jdiag_ab[nocca:,:noccb].T.reshape(1,noccb,nvira,1)\n\n e2diag_bb = lib.direct_sum('ia+jb->ijab', dia_b, dia_b)\n e2diag_bb += ehf\n e2diag_bb += jkdiag_bb[:noccb,:noccb].reshape(noccb,noccb,1,1)\n e2diag_bb -= jkdiag_bb[:noccb,noccb:].reshape(noccb,1,1,nvirb)\n e2diag_bb -= jkdiag_bb[:noccb,noccb:].reshape(1,noccb,nvirb,1)\n e2diag_bb += jkdiag_bb[noccb:,noccb:].reshape(1,1,nvirb,nvirb)\n\n return amplitudes_to_cisdvec(ehf, (e1diag_a, e1diag_b),\n (e2diag_aa, e2diag_ab, e2diag_bb))\n\ndef contract(myci, civec, eris):\n nocca = eris.nocca\n noccb = eris.noccb\n nmoa = eris.focka.shape[0]\n nmob = eris.fockb.shape[0]\n nvira = nmoa - nocca\n nvirb = nmob - noccb\n c0, (c1a,c1b), (c2aa,c2ab,c2bb) = \\\n cisdvec_to_amplitudes(civec, (nmoa,nmob), (nocca,noccb))\n\n fooa = eris.focka[:nocca,:nocca]\n foob = eris.fockb[:noccb,:noccb]\n fova = eris.focka[:nocca,nocca:]\n fovb = eris.fockb[:noccb,noccb:]\n fvva = eris.focka[nocca:,nocca:]\n fvvb = eris.fockb[noccb:,noccb:]\n\n t0 = 0\n t1a = 0\n t1b = 0\n t2aa = 0\n t2ab = 0\n t2bb = 0\n eris_vvoo = _cp(eris.vvoo)\n eris_VVoo = _cp(eris.VVoo)\n eris_vvOO = _cp(eris.vvOO)\n eris_VVOO = _cp(eris.VVOO)\n eris_voov = _cp(eris.voov)\n eris_voOV = _cp(eris.voOV)\n eris_VOOV = _cp(eris.VOOV)\n #:t2 += eris.oovv * c0\n t2aa += .25 * c0 * eris_voov.transpose(1,2,0,3)\n t2aa -= .25 * c0 * eris_voov.transpose(1,2,3,0)\n t2bb += .25 * c0 * eris_VOOV.transpose(1,2,0,3)\n t2bb -= .25 * c0 * eris_VOOV.transpose(1,2,3,0)\n t2ab += c0 * eris_voOV.transpose(1,2,0,3)\n #:t0 += numpy.einsum('ijab,ijab', eris.oovv, c2) * .25\n t0 += numpy.einsum('aijb,ijab', eris.voov, c2aa) * .25\n t0 -= numpy.einsum('ajib,ijab', eris.voov, c2aa) * .25\n t0 += numpy.einsum('aijb,ijab', eris.VOOV, c2bb) * .25\n t0 -= numpy.einsum('ajib,ijab', eris.VOOV, c2bb) * .25\n t0 += numpy.einsum('aijb,ijab', eris.voOV, c2ab)\n\n #:tmp = einsum('imae,mbej->ijab', c2, eris.ovvo)\n #:tmp = tmp - tmp.transpose(0,1,3,2)\n #:t2 += tmp - tmp.transpose(1,0,2,3)\n voov = eris_voov - eris_vvoo.transpose(0,3,2,1)\n VOOV = eris_VOOV - eris_VVOO.transpose(0,3,2,1)\n t2aa += lib.einsum('imae,bjme->ijab', c2aa, voov)\n t2aa += lib.einsum('iMaE,bjME->ijab', c2ab, eris_voOV)\n t2bb += lib.einsum('imae,bjme->ijab', c2bb, VOOV)\n t2bb += lib.einsum('mIeA,emJB->IJAB', c2ab, eris_voOV)\n t2ab += lib.einsum('imae,emJB->iJaB', c2aa, eris_voOV)\n t2ab += lib.einsum('iMaE,EMJB->iJaB', c2ab, VOOV)\n t2ab += lib.einsum('IMAE,bjME->jIbA', c2bb, eris_voOV)\n t2ab += lib.einsum('mIeA,bjme->jIbA', c2ab, voov)\n t2ab -= lib.einsum('iMeA,ebJM->iJbA', c2ab, eris_vvOO)\n t2ab -= lib.einsum('mIaE,EBjm->jIaB', c2ab, eris_VVoo)\n\n #:t1 += einsum('nf,nafi->ia', c1, eris.ovvo)\n t1a += numpy.einsum('nf,ainf->ia', c1a, eris_voov)\n t1a -= numpy.einsum('nf,fani->ia', c1a, eris_vvoo)\n t1b += numpy.einsum('nf,ainf->ia', c1b, eris_VOOV)\n t1b -= numpy.einsum('nf,fani->ia', c1b, eris_VVOO)\n t1b += 
numpy.einsum('nf,fnia->ia', c1a, eris_voOV)\n t1a += numpy.einsum('nf,ainf->ia', c1b, eris_voOV)\n\n #:t1 -= 0.5*einsum('mnae,mnie->ia', c2, eris.ooov)\n eris_vooo = _cp(eris.vooo)\n eris_VOOO = _cp(eris.VOOO)\n eris_VOoo = _cp(eris.VOoo)\n eris_voOO = _cp(eris.voOO)\n t1a += lib.einsum('mnae,emni->ia', c2aa, eris_vooo)\n t1b += lib.einsum('mnae,emni->ia', c2bb, eris_VOOO)\n t1a -= lib.einsum('nMaE,EMni->ia', c2ab, eris_VOoo)\n t1b -= lib.einsum('mNeA,emNI->IA', c2ab, eris_voOO)\n #:tmp = einsum('ma,mbij->ijab', c1, eris.ovoo)\n #:t2 -= tmp - tmp.transpose(0,1,3,2)\n t2aa -= lib.einsum('ma,bjmi->jiba', c1a, eris_vooo)\n t2bb -= lib.einsum('ma,bjmi->jiba', c1b, eris_VOOO)\n t2ab -= lib.einsum('ma,BJmi->iJaB', c1a, eris_VOoo)\n t2ab -= lib.einsum('MA,biMJ->iJbA', c1b, eris_voOO)\n\n #:#:t1 -= 0.5*einsum('imef,maef->ia', c2, eris.ovvv)\n #:eris_vovv = _cp(eris.vovv)\n #:eris_VOVV = _cp(eris.VOVV)\n #:eris_voVV = _cp(eris.voVV)\n #:eris_VOvv = _cp(eris.VOvv)\n #:t1a += lib.einsum('mief,emfa->ia', c2aa, eris_vovv)\n #:t1b += lib.einsum('MIEF,EMFA->IA', c2bb, eris_VOVV)\n #:t1a += lib.einsum('iMfE,EMaf->ia', c2ab, eris_VOvv)\n #:t1b += lib.einsum('mIeF,emAF->IA', c2ab, eris_voVV)\n #:#:tmp = einsum('ie,jeba->ijab', c1, numpy.asarray(eris.ovvv).conj())\n #:#:t2 += tmp - tmp.transpose(1,0,2,3)\n #:t2aa += lib.einsum('ie,bmae->imab', c1a, eris_vovv)\n #:t2bb += lib.einsum('ie,bmae->imab', c1b, eris_VOVV)\n #:t2ab += lib.einsum('ie,BMae->iMaB', c1a, eris_VOvv)\n #:t2ab += lib.einsum('IE,amBE->mIaB', c1b, eris_voVV)\n if nvira > 0 and nocca > 0:\n mem_now = lib.current_memory()[0]\n max_memory = myci.max_memory - mem_now\n blksize = max(int(max_memory*1e6/8/(nvira**2*nocca*2)), 2)\n for p0,p1 in lib.prange(0, nvira, blksize):\n vovv = _cp(eris.vovv[p0:p1]).reshape((p1-p0)*nocca,-1)\n vovv = lib.unpack_tril(vovv).reshape(p1-p0,nocca,nvira,nvira)\n t1a += lib.einsum('mief,emfa->ia', c2aa[:,:,p0:p1], vovv)\n t2aa[:,:,p0:p1] += lib.einsum('ie,bmae->miba', c1a, vovv)\n vovv = None\n\n if nvirb > 0 and noccb > 0:\n mem_now = lib.current_memory()[0]\n max_memory = myci.max_memory - mem_now\n blksize = max(int(max_memory*1e6/8/(nvirb**2*noccb*2)), 2)\n for p0,p1 in lib.prange(0, nvirb, blksize):\n VOVV = _cp(eris.VOVV[p0:p1]).reshape((p1-p0)*noccb,-1)\n VOVV = lib.unpack_tril(VOVV).reshape(p1-p0,noccb,nvirb,nvirb)\n t1b += lib.einsum('MIEF,EMFA->IA', c2bb[:,:,p0:p1], VOVV)\n t2bb[:,:,p0:p1] += lib.einsum('ie,bmae->miba', c1b, VOVV)\n VOVV = None\n\n if nvirb > 0 and nocca > 0:\n mem_now = lib.current_memory()[0]\n max_memory = myci.max_memory - mem_now\n blksize = max(int(max_memory*1e6/8/(nvirb**2*nocca*2)), 2)\n for p0,p1 in lib.prange(0, nvira, blksize):\n voVV = _cp(eris.voVV[p0:p1]).reshape((p1-p0)*nocca,-1)\n voVV = lib.unpack_tril(voVV).reshape(p1-p0,nocca,nvirb,nvirb)\n t1b += lib.einsum('mIeF,emAF->IA', c2ab[:,:,p0:p1], voVV)\n t2ab[:,:,p0:p1] += lib.einsum('IE,amBE->mIaB', c1b, voVV)\n voVV = None\n\n if nvira > 0 and noccb > 0:\n mem_now = lib.current_memory()[0]\n max_memory = myci.max_memory - mem_now\n blksize = max(int(max_memory*1e6/8/(nvira**2*noccb*2)), 2)\n for p0,p1 in lib.prange(0, nvirb, blksize):\n VOvv = _cp(eris.VOvv[p0:p1]).reshape((p1-p0)*noccb,-1)\n VOvv = lib.unpack_tril(VOvv).reshape(p1-p0,noccb,nvira,nvira)\n t1a += lib.einsum('iMfE,EMaf->ia', c2ab[:,:,:,p0:p1], VOvv)\n t2ab[:,:,:,p0:p1] += lib.einsum('ie,BMae->iMaB', c1a, VOvv)\n VOvv = None\n\n #:t1 = einsum('ie,ae->ia', c1, fvv)\n t1a += einsum('ie,ae->ia', c1a, fvva)\n t1b += einsum('ie,ae->ia', c1b, fvvb)\n #:t1 -= 
einsum('ma,mi->ia', c1, foo)\n t1a -=einsum('ma,mi->ia', c1a, fooa)\n t1b -=einsum('ma,mi->ia', c1b, foob)\n #:t1 += einsum('imae,me->ia', c2, fov)\n t1a += numpy.einsum('imae,me->ia', c2aa, fova)\n t1a += numpy.einsum('imae,me->ia', c2ab, fovb)\n t1b += numpy.einsum('imae,me->ia', c2bb, fovb)\n t1b += numpy.einsum('miea,me->ia', c2ab, fova)\n\n #:tmp = einsum('ijae,be->ijab', c2, fvv)\n #:t2 = tmp - tmp.transpose(0,1,3,2)\n t2aa += lib.einsum('ijae,be->ijab', c2aa, fvva*.5)\n t2bb += lib.einsum('ijae,be->ijab', c2bb, fvvb*.5)\n t2ab += lib.einsum('iJaE,BE->iJaB', c2ab, fvvb)\n t2ab += lib.einsum('iJeA,be->iJbA', c2ab, fvva)\n #:tmp = einsum('imab,mj->ijab', c2, foo)\n #:t2 -= tmp - tmp.transpose(1,0,2,3)\n t2aa -= lib.einsum('imab,mj->ijab', c2aa, fooa*.5)\n t2bb -= lib.einsum('imab,mj->ijab', c2bb, foob*.5)\n t2ab -= lib.einsum('iMaB,MJ->iJaB', c2ab, foob)\n t2ab -= lib.einsum('mIaB,mj->jIaB', c2ab, fooa)\n\n #:tmp = numpy.einsum('ia,jb->ijab', c1, fov)\n #:tmp = tmp - tmp.transpose(0,1,3,2)\n #:t2 += tmp - tmp.transpose(1,0,2,3)\n t2aa += numpy.einsum('ia,jb->ijab', c1a, fova)\n t2bb += numpy.einsum('ia,jb->ijab', c1b, fovb)\n t2ab += numpy.einsum('ia,jb->ijab', c1a, fovb)\n t2ab += numpy.einsum('ia,jb->jiba', c1b, fova)\n\n t2aa = t2aa - t2aa.transpose(0,1,3,2)\n t2aa = t2aa - t2aa.transpose(1,0,2,3)\n t2bb = t2bb - t2bb.transpose(0,1,3,2)\n t2bb = t2bb - t2bb.transpose(1,0,2,3)\n\n #:t2 += 0.5*einsum('mnab,mnij->ijab', c2, eris.oooo)\n eris_oooo = _cp(eris.oooo)\n eris_OOOO = _cp(eris.OOOO)\n eris_ooOO = _cp(eris.ooOO)\n t2aa += lib.einsum('mnab,minj->ijab', c2aa, eris_oooo)\n t2bb += lib.einsum('mnab,minj->ijab', c2bb, eris_OOOO)\n t2ab += lib.einsum('mNaB,miNJ->iJaB', c2ab, eris_ooOO)\n\n #:t2 += 0.5*einsum('ijef,abef->ijab', c2, eris.vvvv)\n #:eris_vvvv = ao2mo.restore(1, eris.vvvv, nvira)\n #:eris_vvVV = ucisd_slow._restore(eris.vvVV, nvira, nvirb)\n #:eris_VVVV = ao2mo.restore(1, eris.VVVV, nvirb)\n #:t2aa += lib.einsum('ijef,aebf->ijab', c2aa, eris_vvvv)\n #:t2bb += lib.einsum('ijef,aebf->ijab', c2bb, eris_VVVV)\n #:t2ab += lib.einsum('iJeF,aeBF->iJaB', c2ab, eris_vvVV)\n uccsd._add_vvvv_(myci, (c2aa,c2ab,c2bb), eris, (t2aa,t2ab,t2bb))\n\n #:t1 += fov * c0\n t1a += fova * c0\n t1b += fovb * c0\n #:t0 = numpy.einsum('ia,ia', fov, c1)\n t0 += numpy.einsum('ia,ia', fova, c1a)\n t0 += numpy.einsum('ia,ia', fovb, c1b)\n return amplitudes_to_cisdvec(t0, (t1a,t1b), (t2aa,t2ab,t2bb))\n\ndef amplitudes_to_cisdvec(c0, c1, c2):\n c1a, c1b = c1\n c2aa, c2ab, c2bb = c2\n nocca, nvira = c1a.shape\n noccb, nvirb = c1b.shape\n def trilidx(n):\n idx = numpy.tril_indices(n, -1)\n return idx[0] * n + idx[1]\n ooidxa = trilidx(nocca)\n vvidxa = trilidx(nvira)\n ooidxb = trilidx(noccb)\n vvidxb = trilidx(nvirb)\n size = (1, nocca*nvira, noccb*nvirb, nocca*noccb*nvira*nvirb,\n len(ooidxa)*len(vvidxa), len(ooidxb)*len(vvidxb))\n loc = numpy.cumsum(size)\n civec = numpy.empty(loc[-1])\n civec[0] = c0\n civec[loc[0]:loc[1]] = c1a.ravel()\n civec[loc[1]:loc[2]] = c1b.ravel()\n civec[loc[2]:loc[3]] = c2ab.ravel()\n lib.take_2d(c2aa.reshape(nocca**2,nvira**2), ooidxa, vvidxa, out=civec[loc[3]:loc[4]])\n lib.take_2d(c2bb.reshape(noccb**2,nvirb**2), ooidxb, vvidxb, out=civec[loc[4]:loc[5]])\n return civec\n\ndef cisdvec_to_amplitudes(civec, nmoa_nmob, nocca_noccb):\n norba, norbb = nmoa_nmob\n nocca, noccb = nocca_noccb\n nvira = norba - nocca\n nvirb = norbb - noccb\n nooa = nocca * (nocca-1) // 2\n nvva = nvira * (nvira-1) // 2\n noob = noccb * (noccb-1) // 2\n nvvb = nvirb * (nvirb-1) // 2\n size = (1, 
nocca*nvira, noccb*nvirb, nocca*noccb*nvira*nvirb,\n nooa*nvva, noob*nvvb)\n loc = numpy.cumsum(size)\n c0 = civec[0]\n c1a = civec[loc[0]:loc[1]].reshape(nocca,nvira)\n c1b = civec[loc[1]:loc[2]].reshape(noccb,nvirb)\n c2ab = civec[loc[2]:loc[3]].reshape(nocca,noccb,nvira,nvirb)\n c2aa = _unpack_4fold(civec[loc[3]:loc[4]], nocca, nvira)\n c2bb = _unpack_4fold(civec[loc[4]:loc[5]], noccb, nvirb)\n return c0, (c1a,c1b), (c2aa,c2ab,c2bb)\n\n\nclass UCISD(cisd.CISD):\n\n @property\n def nocc(self):\n nocca, noccb = self.get_nocc()\n return nocca + noccb\n\n @property\n def nmo(self):\n nmoa, nmob = self.get_nmo()\n return nmoa + nmob\n\n get_nocc = uccsd.get_nocc\n get_nmo = uccsd.get_nmo\n\n def kernel(self, ci0=None, mo_coeff=None, eris=None):\n if eris is None:\n eris = self.ao2mo(mo_coeff)\n self.converged, self.e_corr, self.ci = \\\n kernel(self, eris, ci0, max_cycle=self.max_cycle,\n tol=self.conv_tol, verbose=self.verbose)\n if numpy.all(self.converged):\n logger.info(self, 'UCISD converged')\n else:\n logger.info(self, 'UCISD not converged')\n if self.nroots > 1:\n for i,e in enumerate(self.e_tot):\n logger.note(self, 'UCISD root %d E = %.16g', i, e)\n else:\n logger.note(self, 'E(UCISD) = %.16g E_corr = %.16g',\n self.e_tot, self.e_corr)\n return self.e_corr, self.ci\n\n def get_init_guess(self, eris=None):\n if eris is None:\n eris = self.ao2mo(self.mo_coeff)\n nocca = eris.nocca\n noccb = eris.noccb\n mo_ea = eris.focka.diagonal()\n mo_eb = eris.fockb.diagonal()\n eia_a = mo_ea[:nocca,None] - mo_ea[None,nocca:]\n eia_b = mo_eb[:noccb,None] - mo_eb[None,noccb:]\n t1a = eris.focka[:nocca,nocca:] / eia_a\n t1b = eris.fockb[:noccb,noccb:] / eia_b\n\n eris_voov = _cp(eris.voov)\n eris_voOV = _cp(eris.voOV)\n eris_VOOV = _cp(eris.VOOV)\n t2aa = eris_voov.transpose(1,2,0,3) - eris_voov.transpose(2,1,0,3)\n t2bb = eris_VOOV.transpose(1,2,0,3) - eris_VOOV.transpose(2,1,0,3)\n t2ab = eris_voOV.transpose(1,2,0,3).copy()\n t2aa /= lib.direct_sum('ia+jb->ijab', eia_a, eia_a)\n t2ab /= lib.direct_sum('ia+jb->ijab', eia_a, eia_b)\n t2bb /= lib.direct_sum('ia+jb->ijab', eia_b, eia_b)\n\n emp2 = numpy.einsum('ia,ia', eris.focka[:nocca,nocca:], t1a)\n emp2 += numpy.einsum('ia,ia', eris.fockb[:noccb,noccb:], t1b)\n emp2 += numpy.einsum('aijb,ijab', eris_voov, t2aa) * .25\n emp2 -= numpy.einsum('ajib,ijab', eris_voov, t2aa) * .25\n emp2 += numpy.einsum('aijb,ijab', eris_VOOV, t2bb) * .25\n emp2 -= numpy.einsum('ajib,ijab', eris_VOOV, t2bb) * .25\n emp2 += numpy.einsum('aijb,ijab', eris_voOV, t2ab)\n self.emp2 = emp2\n logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2)\n return self.emp2, amplitudes_to_cisdvec(1, (t1a,t1b), (t2aa,t2ab,t2bb))\n\n contract = contract\n make_diagonal = make_diagonal\n\n def ao2mo(self, mo_coeff=None):\n nocc = self.nocc\n nvir = self.nmo - nocc\n mem_incore, mem_outcore, mem_basic = _mem_usage(nocc, nvir)\n mem_now = lib.current_memory()[0]\n if (self._scf._eri is not None and\n (mem_incore+mem_now < self.max_memory) or self.mol.incore_anyway):\n return _make_eris_incore(self, mo_coeff)\n\n elif hasattr(self._scf, 'with_df'):\n raise NotImplementedError\n\n else:\n return _make_eris_outcore(self, mo_coeff)\n\n def to_fci(self, cisdvec, nmoa_nmob=None, nocca_noccb=None):\n return to_fci(cisdvec, nmoa_nmob, nocca_noccb)\n\n def from_fci(self, fcivec, nmoa_nmob=None, nocca_noccb=None):\n return from_fci(fcivec, nmoa_nmob, nocca_noccb)\n\n def make_rdm1(self, ci=None, nmoa_nmob=None, nocca_noccb=None):\n if ci is None: ci = self.ci\n if nmoa_nmob is None: nmoa_nmob = 
self.get_nmo()\n if nocca_noccb is None: nocca_noccb = self.get_nocc()\n return make_rdm1(ci, nmoa_nmob, nocca_noccb)\n\n def make_rdm2(self, ci=None, nmoa_nmob=None, nocca_noccb=None):\n if ci is None: ci = self.ci\n if nmoa_nmob is None: nmoa_nmob = self.get_nmo()\n if nocca_noccb is None: nocca_noccb = self.get_nocc()\n return make_rdm2(ci, nmoa_nmob, nocca_noccb)\n\nCISD = UCISD\n\n\nclass _UCISD_ERIs:\n def __init__(self, myci, mo_coeff=None):\n moidx = uccsd.get_umoidx(myci)\n if mo_coeff is None:\n mo_coeff = (myci.mo_coeff[0][:,moidx[0]], myci.mo_coeff[1][:,moidx[1]])\n else:\n mo_coeff = (mo_coeff[0][:,moidx[0]], mo_coeff[1][:,moidx[1]])\n# Note: Always recompute the fock matrix in UCISD because the mf object may be\n# converted from ROHF object in which orbital energies are eigenvalues of\n# Roothaan Fock rather than the true alpha, beta orbital energies. \n dm = myci._scf.make_rdm1(myci.mo_coeff, myci.mo_occ)\n fockao = myci._scf.get_hcore() + myci._scf.get_veff(myci.mol, dm)\n self.focka = reduce(numpy.dot, (mo_coeff[0].T, fockao[0], mo_coeff[0]))\n self.fockb = reduce(numpy.dot, (mo_coeff[1].T, fockao[1], mo_coeff[1]))\n self.mo_coeff = mo_coeff\n self.nocca, self.noccb = myci.get_nocc()\n\n self.oooo = None\n self.vooo = None\n self.voov = None\n self.vvoo = None\n self.vovv = None\n self.vvvv = None\n\n self.OOOO = None\n self.VOOO = None\n self.VOOV = None\n self.VVOO = None\n self.VOVV = None\n self.VVVV = None\n\n self.ooOO = None\n self.voOO = None\n self.voOV = None\n self.vvOO = None\n self.voVV = None\n self.vvVV = None\n\n self.VOoo = None\n self.VVoo = None\n self.VOvv = None\n\ndef _make_eris_incore(myci, mo_coeff=None):\n eris = _UCISD_ERIs(myci, mo_coeff)\n nocca = eris.nocca\n noccb = eris.noccb\n nmoa = eris.focka.shape[0]\n nmob = eris.fockb.shape[0]\n nvira = nmoa - nocca\n nvirb = nmob - noccb\n moa, mob = eris.mo_coeff\n\n eri_aa = ao2mo.restore(1, ao2mo.full(myci._scf._eri, moa), nmoa)\n eris.oooo = eri_aa[:nocca,:nocca,:nocca,:nocca].copy()\n eris.vooo = eri_aa[nocca:,:nocca,:nocca,:nocca].copy()\n eris.voov = eri_aa[nocca:,:nocca,:nocca,nocca:].copy()\n eris.vvoo = eri_aa[nocca:,nocca:,:nocca,:nocca].copy()\n vovv = eri_aa[nocca:,:nocca,nocca:,nocca:].reshape(-1,nvira,nvira)\n eris.vovv = lib.pack_tril(vovv).reshape(nvira,nocca,nvira*(nvira+1)//2)\n eris.vvvv = ao2mo.restore(4, eri_aa[nocca:,nocca:,nocca:,nocca:].copy(), nvira)\n vovv = eri_aa = None\n\n eri_bb = ao2mo.restore(1, ao2mo.full(myci._scf._eri, mob), nmob)\n eris.OOOO = eri_bb[:noccb,:noccb,:noccb,:noccb].copy()\n eris.VOOO = eri_bb[noccb:,:noccb,:noccb,:noccb].copy()\n eris.VOOV = eri_bb[noccb:,:noccb,:noccb,noccb:].copy()\n eris.VVOO = eri_bb[noccb:,noccb:,:noccb,:noccb].copy()\n VOVV = eri_bb[noccb:,:noccb,noccb:,noccb:].reshape(-1,nvirb,nvirb)\n eris.VOVV = lib.pack_tril(VOVV).reshape(nvirb,noccb,nvirb*(nvirb+1)//2)\n eris.VVVV = ao2mo.restore(4, eri_bb[noccb:,noccb:,noccb:,noccb:].copy(), nvirb)\n VOVV = eri_bb = None\n\n eri_ab = ao2mo.general(myci._scf._eri, (moa,moa,mob,mob), compact=False)\n eri_ab = eri_ab.reshape(nmoa,nmoa,nmob,nmob)\n eris.ooOO = eri_ab[:nocca,:nocca,:noccb,:noccb].copy()\n eris.voOO = eri_ab[nocca:,:nocca,:noccb,:noccb].copy()\n eris.voOV = eri_ab[nocca:,:nocca,:noccb,noccb:].copy()\n eris.vvOO = eri_ab[nocca:,nocca:,:noccb,:noccb].copy()\n voVV = eri_ab[nocca:,:nocca,noccb:,noccb:].reshape(nocca*nvira,nvirb,nvirb)\n eris.voVV = lib.pack_tril(voVV).reshape(nvira,nocca,nvirb*(nvirb+1)//2)\n voVV = None\n vvVV = 
eri_ab[nocca:,nocca:,noccb:,noccb:].reshape(nvira**2,nvirb**2)\n idxa = numpy.tril_indices(nvira)\n idxb = numpy.tril_indices(nvirb)\n eris.vvVV = lib.take_2d(vvVV, idxa[0]*nvira+idxa[1], idxb[0]*nvirb+idxb[1])\n\n eri_ba = lib.transpose(eri_ab.reshape(nmoa**2,nmob**2))\n eri_ba = eri_ba.reshape(nmob,nmob,nmoa,nmoa)\n eris.VOoo = eri_ba[noccb:,:noccb,:nocca,:nocca].copy()\n eris.VVoo = eri_ba[noccb:,noccb:,:nocca,:nocca].copy()\n VOvv = eri_ba[noccb:,:noccb,nocca:,nocca:].reshape(noccb*nvirb,nvira,nvira)\n eris.VOvv = lib.pack_tril(VOvv).reshape(nvirb,noccb,nvira*(nvira+1)//2)\n VOvv = None\n eris.VVvv = eri_ba[noccb:,noccb:,nocca:,nocca:].copy() #X\n return eris\n\ndef _make_eris_outcore(myci, mo_coeff=None):\n cput0 = (time.clock(), time.time())\n log = logger.Logger(myci.stdout, myci.verbose)\n eris = _UCISD_ERIs(myci, mo_coeff)\n\n nocca = eris.nocca\n noccb = eris.noccb\n nmoa = eris.focka.shape[0]\n nmob = eris.fockb.shape[0]\n nvira = nmoa - nocca\n nvirb = nmob - noccb\n moa, mob = eris.mo_coeff\n mol = myci.mol\n\n eris.feri = lib.H5TmpFile()\n dtype = 'f8'\n eris.oooo = eris.feri.create_dataset('oooo', (nocca,nocca,nocca,nocca), dtype)\n eris.vooo = eris.feri.create_dataset('vooo', (nvira,nocca,nocca,nocca), dtype)\n eris.voov = eris.feri.create_dataset('voov', (nvira,nocca,nocca,nvira), dtype)\n eris.vvoo = eris.feri.create_dataset('vvoo', (nvira,nvira,nocca,nocca), dtype)\n eris.vovv = eris.feri.create_dataset('vovv', (nvira,nocca,nvira*(nvira+1)//2), dtype)\n #eris.vvvv = eris.feri.create_dataset('vvvv', (nvira,nvira,nvira,nvira), dtype)\n eris.OOOO = eris.feri.create_dataset('OOOO', (noccb,noccb,noccb,noccb), dtype)\n eris.VOOO = eris.feri.create_dataset('VOOO', (nvirb,noccb,noccb,noccb), dtype)\n eris.VOOV = eris.feri.create_dataset('VOOV', (nvirb,noccb,noccb,nvirb), dtype)\n eris.VVOO = eris.feri.create_dataset('VVOO', (nvirb,nvirb,noccb,noccb), dtype)\n eris.VOVV = eris.feri.create_dataset('VOVV', (nvirb,noccb,nvirb*(nvirb+1)//2), dtype)\n #eris.VVVV = eris.feri.create_dataset('VVVV', (nvirb,nvirb,nvirb,nvirb), dtype)\n eris.ooOO = eris.feri.create_dataset('ooOO', (nocca,nocca,noccb,noccb), dtype)\n eris.voOO = eris.feri.create_dataset('voOO', (nvira,nocca,noccb,noccb), dtype)\n eris.voOV = eris.feri.create_dataset('voOV', (nvira,nocca,noccb,nvirb), dtype)\n eris.vvOO = eris.feri.create_dataset('vvOO', (nvira,nvira,noccb,noccb), dtype)\n eris.voVV = eris.feri.create_dataset('voVV', (nvira,nocca,nvirb*(nvirb+1)//2), dtype)\n #eris.vvVV = eris.feri.create_dataset('vvVV', (nvira,nvira,nvirb,nvirb), dtype)\n eris.VOoo = eris.feri.create_dataset('VOoo', (nvirb,noccb,nocca,nocca), dtype)\n eris.VVoo = eris.feri.create_dataset('VVoo', (nvirb,nvirb,nocca,nocca), dtype)\n eris.VOvv = eris.feri.create_dataset('VOvv', (nvirb,noccb,nvira*(nvira+1)//2), dtype)\n\n cput1 = time.clock(), time.time()\n # <ij||pq> = <ij|pq> - <ij|qp> = (ip|jq) - (iq|jp)\n tmpfile2 = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)\n ao2mo.general(mol, (moa,moa[:,:nocca],moa,moa), tmpfile2.name, 'aa')\n with h5py.File(tmpfile2.name) as f:\n buf = lib.unpack_tril(f['aa'][:nocca*nocca])\n buf = buf.reshape(nocca,nocca,nmoa,nmoa)\n eris.oooo[:] = buf[:,:,:nocca,:nocca]\n oovv = buf[:,:,nocca:,nocca:].reshape(nocca**2,nvira**2)\n eris.vvoo[:] = lib.transpose(oovv).reshape(nvira,nvira,nocca,nocca)\n buf = oovv = None\n for i0, i1 in lib.prange(0, nvira, nocca):\n buf = lib.unpack_tril(f['aa'][(nocca+i0)*nocca:(nocca+i1)*nocca])\n eris.vovv[i0:i1] = lib.pack_tril(buf[:,nocca:,nocca:]).reshape(i1-i0,nocca,-1)\n 
buf = buf.reshape(i1-i0,nocca,nmoa,nmoa)\n eris.vooo[i0:i1] = buf[:,:nocca,:nocca,:nocca]\n eris.voov[i0:i1] = buf[:,:nocca,:nocca,nocca:]\n buf = None\n del(f['aa'])\n\n if noccb > 0:\n ao2mo.general(mol, (mob,mob[:,:noccb],mob,mob), tmpfile2.name, 'bb')\n with h5py.File(tmpfile2.name) as f:\n buf = lib.unpack_tril(f['bb'][:noccb*noccb])\n buf = buf.reshape(noccb,noccb,nmob,nmob)\n eris.OOOO[:] = buf[:,:,:noccb,:noccb]\n oovv = buf[:,:,noccb:,noccb:].reshape(noccb**2,nvirb**2)\n eris.VVOO[:] = lib.transpose(oovv).reshape(nvirb,nvirb,noccb,noccb)\n buf = oovv = None\n for i0, i1 in lib.prange(0, nvirb, noccb):\n buf = lib.unpack_tril(f['bb'][(noccb+i0)*noccb:(noccb+i1)*noccb])\n eris.VOVV[i0:i1] = lib.pack_tril(buf[:,noccb:,noccb:]).reshape(i1-i0,noccb,-1)\n buf = buf.reshape(i1-i0,noccb,nmob,nmob)\n eris.VOOO[i0:i1] = buf[:,:noccb,:noccb,:noccb]\n eris.VOOV[i0:i1] = buf[:,:noccb,:noccb,noccb:]\n buf = None\n del(f['bb'])\n\n ao2mo.general(mol, (moa,moa[:,:nocca],mob,mob), tmpfile2.name, 'ab')\n with h5py.File(tmpfile2.name) as f:\n buf = lib.unpack_tril(f['ab'][:nocca*nocca])\n buf = buf.reshape(nocca,nocca,nmob,nmob)\n eris.ooOO[:] = buf[:,:,:noccb,:noccb]\n oovv = buf[:,:,noccb:,noccb:].reshape(nocca**2,nvirb**2)\n eris.VVoo[:] = lib.transpose(oovv).reshape(nvirb,nvirb,nocca,nocca)\n buf = oovv = None\n for i0, i1 in lib.prange(0, nvira, nocca):\n buf = lib.unpack_tril(f['ab'][(nocca+i0)*nocca:(nocca+i1)*nocca])\n eris.voVV[i0:i1] = lib.pack_tril(buf[:,noccb:,noccb:]).reshape(i1-i0,nocca,-1)\n buf = buf.reshape(i1-i0,nocca,nmob,nmob)\n eris.voOO[i0:i1] = buf[:,:nocca,:noccb,:noccb]\n eris.voOV[i0:i1] = buf[:,:nocca,:noccb,noccb:]\n buf = None\n del(f['ab'])\n\n if noccb > 0:\n ao2mo.general(mol, (mob,mob[:,:noccb],moa,moa), tmpfile2.name, 'ba')\n with h5py.File(tmpfile2.name) as f:\n buf = lib.unpack_tril(f['ba'][:noccb*noccb])\n buf = buf.reshape(noccb,noccb,nmoa,nmoa)\n oovv = buf[:,:,nocca:,nocca:].reshape(noccb**2,nvira**2)\n eris.vvOO[:] = lib.transpose(oovv).reshape(nvira,nvira,noccb,noccb)\n buf = oovv = None\n for i0, i1 in lib.prange(0, nvirb, noccb):\n buf = lib.unpack_tril(f['ba'][(noccb+i0)*noccb:(noccb+i1)*noccb])\n eris.VOvv[i0:i1] = lib.pack_tril(buf[:,nocca:,nocca:]).reshape(i1-i0,noccb,-1)\n buf = buf.reshape(i1-i0,noccb,nmoa,nmoa)\n eris.VOoo[i0:i1] = buf[:,:noccb,:nocca,:nocca]\n buf = None\n del(f['ba'])\n\n cput1 = log.timer_debug1('transforming vopq', *cput1)\n\n orbva = moa[:,nocca:]\n orbvb = mob[:,noccb:]\n ao2mo.full(mol, orbva, eris.feri, dataname='vvvv')\n ao2mo.full(mol, orbvb, eris.feri, dataname='VVVV')\n ao2mo.general(mol, (orbva,orbva,orbvb,orbvb), eris.feri, dataname='vvVV')\n eris.vvvv = eris.feri['vvvv']\n eris.VVVV = eris.feri['VVVV']\n eris.vvVV = eris.feri['vvVV']\n\n cput1 = log.timer_debug1('transforming vvvv', *cput1)\n log.timer('CISD integral transformation', *cput0)\n return eris\n\n\ndef _cp(a):\n return numpy.array(a, copy=False, order='C')\n\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import scf\n from pyscf import fci\n numpy.random.seed(12)\n nocc = 3\n nvir = 5\n nmo = nocc + nvir\n\n c1a = numpy.random.random((nocc,nvir))\n c1b = numpy.random.random((nocc,nvir))\n c2aa = numpy.random.random((nocc,nocc,nvir,nvir))\n c2bb = numpy.random.random((nocc,nocc,nvir,nvir))\n c2ab = numpy.random.random((nocc,nocc,nvir,nvir))\n c1 = (c1a, c1b)\n c2 = (c2aa, c2ab, c2bb)\n cisdvec = amplitudes_to_cisdvec(1., c1, c2)\n fcivec = to_fci(cisdvec, (nmo,nmo), (nocc,nocc))\n cisdvec1 = from_fci(fcivec, (nmo,nmo), (nocc,nocc))\n 
print(abs(cisdvec-cisdvec1).sum())\n ci1 = to_fci(cisdvec1, (nmo,nmo), (nocc,nocc))\n print(abs(fcivec-ci1).sum())\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.atom = [\n ['H', ( 1.,-1. , 0. )],\n ['H', ( 0.,-1. ,-1. )],\n ['H', ( 1.,-0.5 , 0. )],\n ['H', ( 0.,-1. , 1. )],\n ]\n mol.charge = -2\n mol.spin = 2\n mol.basis = '3-21g'\n mol.build()\n mf = scf.UHF(mol).run(conv_tol=1e-14)\n ehf0 = mf.e_tot - mol.energy_nuc()\n myci = CISD(mf)\n numpy.random.seed(10)\n mo = numpy.random.random(myci.mo_coeff.shape)\n\n eris0 = _make_eris_incore(myci, mo)\n eris1 = _make_eris_outcore(myci, mo)\n print('oooo', abs(eris0.oooo - eris1.oooo).max())\n print('vooo', abs(eris0.vooo - eris1.vooo).max())\n print('voov', abs(eris0.voov - eris1.voov).max())\n print('vvoo', abs(eris0.vvoo - eris1.vvoo).max())\n print('vovv', abs(eris0.vovv - eris1.vovv).max())\n print('vvvv', abs(eris0.vvvv - eris1.vvvv).max())\n\n print('OOOO', abs(eris0.OOOO - eris1.OOOO).max())\n print('VOOO', abs(eris0.VOOO - eris1.VOOO).max())\n print('VOOV', abs(eris0.VOOV - eris1.VOOV).max())\n print('VVOO', abs(eris0.VVOO - eris1.VVOO).max())\n print('VOVV', abs(eris0.VOVV - eris1.VOVV).max())\n print('VVVV', abs(eris0.VVVV - eris1.VVVV).max())\n\n print('ooOO', abs(eris0.ooOO - eris1.ooOO).max())\n print('voOO', abs(eris0.voOO - eris1.voOO).max())\n print('voOV', abs(eris0.voOV - eris1.voOV).max())\n print('vvOO', abs(eris0.vvOO - eris1.vvOO).max())\n print('voVV', abs(eris0.voVV - eris1.voVV).max())\n print('vvVV', abs(eris0.vvVV - eris1.vvVV).max())\n\n print('VOoo', abs(eris0.VOoo - eris1.VOoo).max())\n print('VVoo', abs(eris0.VVoo - eris1.VVoo).max())\n print('VOvv', abs(eris0.VOvv - eris1.VOvv).max())\n\n eris = myci.ao2mo(mo)\n print(lib.finger(myci.make_diagonal(eris)) - -838.45507742639279)\n\n numpy.random.seed(12)\n nocca, noccb = mol.nelec\n nmo = mf.mo_occ[0].size\n nvira = nmo - nocca\n nvirb = nmo - noccb\n c1a = .1 * numpy.random.random((nocca,nvira))\n c1b = .1 * numpy.random.random((noccb,nvirb))\n c2aa = .1 * numpy.random.random((nocca,nocca,nvira,nvira))\n c2bb = .1 * numpy.random.random((noccb,noccb,nvirb,nvirb))\n c2ab = .1 * numpy.random.random((nocca,noccb,nvira,nvirb))\n cisdvec = amplitudes_to_cisdvec(1., (c1a, c1b), (c2aa, c2ab, c2bb))\n\n hcisd0 = contract(myci, amplitudes_to_cisdvec(1., (c1a,c1b), (c2aa,c2ab,c2bb)), eris)\n# from pyscf.ci import gcisd_slow\n# res = cisdvec_to_amplitudes(hcisd0, nmoa_nmob, nocca_noccb)\n# res = (res[0],\n# uccsd.spatial2spin(res[1], eris.orbspin),\n# uccsd.spatial2spin(res[2], eris.orbspin))\n# print(lib.finger(gcisd_slow.amplitudes_to_cisdvec(*res)) - 187.10206473716548)\n print(lib.finger(hcisd0) - 466.56620234351681)\n eris = myci.ao2mo(mf.mo_coeff)\n hcisd0 = contract(myci, cisdvec, eris)\n eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0])\n eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1])\n eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],\n mf.mo_coeff[1], mf.mo_coeff[1]])\n h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))\n h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))\n h2e = fci.direct_uhf.absorb_h1e((h1a,h1b), (eri_aa,eri_ab,eri_bb),\n h1a.shape[0], mol.nelec, .5)\n nmo = (mf.mo_coeff[0].shape[1],mf.mo_coeff[1].shape[1])\n fcivec = to_fci(cisdvec, nmo, mol.nelec)\n hci1 = fci.direct_uhf.contract_2e(h2e, fcivec, h1a.shape[0], mol.nelec)\n hci1 -= ehf0 * fcivec\n hcisd1 = from_fci(hci1, nmo, mol.nelec)\n print(numpy.linalg.norm(hcisd1-hcisd0) / numpy.linalg.norm(hcisd0))\n\n ecisd = 
myci.kernel(eris=eris)[0]\n efci = fci.direct_uhf.kernel((h1a,h1b), (eri_aa,eri_ab,eri_bb),\n h1a.shape[0], mol.nelec)[0]\n print(ecisd, ecisd - -0.037067274690894436, '> E(fci)', efci-ehf0)\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.atom = [\n ['H', ( 1.,-1. , 0. )],\n ['H', ( 0.,-1. ,-1. )],\n ['H', ( 1.,-0.5 , 0. )],\n ['H', ( 0.,-1. , 1. )],\n ]\n mol.charge = 2\n mol.spin = 2\n mol.basis = '6-31g'\n mol.build()\n mf = scf.UHF(mol).run(conv_tol=1e-14)\n ehf0 = mf.e_tot - mol.energy_nuc()\n myci = CISD(mf)\n eris = myci.ao2mo()\n ecisd = myci.kernel(eris=eris)[0]\n eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0])\n eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1])\n eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],\n mf.mo_coeff[1], mf.mo_coeff[1]])\n h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))\n h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))\n efci, fcivec = fci.direct_uhf.kernel((h1a,h1b), (eri_aa,eri_ab,eri_bb),\n h1a.shape[0], mol.nelec)\n print(ecisd, '== E(fci)', efci-ehf0)\n dm1ref, dm2ref = fci.direct_uhf.make_rdm12s(fcivec, h1a.shape[0], mol.nelec)\n rdm1 = make_rdm1(myci.ci, myci.get_nmo(), myci.get_nocc())\n rdm2 = make_rdm2(myci.ci, myci.get_nmo(), myci.get_nocc())\n print('dm1a', abs(dm1ref[0] - rdm1[0]).max())\n print('dm1b', abs(dm1ref[1] - rdm1[1]).max())\n print('dm2aa', abs(dm2ref[0] - rdm2[0]).max())\n print('dm2ab', abs(dm2ref[1] - rdm2[1]).max())\n print('dm2bb', abs(dm2ref[2] - rdm2[2]).max())\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.atom = [\n ['O', ( 0., 0. , 0. )],\n ['H', ( 0., -0.757, 0.587)],\n ['H', ( 0., 0.757 , 0.587)],]\n mol.basis = {'H': 'sto-3g',\n 'O': 'sto-3g',}\n mol.build()\n mf = scf.UHF(mol).run(conv_tol=1e-14)\n myci = CISD(mf)\n eris = myci.ao2mo()\n ecisd, civec = myci.kernel(eris=eris)\n print(ecisd - -0.048878084082066106)\n\n nmoa = mf.mo_energy[0].size\n nmob = mf.mo_energy[1].size\n rdm1 = myci.make_rdm1(civec)\n rdm2 = myci.make_rdm2(civec)\n eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0], compact=False).reshape([nmoa]*4)\n eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1], compact=False).reshape([nmob]*4)\n eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],\n mf.mo_coeff[1], mf.mo_coeff[1]], compact=False)\n eri_ab = eri_ab.reshape(nmoa,nmoa,nmob,nmob)\n h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))\n h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))\n e2 = (numpy.einsum('ij,ji', h1a, rdm1[0]) +\n numpy.einsum('ij,ji', h1b, rdm1[1]) +\n numpy.einsum('ijkl,ijkl', eri_aa, rdm2[0]) * .5 +\n numpy.einsum('ijkl,ijkl', eri_ab, rdm2[1]) +\n numpy.einsum('ijkl,ijkl', eri_bb, rdm2[2]) * .5)\n print(ecisd + mf.e_tot - mol.energy_nuc() - e2) # = 0\n\n print(abs(rdm1[0] - (numpy.einsum('ijkk->ij', rdm2[0]) +\n numpy.einsum('ijkk->ij', rdm2[1]))/(mol.nelectron-1)).sum())\n print(abs(rdm1[1] - (numpy.einsum('ijkk->ij', rdm2[2]) +\n numpy.einsum('kkij->ij', rdm2[1]))/(mol.nelectron-1)).sum())\n\n",
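The flat CI-vector layout produced by amplitudes_to_cisdvec and consumed by cisdvec_to_amplitudes above is easy to get wrong, so here is a minimal standalone sketch of the offset arithmetic. The helper name is hypothetical (not part of pyscf); it only mirrors the `size` tuple those functions build.

# Layout sketch for the UCISD coefficient vector assembled above:
#   [ c0 | c1a | c1b | c2ab | c2aa (i>j, a>b packed) | c2bb (I>J, A>B packed) ]
# The same-spin doubles keep only the strict lower triangles because
# c2aa/c2bb are antisymmetric under i<->j and a<->b.
import numpy

def ucisd_vector_size(nocca, noccb, nvira, nvirb):
    # hypothetical helper mirroring the `size` tuple in cisdvec_to_amplitudes
    nooa = nocca * (nocca - 1) // 2      # unique i>j occupied alpha pairs
    nvva = nvira * (nvira - 1) // 2      # unique a>b virtual alpha pairs
    noob = noccb * (noccb - 1) // 2
    nvvb = nvirb * (nvirb - 1) // 2
    return (1                                  # c0
            + nocca * nvira                    # c1a
            + noccb * nvirb                    # c1b
            + nocca * noccb * nvira * nvirb    # c2ab, stored in full
            + nooa * nvva                      # packed c2aa
            + noob * nvvb)                     # packed c2bb

# For the nocc=3, nvir=5 random test above: 1 + 15 + 15 + 225 + 30 + 30 = 316
assert ucisd_vector_size(3, 3, 5, 5) == 316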
"#!/usr/bin/env python\nimport unittest\nimport numpy\n\nfrom pyscf import gto, lib\nfrom pyscf import scf\nfrom pyscf import cc\n\nmol = gto.Mole()\nmol.verbose = 7\nmol.output = '/dev/null'\nmol.atom = [\n [8 , (0. , 0. , 0.)],\n [1 , (0. , -0.757 , 0.587)],\n [1 , (0. , 0.757 , 0.587)]]\n\nmol.basis = {'H': 'cc-pvdz',\n 'O': 'cc-pvdz',}\nmol.build()\nmf = scf.RHF(mol)\nmf.conv_tol_grad = 1e-8\nehf = mf.kernel()\n\n\nclass KnowValues(unittest.TestCase):\n def test_ccsd(self):\n mcc = cc.ccsd.CC(mf)\n mcc.conv_tol = 1e-9\n mcc.conv_tol_normt = 1e-7\n eris = mcc.ao2mo()\n emp2, t1, t2 = mcc.init_amps(eris)\n self.assertAlmostEqual(abs(t2).sum(), 4.9556571211255909, 6)\n self.assertAlmostEqual(emp2, -0.2040199672883385, 10)\n t1, t2 = cc.ccsd.update_amps(mcc, t1, t2, eris)\n self.assertAlmostEqual(abs(t1).sum(), 0.0475038989126, 8)\n self.assertAlmostEqual(abs(t2).sum(), 5.4018238455030, 6)\n self.assertAlmostEqual(cc.ccsd.energy(mcc, t1, t2, eris),\n -0.208967840546667, 10)\n t1, t2 = cc.ccsd.update_amps(mcc, t1, t2, eris)\n self.assertAlmostEqual(cc.ccsd.energy(mcc, t1, t2, eris),\n -0.212173678670510, 10)\n self.assertAlmostEqual(abs(t1).sum(), 0.05470123093500083, 8)\n self.assertAlmostEqual(abs(t2).sum(), 5.5605208386554716, 6)\n\n mcc.kernel()\n self.assertTrue(numpy.allclose(mcc.t2,mcc.t2.transpose(1,0,3,2)))\n self.assertAlmostEqual(mcc.ecc, -0.2133432312951, 8)\n self.assertAlmostEqual(abs(mcc.t2).sum(), 5.63970279799556984, 6)\n\n nocc, nvir = t1.shape\n tau = t2 + numpy.einsum('ia,jb->ijab', t1, t1)\n ovvv = lib.unpack_tril(eris.ovvv).reshape(nocc,nvir,nvir,nvir)\n tmp = -numpy.einsum('ijcd,ka,kdcb->ijba', tau, t1, ovvv)\n t2a = tmp + tmp.transpose(1,0,3,2)\n t2a = t2a[numpy.tril_indices(nocc)]\n t2a += mcc.add_wvvVV(t1, t2, eris)\n mcc.direct = True\n t2b = mcc.add_wvvVV(t1, t2, eris)\n self.assertTrue(numpy.allclose(t2a,t2b))\n\n def test_ccsd_frozen(self):\n mcc = cc.ccsd.CC(mf, frozen=range(1))\n mcc.conv_tol = 1e-10\n mcc.kernel()\n self.assertAlmostEqual(mcc.ecc, -0.21124878189922872, 8)\n self.assertAlmostEqual(abs(mcc.t2).sum(), 5.4996425901189347, 6)\n\n def test_ccsd_cart(self):\n pmol = mol.copy()\n pmol.cart = True\n pmol.build()\n mf = scf.RHF(pmol).set(conv_tol_grad=1e-8).run()\n mcc = cc.ccsd.CC(mf, frozen=range(1))\n mcc.conv_tol = 1e-10\n mcc.kernel()\n self.assertAlmostEqual(mcc.ecc, -0.21303885376969361, 8)\n\n def test_h2o_non_hf_orbital(self):\n nmo = mf.mo_energy.size\n nocc = mol.nelectron // 2\n nvir = nmo - nocc\n u = numpy.eye(nmo)\n numpy.random.seed(1)\n u[:nocc,:nocc] = numpy.linalg.svd(numpy.random.random((nocc,nocc)))[0]\n u[nocc:,nocc:] = numpy.linalg.svd(numpy.random.random((nvir,nvir)))[0]\n mycc = cc.ccsd.CCSD(mf)\n mycc.conv_tol = 1e-12\n mycc.diis_start_energy_diff = 1e2\n mycc.max_cycle = 1000\n mycc.mo_coeff = mo_coeff=numpy.dot(mf.mo_coeff,u)\n self.assertAlmostEqual(mycc.kernel()[0], -0.21334323320620596, 8)\n\n## FIXME\n# def test_h2o_without_scf(self):\n# mycc = cc.ccsd.CCSD(mf)\n# nmo = mf.mo_energy.size\n# nocc = mol.nelectron // 2\n# nvir = nmo - nocc\n# numpy.random.seed(1)\n# u = numpy.eye(nmo) + numpy.random.random((nmo,nmo))*.2\n# u, w, vh = numpy.linalg.svd(u)\n# u = numpy.dot(u, vh)\n#\n# mo1 = numpy.dot(mf.mo_coeff, u)\n# dm1 = mf.make_rdm1(mo1, mf.mo_occ)\n#\n# mycc.diis_start_energy_diff = 1e2\n# mycc.max_cycle = 1000\n# mycc.conv_tol = 1e-12\n# mycc.mo_coeff = mo1\n# self.assertAlmostEqual(mf.energy_tot(dm1)+mycc.kernel()[0],\n# ehf-0.21334323320620596, 8)\n\n def test_ccsd_lambda(self):\n mcc = cc.ccsd.CC(mf)\n mcc.conv_tol 
= 1e-9\n mcc.conv_tol_normt = 1e-7\n mcc.kernel()\n mcc.solve_lambda()\n self.assertAlmostEqual(numpy.linalg.norm(mcc.l1), 0.01326267012100099, 7)\n self.assertAlmostEqual(numpy.linalg.norm(mcc.l2), 0.21257559872380857, 7)\n\n def test_ccsd_rdm(self):\n mcc = cc.ccsd.CC(mf)\n mcc.conv_tol = 1e-9\n mcc.conv_tol_normt = 1e-7\n mcc.kernel()\n mcc.solve_lambda()\n dm1 = mcc.make_rdm1()\n dm2 = mcc.make_rdm2()\n self.assertAlmostEqual(numpy.linalg.norm(dm1), 4.4227836730016374, 7)\n self.assertAlmostEqual(numpy.linalg.norm(dm2), 20.074629443311355, 7)\n\n def test_scanner(self):\n mol1 = mol.copy()\n mol1.set_geom_('''\n O 0. 0. .1\n H 0. -0.757 0.587\n H 0. 0.757 0.587''')\n cc_scanner = scf.RHF(mol).apply(cc.CCSD).as_scanner()\n self.assertAlmostEqual(cc_scanner(mol), -76.240108935038691, 7)\n self.assertAlmostEqual(cc_scanner(mol1), -76.228972886940639, 7)\n\n def test_init(self):\n from pyscf.cc import ccsd\n from pyscf.cc import uccsd\n from pyscf.cc import dfccsd\n self.assertTrue(isinstance(cc.CCSD(mf), ccsd.CCSD))\n self.assertTrue(isinstance(cc.CCSD(mf.density_fit()), dfccsd.RCCSD))\n self.assertTrue(isinstance(cc.CCSD(mf.newton()), ccsd.CCSD))\n self.assertTrue(isinstance(cc.CCSD(mf.density_fit().newton()), dfccsd.RCCSD))\n self.assertTrue(isinstance(cc.CCSD(mf.newton().density_fit()), ccsd.CCSD))\n self.assertTrue(not isinstance(cc.CCSD(mf.newton().density_fit()), dfccsd.RCCSD))\n self.assertTrue(isinstance(cc.CCSD(mf.density_fit().newton().density_fit()), dfccsd.RCCSD))\n\n self.assertTrue(isinstance(cc.UCCSD(mf), uccsd.UCCSD))\n# self.assertTrue(isinstance(cc.UCCSD(mf.density_fit()), dfccsd.UCCSD))\n self.assertTrue(isinstance(cc.UCCSD(mf.newton()), uccsd.UCCSD))\n# self.assertTrue(isinstance(cc.UCCSD(mf.density_fit().newton()), dfccsd.UCCSD))\n self.assertTrue(isinstance(cc.UCCSD(mf.newton().density_fit()), uccsd.UCCSD))\n# self.assertTrue(not isinstance(cc.UCCSD(mf.newton().density_fit()), dfccsd.UCCSD))\n# self.assertTrue(isinstance(cc.UCCSD(mf.density_fit().newton().density_fit()), dfccsd.UCCSD))\n\n umf = scf.convert_to_uhf(mf, scf.UHF(mol))\n self.assertTrue(isinstance(cc.CCSD(umf), uccsd.UCCSD))\n# self.assertTrue(isinstance(cc.CCSD(umf.density_fit()), dfccsd.UCCSD))\n self.assertTrue(isinstance(cc.CCSD(umf.newton()), uccsd.UCCSD))\n# self.assertTrue(isinstance(cc.CCSD(umf.density_fit().newton()), dfccsd.UCCSD))\n self.assertTrue(isinstance(cc.CCSD(umf.newton().density_fit()), uccsd.UCCSD))\n# self.assertTrue(not isinstance(cc.CCSD(umf.newton().density_fit()), dfccsd.UCCSD))\n# self.assertTrue(isinstance(cc.CCSD(umf.density_fit().newton().density_fit()), dfccsd.UCCSD))\n\nif __name__ == \"__main__\":\n print(\"Full Tests for H2O\")\n unittest.main()\n\n",
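For readers following the test file above, this is the bare workflow it exercises, written out as a standalone sketch. It assumes a working pyscf installation; the reference correlation energy is the value asserted in test_ccsd.

# Minimal sketch of the RHF -> CCSD pipeline tested above:
# water in cc-pVDZ, tight SCF gradient, then CCSD on top of it.
from pyscf import gto, scf, cc

mol = gto.M(
    atom='O 0 0 0; H 0 -0.757 0.587; H 0 0.757 0.587',
    basis='cc-pvdz',
    verbose=0,
)
mf = scf.RHF(mol)
mf.conv_tol_grad = 1e-8
ehf = mf.kernel()

mycc = cc.CCSD(mf)
mycc.conv_tol = 1e-9
mycc.conv_tol_normt = 1e-7
ecc, t1, t2 = mycc.kernel()      # returns (e_corr, t1, t2)
print('E(HF) = %.10f  E_corr(CCSD) = %.10f' % (ehf, ecc))
# test_ccsd checks ecc against -0.2133432313 to 8 decimal places.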
"#!/usr/bin/env python\n\nfrom functools import reduce\nimport unittest\nimport numpy\nfrom pyscf import gto\nfrom pyscf import symm\n\nh2o = gto.Mole()\nh2o.verbose = 0\nh2o.output = None#\"out_h2o\"\nh2o.atom.extend([\n [\"O\" , (0. , 0. , 0.)],\n [1 , (0. , -0.757 , 0.587)],\n [1 , (0. , 0.757 , 0.587)] ])\n\nh2o.basis = {\"H\": 'cc-pVDZ',\n \"O\": 'cc-pVDZ',}\nh2o.build()\n\n\nclass KnowValues(unittest.TestCase):\n def test_real2spinor(self):\n s0 = h2o.intor('int1e_ovlp_sph')\n s1 = h2o.intor('int1e_ovlp_spinor')\n\n ua, ub = symm.cg.real2spinor_whole(h2o)\n\n s2 = reduce(numpy.dot, (ua.T.conj(), s0, ua)) \\\n + reduce(numpy.dot, (ub.T.conj(), s0, ub))\n self.assertTrue(numpy.allclose(s2,s1))\n\n\nif __name__ == \"__main__\":\n print(\"Full Tests geom\")\n unittest.main()\n",
"#!/usr/bin/env python\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nGeneral JK contraction function for\n* arbitrary integrals\n* 4 different molecules\n* multiple density matrices\n* arbitrary basis subset for the 4 indices\n'''\n\nimport time\nimport numpy\nimport pyscf.lib\nfrom pyscf import gto\nfrom pyscf.lib import logger\nfrom pyscf.scf import _vhf\n\n\ndef get_jk(mols, dms, scripts=['ijkl,ji->kl'], intor='int2e_sph',\n aosym='s1', comp=1, hermi=0, shls_slice=None, verbose=logger.WARN):\n '''Compute J/K matrices for the given density matrix\n\n Args:\n mols : an instance of :class:`Mole` or a list of `Mole` objects\n\n dms : ndarray or list of ndarrays\n A density matrix or a list of density matrices\n\n Kwargs:\n hermi : int\n Whether J/K matrix is hermitian\n\n | 0 : no hermitian or symmetric\n | 1 : hermitian\n | 2 : anti-hermitian\n\n intor : str\n 2-electron integral name. See :func:`getints` for the complete\n list of available 2-electron integral names\n aosym : int or str\n Permutation symmetry for the AO integrals\n\n | 4 or '4' or 's4': 4-fold symmetry (default)\n | '2ij' or 's2ij' : symmetry between i, j in (ij|kl)\n | '2kl' or 's2kl' : symmetry between k, l in (ij|kl)\n | 1 or '1' or 's1': no symmetry\n | 'a4ij' : 4-fold symmetry with anti-symmetry between i, j in (ij|kl)\n | 'a4kl' : 4-fold symmetry with anti-symmetry between k, l in (ij|kl)\n | 'a2ij' : anti-symmetry between i, j in (ij|kl)\n | 'a2kl' : anti-symmetry between k, l in (ij|kl)\n\n comp : int\n Components of the integrals, e.g. cint2e_ip_sph has 3 components.\n scripts : a list of strings\n Contraction description (following numpy.einsum convention) based on\n letters [ijkl]. Each script will be one-to-one applied to each\n entry of dms. So it must have the same number of elements as the\n dms, len(scripts) == len(dms).\n shls_slice : 8-element list\n (ish_start, ish_end, jsh_start, jsh_end, ksh_start, ksh_end, lsh_start, lsh_end)\n\n Returns:\n Depending on the number of density matrices, the function returns one\n J/K matrix or a list of J/K matrices (the same number of entries as the\n input dms).\n Each JK matrices may be a 2D array or 3D array if the AO integral\n has multiple components.\n\n Examples:\n\n >>> from pyscf import gto\n >>> mol = gto.M(atom='H 0 -.5 0; H 0 .5 0', basis='cc-pvdz')\n >>> nao = mol.nao_nr()\n >>> dm = numpy.random.random((nao,nao))\n >>> # Default, Coulomb matrix\n >>> vj = get_jk(mol, dm)\n >>> # Coulomb matrix with 8-fold permutation symmetry for AO integrals\n >>> vj = get_jk(mol, dm, 'ijkl,ji->kl', aosym='s8')\n >>> # Exchange matrix with 8-fold permutation symmetry for AO integrals\n >>> vk = get_jk(mol, dm, 'ijkl,jk->il', aosym='s8')\n >>> # Compute coulomb and exchange matrices together\n >>> vj, vk = get_jk(mol, (dm,dm), ('ijkl,ji->kl','ijkl,li->kj'), aosym='s8')\n >>> # Analytical gradients for coulomb matrix\n >>> j1 = get_jk(mol, dm, 'ijkl,lk->ij', intor='int2e_ip1_sph', aosym='s2kl', comp=3)\n\n >>> # contraction across two molecules\n >>> mol1 = gto.M(atom='He 2 0 0', basis='6-31g')\n >>> nao1 = mol1.nao_nr()\n >>> dm1 = numpy.random.random((nao1,nao1))\n >>> # Coulomb interaction between two molecules, note 4-fold symmetry can be applied\n >>> jcross = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', aosym='s4')\n >>> ecoul = numpy.einsum('ij,ij', jcross, dm1)\n >>> # Exchange interaction between two molecules, no symmetry can be used\n >>> kcross = get_jk((mol1,mol,mol,mol1), dm, scripts='ijkl,jk->il')\n >>> ex = numpy.einsum('ij,ji', kcross, 
dm1)\n\n >>> # Analytical gradients for coulomb matrix between two molecules\n >>> jcros1 = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', intor='int2e_ip1_sph', comp=3)\n >>> # Analytical gradients for coulomb interaction between 1s density and the other molecule\n >>> jpart1 = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', intor='int2e_ip1_sph', comp=3,\n ... shls_slice=(0,1,0,1,0,mol.nbas,0,mol.nbas))\n '''\n if isinstance(mols, (tuple, list)):\n assert(len(mols) == 4)\n assert(mols[0].cart == mols[1].cart == mols[2].cart == mols[3].cart)\n if shls_slice is None:\n shls_slice = numpy.array([(0, mol.nbas) for mol in mols])\n else:\n shls_slice = numpy.asarray(shls_slice).reshape(4,2)\n# concatenate unique mols and build corresponding shls_slice\n mol_ids = [id(mol) for mol in mols]\n atm, bas, env = mols[0]._atm, mols[0]._bas, mols[0]._env\n bas_start = numpy.zeros(4, dtype=int)\n for m in range(1,4):\n first = mol_ids.index(mol_ids[m])\n if first == m: # the unique mol\n bas_start[m] = bas.shape[0]\n atm, bas, env = gto.conc_env(atm, bas, env, mols[m]._atm,\n mols[m]._bas, mols[m]._env)\n else:\n bas_start[m] = bas_start[first]\n shls_slice[m] += bas_start[m]\n shls_slice = shls_slice.flatten()\n else:\n atm, bas, env = mols._atm, mols._bas, mols._env\n if shls_slice is None:\n shls_slice = (0, mols.nbas) * 4\n\n if isinstance(scripts, str):\n scripts = [scripts]\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n dms = [dms]\n assert(len(scripts) == len(dms))\n\n #format scripts\n descript = []\n for script in scripts:\n dmsym, vsym = script.lower().split(',')[1].split('->')\n if hermi == 0:\n descript.append('->'.join((dmsym,'s1'+vsym)))\n else:\n descript.append('->'.join((dmsym,'s2'+vsym)))\n\n vs = _vhf.direct_bindm(intor, aosym, descript, dms, comp, atm, bas, env,\n shls_slice=shls_slice)\n if hermi != 0:\n for v in vs:\n if v.ndim == 3:\n for vi in v:\n pyscf.lib.hermi_triu(vi, hermi, inplace=True)\n else:\n pyscf.lib.hermi_triu(v, hermi, inplace=True)\n return vs\n\njk_build = get_jk\n\n\nif __name__ == '__main__':\n mol = gto.M(atom='H 0 -.5 0; H 0 .5 0', basis='cc-pvdz')\n\n nao = mol.nao_nr()\n dm = numpy.random.random((nao,nao))\n eri0 = mol.intor('int2e_sph').reshape((nao,)*4)\n vj = get_jk(mol, dm, 'ijkl,ji->kl')\n print(numpy.allclose(vj, numpy.einsum('ijkl,ji->kl', eri0, dm)))\n vj = get_jk(mol, dm, 'ijkl,ji->kl', aosym='s8')\n print(numpy.allclose(vj, numpy.einsum('ijkl,ji->kl', eri0, dm)))\n vk = get_jk(mol, dm, 'ijkl,jk->il', aosym='s8')\n print(numpy.allclose(vk, numpy.einsum('ijkl,jk->il', eri0, dm)))\n vj, vk = get_jk(mol, (dm,dm), ('ijkl,ji->kl','ijkl,li->kj'))\n eri1 = mol.intor('int2e_ip1_sph', comp=3).reshape([3]+[nao]*4)\n j1 = get_jk(mol, dm, 'ijkl,lk->ij', intor='int2e_ip1_sph', aosym='s2kl', comp=3)\n print(numpy.allclose(j1, numpy.einsum('xijkl,lk->xij', eri1, dm)))\n\n mol1 = gto.M(atom='He 2 0 0', basis='6-31g')\n nao1 = mol1.nao_nr()\n dm1 = numpy.random.random((nao1,nao1))\n eri0 = gto.conc_mol(mol, mol1).intor('int2e_sph').reshape([nao+nao1]*4)\n jcross = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', aosym='s4')\n ecoul = numpy.einsum('ij,ij', jcross, dm1)\n print(numpy.allclose(jcross, numpy.einsum('ijkl,lk->ij', eri0[nao:,nao:,:nao,:nao], dm)))\n print(ecoul-numpy.einsum('ijkl,lk,ij', eri0[nao:,nao:,:nao,:nao], dm, dm1))\n kcross = get_jk((mol1,mol,mol,mol1), dm, scripts='ijkl,jk->il')\n ex = numpy.einsum('ij,ji', kcross, dm1)\n print(numpy.allclose(kcross, numpy.einsum('ijkl,jk->il', eri0[nao:,:nao,:nao,nao:], dm)))\n 
print(ex-numpy.einsum('ijkl,jk,li', eri0[nao:,:nao,:nao,nao:], dm, dm1))\n\n eri1 = gto.conc_mol(mol, mol1).intor('int2e_ip1_sph',comp=3).reshape([3]+[nao+nao1]*4)\n j1cross = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', intor='int2e_ip1_sph', comp=3)\n print(numpy.allclose(j1cross, numpy.einsum('xijkl,lk->xij', eri1[:,nao:,nao:,:nao,:nao], dm)))\n j1part = get_jk((mol1,mol1,mol,mol), dm, scripts='ijkl,lk->ij', intor='int2e_ip1_sph', comp=3,\n shls_slice=(0,1,0,1,0,mol.nbas,0,mol.nbas))\n print(numpy.allclose(j1part, numpy.einsum('xijkl,lk->xij', eri1[:,nao:nao+1,nao:nao+1,:nao,:nao], dm)))\n",
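As a quick sanity check of the contraction-script interface documented above, here is a hedged usage sketch that builds the ordinary closed-shell J and K matrices with get_jk and compares them against explicit einsum contractions of the full ERI tensor. The import path pyscf.scf.jk is an assumption about where this module lives in recent pyscf releases.

import numpy
from pyscf import gto
from pyscf.scf.jk import get_jk   # assumed module path for the driver above

mol = gto.M(atom='H 0 -.5 0; H 0 .5 0', basis='cc-pvdz', verbose=0)
nao = mol.nao_nr()
numpy.random.seed(0)
dm = numpy.random.random((nao, nao))
dm = dm + dm.T                    # hermitian, density-like matrix

eri = mol.intor('int2e_sph').reshape((nao,)*4)
# One script per density matrix: Coulomb ('ji->kl') and exchange ('jk->il'),
# both evaluated with 8-fold AO permutation symmetry.
vj, vk = get_jk(mol, (dm, dm), ('ijkl,ji->kl', 'ijkl,jk->il'), aosym='s8')
print(numpy.allclose(vj, numpy.einsum('ijkl,ji->kl', eri, dm)))   # True
print(numpy.allclose(vk, numpy.einsum('ijkl,jk->il', eri, dm)))   # True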
"#!/usr/bin/env python\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport time\nimport ctypes\nimport tempfile\nimport numpy\nimport h5py\nimport pyscf.lib as lib\nfrom pyscf.lib import logger\nimport pyscf.ao2mo\nfrom pyscf.cc import _ccsd\n\n# t2,l2 as ijab\n\ndef kernel(mycc, eris, t1=None, t2=None, l1=None, l2=None,\n max_cycle=50, tol=1e-8, verbose=logger.INFO):\n cput0 = (time.clock(), time.time())\n if isinstance(verbose, logger.Logger):\n log = verbose\n else:\n log = logger.Logger(mycc.stdout, verbose)\n\n if t1 is None: t1 = mycc.t1\n if t2 is None: t2 = mycc.t2\n if l1 is None: l1 = t1\n if l2 is None: l2 = t2\n\n nocc, nvir = t1.shape\n saved = make_intermediates(mycc, t1, t2, eris)\n\n if mycc.diis:\n adiis = lib.diis.DIIS(mycc, mycc.diis_file)\n adiis.space = mycc.diis_space\n else:\n adiis = lambda t1,t2,*args: (t1, t2)\n cput0 = log.timer('CCSD lambda initialization', *cput0)\n\n conv = False\n for istep in range(max_cycle):\n l1new, l2new = update_amps(mycc, t1, t2, l1, l2, eris, saved)\n normt = numpy.linalg.norm(l1new-l1) + numpy.linalg.norm(l2new-l2)\n l1, l2 = l1new, l2new\n l1new = l2new = None\n if mycc.diis:\n l1, l2 = mycc.diis(l1, l2, istep, normt, 0, adiis)\n log.info('cycle = %d norm(lambda1,lambda2) = %.6g', istep+1, normt)\n cput0 = log.timer('CCSD iter', *cput0)\n if normt < tol:\n conv = True\n break\n return conv, l1, l2\n\n\n# l2, t2 as ijab\ndef make_intermediates(mycc, t1, t2, eris):\n log = logger.Logger(mycc.stdout, mycc.verbose)\n nocc, nvir = t1.shape\n nov = nocc * nvir\n foo = eris.fock[:nocc,:nocc]\n fov = eris.fock[:nocc,nocc:]\n fvv = eris.fock[nocc:,nocc:]\n\n class _Saved(object):\n pass\n saved = _Saved()\n\n# As we don't have l2 in memory, hold tau temporarily in memory\n w1 = fvv - numpy.einsum('ja,jb->ba', fov, t1)\n w2 = foo + numpy.einsum('ib,jb->ij', fov, t1)\n w3 = _cp(numpy.einsum('kc,jkbc->bj', fov, t2) * 2 + fov.T)\n w3 -= numpy.einsum('kc,kjbc->bj', fov, t2)\n w3 += reduce(numpy.dot, (t1.T, fov, t1.T))\n w4 = fov.copy()\n\n eris_ovvv = _cp(eris.ovvv)\n eris_ovvv = lib.unpack_tril(eris_ovvv.reshape(nov,-1))\n eris_ovvv = eris_ovvv.reshape(nocc,nvir,nvir,nvir)\n\n wovvv = numpy.empty((nocc,nvir,nvir,nvir))\n t2tmp = numpy.empty((nocc,nvir,nocc,nvir))\n for i in range(nocc):\n wovvv[i] = eris_ovvv[i].transpose(1,0,2) * 2\n t2tmp[i] = t2[i].transpose(2,0,1)\n #:wovvv += numpy.einsum('jabd,kjdc->kabc', eris_ovvv, t2) * -1.5\n tmp = lib.dot(t2tmp.reshape(nov,-1), wovvv.reshape(-1,nvir**2),\n -1.5/2).reshape(-1,nvir,nvir,nvir)\n g2ovvv = tmp\n for i in range(nocc):\n wovvv[i] -= eris_ovvv[i].transpose(1,2,0)\n wovvv[i] += tmp[i].transpose(1,2,0)\n g2ovvv[i] = eris_ovvv[i]*2\n g2ovvv[i] -= eris_ovvv[i].transpose(1,2,0)\n tmp = t2tmp = None\n\n w1 += numpy.einsum('jcba,jc->ba', eris_ovvv, t1*2)\n w1 -= numpy.einsum('jabc,jc->ba', eris_ovvv, t1)\n #:w3 += numpy.einsum('kdcb,kjdc->bj', eris_ovvv, theta)\n theta = numpy.empty(t2.shape)\n for i in range(nocc):\n theta[i] = t2[i] * 2\n theta[i] -= t2[i].transpose(0,2,1)\n lib.dot(eris_ovvv[i].reshape(-1,nvir).T,\n _cp(theta[i].reshape(nocc,-1)).T, 1, w3, 1)\n\n theta = _cp(theta.transpose(0,2,1,3))\n #:vkbca = numpy.einsum('jdca,kbjd->kbca', g2ovvv, theta)\n vkbca = lib.dot(_cp(theta.reshape(nov,-1)),\n g2ovvv.reshape(-1,nvir*nvir)).reshape(-1,nvir,nvir,nvir)\n for i in range(nocc):\n wovvv[i] += vkbca[i].transpose(2,0,1)\n wovvv[i] -= vkbca[i].transpose(2,1,0) * .5\n vkabc = None\n\n #:wOVov = numpy.einsum('jbcd,kd->jbkc', eris_ovvv, t1)\n #:wOvOv = numpy.einsum('jdcb,kd->jbkc', 
eris_ovvv, -t1)\n wOVov = lib.dot(eris_ovvv.reshape(-1,nvir),\n t1.T).reshape(-1,nvir,nvir,nocc).transpose(0,1,3,2).copy()\n for i in range(nocc):\n g2ovvv[i] = eris_ovvv[i].transpose(1,2,0) * 2\n wOvOv = lib.dot(g2ovvv.reshape(-1,nvir),\n -t1.T, .5).reshape(-1,nvir,nvir,nocc).transpose(0,1,3,2).copy()\n for i in range(nocc):\n g2ovvv[i] -= eris_ovvv[i].transpose(1,0,2)\n eris_ovov = _cp(_cp(eris.ovov).transpose(0,2,1,3))\n tau = _ccsd.make_tau(t2, t1, t1)\n #:wooov[:,j0:j1] = numpy.einsum('icbd,jkbd->ijkc', g2ovvv, tau)\n #:woooo[:,:,j0:j1] = numpy.einsum('icjd,klcd->ijkl', eris_ovov, tau)\n tmp = lib.dot(g2ovvv.reshape(-1,nvir**2), tau.reshape(-1,nvir**2).T)\n wooov = _cp(tmp.reshape(-1,nvir,nocc,nocc).transpose(0,2,3,1))\n woooo = lib.dot(eris_ovov.reshape(-1,nvir**2),\n tau.reshape(-1,nvir**2).T).reshape(-1,nocc,nocc,nocc)\n eris_ovov = eris_ovvv = g2ovvv = tau = tmp = None\n\n eris_ooov = _cp(eris.ooov)\n eris_ovoo = _cp(eris.ovoo)\n #:woooo += numpy.einsum('icjl,kc->ijkl', eris_ovoo, t1)\n #:wOVov += numpy.einsum('jblk,lc->jbkc', eris_ovoo, -t1)\n for i in range(nocc):\n woooo[i] += lib.dot(t1, eris_ovoo[i].reshape(nvir,-1)).reshape((nocc,)*3).transpose(1,0,2)\n lib.dot(eris_ovoo.reshape(-1,nocc), t1, -1, wOVov.reshape(-1,nvir), 1)\n #:wooov -= numpy.einsum('ibjl,lkcb->ijkc', eris_ovoo*1.5, t2)\n t2tmp = numpy.empty((nocc,nvir,nocc,nvir))\n for i in range(nocc):\n t2tmp[i] = t2[i].transpose(2,0,1)\n tmp_ooov = _cp(-eris_ooov.transpose(2,0,1,3)).reshape(-1,nov)\n lib.dot(tmp_ooov, t2tmp.reshape(nov,-1), 1.5, wooov.reshape(-1,nov), 1)\n t2tmp = None\n\n g2ooov, tmp_ooov = tmp_ooov.reshape(nocc,nocc,nocc,nvir), None\n g2ooov += eris_ooov * 2\n #:vikjc = numpy.einsum('iklb,jlcb->ikjc', g2ooov, theta)\n vikjc = lib.dot(g2ooov.reshape(-1,nov), theta.reshape(-1,nov).T)\n vikjc = vikjc.reshape(nocc,nocc,nocc,nvir)\n wooov += vikjc.transpose(0,2,1,3)\n wooov -= vikjc*.5\n g2ooov = vikjc = eris_ovoo = None\n\n w2 += numpy.einsum('ijkb,kb->ij', eris_ooov, t1) * 2\n w2 -= numpy.einsum('kjib,kb->ij', eris_ooov, t1)\n #:w3 -= numpy.einsum('kjlc,klbc->bj', eris_ooov, theta)\n for i in range(nocc):\n lib.dot(_cp(theta[i].transpose(1,2,0)).reshape(-1,nvir).T,\n eris_ooov[i].reshape(nocc,-1).T, -1, w3, 1)\n #:woooo += numpy.einsum('ikjc,lc->ijkl', eris_ooov, t1)\n #:wOvOv += numpy.einsum('jklb,lc->jbkc', eris_ooov, t1)\n woooo += lib.dot(eris_ooov.reshape(-1,nvir),\n t1.T).reshape((-1,nocc,nocc,nocc)).transpose(0,2,1,3)\n for i in range(nocc):\n lib.dot(_cp(eris_ooov[i].transpose(2,0,1)).reshape(-1,nocc),\n t1, 1, wOvOv[i].reshape(-1,nvir), 1)\n wooov[i] += eris_ooov[i].transpose(1,0,2)*2\n wooov[i] -= eris_ooov[i]\n eris_ooov = theta = None\n\n eris_ovov = _cp(eris.ovov)\n g2ovov = numpy.empty((nocc,nocc,nvir,nvir))\n for i in range(nocc):\n g2ovov[i] = eris_ovov[i].transpose(1,0,2)*2\n g2ovov[i] -= eris_ovov[i].transpose(1,2,0)\n tmpw4 = numpy.einsum('klcd,ld->kc', g2ovov, t1)\n #:w1 -= numpy.einsum('kcja,kjcb->ba', g2ovov, t2)\n w1 -= lib.dot(t2.reshape(-1,nvir).T, g2ovov.reshape(-1,nvir))\n w1 -= numpy.einsum('ja,jb->ba', tmpw4, t1)\n #:w2 += numpy.einsum('ibkc,jkbc->ij', g2ovov, t2)\n w2 += lib.dot(g2ovov.reshape(nocc,-1), t2.reshape(nocc,-1).T)\n w2 += numpy.einsum('ib,jb->ij', tmpw4, t1)\n w3 += reduce(numpy.dot, (t1.T, tmpw4, t1.T))\n w4 += tmpw4\n vOVov = eris_ovov.copy()\n #:vOVov += numpy.einsum('jbld,klcd->jbkc', g2ovov, t2)\n #:vOVov -= numpy.einsum('jbld,kldc->jbkc', eris_ovov, t2)\n lib.dot(_cp(g2ovov.transpose(0,2,1,3)).reshape(-1,nov),\n _cp(t2.transpose(0,2,1,3).reshape(nov,-1).T), 
1,\n vOVov.reshape(nov,-1), 1)\n lib.dot(eris_ovov.reshape(-1,nov),\n _cp(t2.transpose(0,3,1,2).reshape(nov,-1).T), -1,\n vOVov.reshape(nov,-1), 1)\n g2ovov = None\n\n #:tmp = numpy.einsum('jbld,kd->ljbk', eris_ovov, t1)\n #:wOVov -= numpy.einsum('ljbk,lc->jbkc', tmp, t1)\n #:tmp = numpy.einsum('jdlb,kd->ljbk', eris_ovov, t1)\n #:wOvOv += numpy.einsum('ljbk,lc->jbkc', tmp, t1)\n tmp = numpy.empty((nocc,nvir,nocc))\n for j in range(nocc):\n lib.dot(_cp(eris_ovov[j].transpose(1,0,2)).reshape(-1,nvir),\n t1.T, 1, tmp.reshape(-1,nocc))\n lib.dot(tmp.reshape(nocc,-1).T, t1, -1, wOVov[j].reshape(-1,nvir), 1)\n lib.dot(eris_ovov[j].reshape(nvir,-1).T, t1.T, 1,\n tmp.reshape(-1,nocc))\n lib.dot(tmp.reshape(nocc,-1).T, t1, 1, wOvOv[j].reshape(-1,nvir), 1)\n tmp = None\n\n #:vOvOv = numpy.einsum('jdlb,kldc->jbkc', eris_ovov, t2)\n ovovtmp = _cp(eris_ovov.transpose(0,3,2,1).reshape(-1,nov))\n vOvOv = numpy.empty((nocc,nvir,nocc,nvir))\n for j in range(nocc):\n lib.dot(t2[j].reshape(-1,nvir).T, ovovtmp.T, 1,\n vOvOv[j].reshape(nvir,-1))\n vOvOv[j] -= eris.oovv[j].transpose(2,0,1)\n ovovtmp = eris_ovov = None\n vOvOv = lib.transpose(vOvOv.reshape(nov,-1)).reshape(nocc,nvir,nocc,nvir)\n wOVov += vOVov\n wOvOv += vOvOv\n saved.wOVov = wOVov\n saved.wOvOv = wOvOv\n ovovtmp = wOVov = wOvOv = eris_ovov = None\n\n ov2 = vOVov*2 + vOvOv\n w3 += numpy.einsum('kcjb,kc->bj', ov2, t1)\n #:wooov += numpy.einsum('ibjc,kb->ijkc', ov2, t1)\n #:wovvv -= numpy.einsum('jakb,jc->kabc', ov2, t1)\n for i in range(nocc):\n wooov[i] += lib.dot(t1, ov2[i].reshape(nvir,-1)).reshape(nocc,nocc,nvir).transpose(1,0,2)\n lib.dot(_cp(ov2.transpose(0,2,1,3).reshape(nocc,-1)).T,\n t1, -1, wovvv.reshape(-1,nvir), 1)\n ov2 = None\n ov1 = vOvOv*2 + vOVov\n #:wooov -= numpy.einsum('ibkc,jb->ijkc', ov1, t1)\n #:wovvv += numpy.einsum('jakc,jb->kabc', ov1, t1)\n for i in range(nocc):\n lib.dot(t1, ov1[i].reshape(nvir,-1), -1, wooov[i].reshape(nocc,-1), 1)\n wovvv += lib.dot(_cp(ov1.reshape(nocc,-1)).T,\n t1).reshape(nvir,-1,nvir,nvir).transpose(1,0,3,2)\n ov1 = None\n\n woooo += _cp(eris.oooo).transpose(0,2,1,3)\n saved.woooo = woooo\n saved.wooov = wooov\n woooo = wooov = None\n\n w3 += numpy.einsum('bc,jc->bj', w1, t1)\n w3 -= numpy.einsum('kj,kb->bj', w2, t1)\n\n eris_ooov = _cp(eris.ooov)\n g2ooov = eris_ooov * 2\n g2ooov -= eris_ooov.transpose(2,0,1,3)\n #:tmp = numpy.einsum('kjla,jb->kabl', g2ooov, t1)\n #:wovvv = numpy.einsum('kabl,lc->kabc', tmp, t1)\n #:wovvv += numpy.einsum('kjla,jlbc->kabc', g2ooov, t2)\n tmp = lib.dot(g2ooov.reshape(nocc,-1).T, t1).reshape(-1,nocc,nvir,nvir).transpose(0,2,3,1)\n lib.dot(_cp(tmp.reshape(-1,nocc)), t1, 1, wovvv.reshape(-1,nvir), 1)\n tmp = None\n lib.dot(_cp(g2ooov.transpose(0,2,1,3).reshape(nocc**2,-1)).T,\n t2.reshape(nocc**2,-1), 1, wovvv.reshape(nov,-1), 1)\n g2ooov = eris_ooov = vOVov = vOvOv = None\n\n saved.wovvv = wovvv\n saved.w1 = w1\n saved.w2 = w2\n saved.w3 = w3\n saved.w4 = w4\n return saved\n\n\n# update L1, L2\ndef update_amps(mycc, t1, t2, l1, l2, eris=None, saved=None):\n if saved is None:\n saved = make_intermediates(mycc, t1, t2, eris)\n time1 = time0 = time.clock(), time.time()\n log = logger.Logger(mycc.stdout, mycc.verbose)\n nocc, nvir = t1.shape\n nov = nocc * nvir\n foo = eris.fock[:nocc,:nocc]\n fov = eris.fock[:nocc,nocc:]\n fvv = eris.fock[:nocc,:nocc]\n\n #:mba = numpy.einsum('klca,klcb->ba', l2, t2*2-t2.transpose(0,1,3,2))\n #:mij = numpy.einsum('ikcd,jkcd->ij', l2, t2*2-t2.transpose(0,1,3,2))\n #:theta = t2*2 - t2.transpose(0,1,3,2)\n theta = _ccsd.make_0132(t2, t2, 
2, -1)\n mba = lib.dot(theta.reshape(-1,nvir).T, l2.reshape(-1,nvir))\n mij = lib.dot(l2.reshape(nocc,-1), theta.reshape(nocc,-1).T)\n theta = None\n mba1 = numpy.einsum('jc,jb->bc', l1, t1) + mba\n mij1 = numpy.einsum('kb,jb->kj', l1, t1) + mij\n mia1 =(t1 + numpy.einsum('kc,jkbc->jb', l1, t2) * 2\n - numpy.einsum('kc,jkcb->jb', l1, t2)\n - reduce(numpy.dot, (t1, l1.T, t1))\n - numpy.einsum('bd,jd->jb', mba, t1)\n - numpy.einsum('lj,lb->jb', mij, t1))\n\n tmp = mycc.add_wvvVV(numpy.zeros_like(l1), l2, eris)\n l2new = numpy.empty((nocc,nocc,nvir,nvir))\n ij = 0\n for i in range(nocc):\n for j in range(i):\n tmp1 = tmp[ij] * .5 # *.5 because of l2+l2.transpose(1,0,3,2) later\n l2new[i,j] = tmp1\n l2new[j,i] = tmp1.T\n ij += 1\n l2new[i,i] = tmp[ij] * .5\n ij += 1\n l1new =(numpy.einsum('ijab,jb->ia', l2new, t1) * 4\n - numpy.einsum('jiab,jb->ia', l2new, t1) * 2)\n tmp = tmp1 = None\n\n l1new += eris.fock[:nocc,nocc:]\n l1new += numpy.einsum('ib,ba->ia', l1, saved.w1)\n l1new -= numpy.einsum('ja,ij->ia', l1, saved.w2)\n l1new -= numpy.einsum('ik,ka->ia', mij, saved.w4)\n l1new -= numpy.einsum('ca,ic->ia', mba, saved.w4)\n l1new += numpy.einsum('ijab,bj->ia', l2, saved.w3) * 2\n l1new -= numpy.einsum('ijba,bj->ia', l2, saved.w3)\n\n l2new += numpy.einsum('ia,jb->ijab', l1, saved.w4)\n #:l2new += numpy.einsum('jibc,ca->jiba', l2, saved.w1)\n #:l2new -= numpy.einsum('kiba,jk->jiba', l2, saved.w2)\n lib.dot(l2.reshape(-1,nvir), saved.w1, 1, l2new.reshape(-1,nvir), 1)\n lib.dot(saved.w2, l2.reshape(nocc,-1),-1, l2new.reshape(nocc,-1), 1)\n\n eris_ooov = _cp(eris.ooov)\n l1new -= numpy.einsum('jkia,kj->ia', eris_ooov, mij1) * 2\n l1new += numpy.einsum('ikja,kj->ia', eris_ooov, mij1)\n #:l2new -= numpy.einsum('ka,kijb->jiba', l1, eris_ooov)\n lib.dot(_cp(eris_ooov.transpose(0,2,1,3).reshape(nocc,-1)).T,\n l1, -1, l2new.reshape(-1,nvir), 1)\n eris_ooov = None\n\n tau = _ccsd.make_tau(t2, t1, t1)\n #:l2tau = numpy.einsum('ijcd,klcd->ijkl', l2, tau)\n l2tau = lib.dot(l2.reshape(nocc**2,-1),\n tau.reshape(nocc**2,-1).T).reshape((nocc,)*4)\n tau = None\n l2t1 = numpy.einsum('ijcd,kc->ijkd', l2, t1)\n\n eris_ovvv = _cp(eris.ovvv)\n eris_ovvv = lib.unpack_tril(eris_ovvv.reshape(nov,-1))\n eris_ovvv = eris_ovvv.reshape(nocc,nvir,nvir,nvir)\n\n l1new += numpy.einsum('iabc,bc->ia', eris_ovvv, mba1) * 2\n l1new -= numpy.einsum('ibca,bc->ia', eris_ovvv, mba1)\n #:l2new += numpy.einsum('ic,jbac->jiba', l1, eris_ovvv)\n tmp = lib.dot(l1, eris_ovvv.reshape(-1,nvir).T).reshape(nocc,-1,nvir,nvir)\n for i in range(nocc):\n l2new[i] += tmp[i].transpose(0,2,1)\n #:m4 = numpy.einsum('ijkd,kadb->ijab', l2t1, eris_ovvv)\n m4 = tmp\n lib.dot(_cp(l2t1.reshape(nocc*nocc,-1)),\n _cp(eris_ovvv.transpose(0,2,1,3).reshape(-1,nvir**2)),\n 1, m4.reshape(nocc*nocc,-1))\n l2new -= m4\n l1new -= numpy.einsum('ijab,jb->ia', m4, t1) * 2\n l1new -= numpy.einsum('ijab,ia->jb', m4, t1) * 2\n l1new += numpy.einsum('jiab,jb->ia', m4, t1)\n l1new += numpy.einsum('jiab,ia->jb', m4, t1)\n eris_ovvv = tmp = None\n\n eris_ovov = _cp(eris.ovov)\n l1new += numpy.einsum('jb,iajb->ia', l1, eris_ovov) * 2\n #:l2new -= numpy.einsum('jbic,ca->jiba', eris_ovov, mba1)\n #:l2new -= numpy.einsum('kajb,ik->ijab', eris_ovov, mij1)\n tmp = lib.dot(eris_ovov.reshape(-1,nvir), mba1).reshape(nocc,nvir,nocc,nvir)\n lib.dot(mij1, eris_ovov.reshape(nocc,-1), 1, tmp.reshape(nocc,-1), 1)\n tmp_oovv = numpy.empty((nocc,nocc,nvir,nvir))\n for i in range(nocc):\n tmp_oovv[i] = eris_ovov[i].transpose(1,0,2) * .5\n l2new[i] += tmp_oovv[i]\n l2new[i] -= 
tmp[i].transpose(1,0,2)\n tmp = None\n l1new += numpy.einsum('iajb,jb->ia', eris_ovov, mia1) * 2\n l1new -= numpy.einsum('ibja,jb->ia', eris_ovov, mia1)\n #:m4 = numpy.einsum('kalb,ijkl->ijab', eris_ovov, l2tau)\n lib.dot(l2tau.reshape(nocc*nocc,-1), tmp_oovv.reshape(-1,nvir**2),\n 1, m4.reshape(nocc**2,-1))\n l2new += m4\n l1new += numpy.einsum('ijab,jb->ia', m4, t1) * 4\n l1new -= numpy.einsum('ijba,jb->ia', m4, t1) * 2\n eris_ovov = m4 = tmp_oovv = None\n\n eris_oovv = _cp(eris.oovv)\n l1new -= numpy.einsum('jb,ijba->ia', l1, eris_oovv)\n eris_oovv = None\n\n saved_wooov = _cp(saved.wooov)\n #:l1new -= numpy.einsum('jkca,ijkc->ia', l2, saved_wooov)\n l1new -= lib.dot(saved_wooov.reshape(nocc,-1), l2.reshape(-1,nvir))\n saved_wovvv = _cp(saved.wovvv)\n #:l1new += numpy.einsum('kibc,kabc->ia', l2, saved_wovvv)\n for j in range(nocc):\n l1new += lib.dot(l2[j].reshape(nocc,-1),\n saved_wovvv[j].reshape(nvir,-1).T)\n saved_wooov = saved_wovvv = None\n\n saved_wOvOv = _cp(saved.wOvOv)\n tmp_ovov = _cp(saved.wOVov) * 2\n tmp_ovov += saved_wOvOv\n #:tmp = l2.transpose(0,2,1,3) - l2.transpose(0,3,1,2)*.5\n #:l2new += numpy.einsum('kcia,kcjb->jiba', tmp, tmp_ovov)\n tmp = numpy.empty((nocc,nvir,nocc,nvir))\n for i in range(nocc):\n tmp[i] = l2[i].transpose(2,0,1)*-.5\n tmp[i] += l2[i].transpose(1,0,2)\n tmp = lib.dot(tmp_ovov.reshape(-1,nov),\n tmp.reshape(nov,-1)).reshape(-1,nvir,nocc,nvir)\n #:tmp = numpy.einsum('jkca,ibkc->ijab', l2, saved_wOvOv)\n for i in range(nocc):\n l2new[i] += tmp[i].transpose(1,0,2)\n tmp_ovov[i] = l2[i].transpose(2,0,1)\n lib.dot(saved_wOvOv.reshape(-1,nov), tmp_ovov.reshape(nov,-1),\n 1, tmp.reshape(nov,-1))\n for i in range(nocc):\n l2new[i] += tmp[i].transpose(1,2,0)\n l2new[i] += tmp[i].transpose(1,0,2) * .5\n saved_wOvOv = tmp = tmp_ovov = None\n\n saved_woooo = _cp(saved.woooo)\n #:m3 = numpy.einsum('klab,ijkl->ijab', l2, saved_woooo)\n m3 = lib.dot(saved_woooo.reshape(-1,nocc**2),\n l2.reshape(nocc**2,-1), .5).reshape(-1,nocc,nvir,nvir)\n l2new += m3\n l1new += numpy.einsum('ijab,jb->ia', m3, t1) * 4\n l1new -= numpy.einsum('ijba,jb->ia', m3, t1) * 2\n saved_woooo = m3 = None\n\n mo_e = eris.fock.diagonal()\n eia = lib.direct_sum('i-j->ij', mo_e[:nocc], mo_e[nocc:])\n l1new /= eia\n l1new += l1\n\n# l2new = l2new + l2new.transpose(1,0,3,2)\n# l2new /= lib.direct_sum('ia+jb->ijab', eia, eia)\n# l2new += l2\n ij = 0\n for i in range(nocc):\n for j in range(i):\n dab = lib.direct_sum('a+b->ab', eia[i], eia[j])\n tmp = (l2new[i,j]+l2new[j,i].T) / dab + l2[i,j]\n l2new[i,j] = tmp\n l2new[j,i] = tmp.T\n ij += 1\n dab = lib.direct_sum('a+b->ab', eia[i], eia[i])\n l2new[i,i] = (l2new[i,i]+l2new[i,i].T)/dab + l2[i,i]\n ij += 1\n\n time0 = log.timer_debug1('update l1 l2', *time0)\n return l1new, l2new\n\ndef _cp(a):\n return numpy.array(a, copy=False, order='C')\n\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import scf\n from pyscf.cc import ccsd\n from pyscf import ao2mo\n\n mol = gto.M()\n mf = scf.RHF(mol)\n\n mcc = ccsd.CCSD(mf)\n\n numpy.random.seed(12)\n nocc = 5\n nmo = 12\n nvir = nmo - nocc\n eri0 = numpy.random.random((nmo,nmo,nmo,nmo))\n eri0 = ao2mo.restore(1, ao2mo.restore(8, eri0, nmo), nmo)\n fock0 = numpy.random.random((nmo,nmo))\n fock0 = fock0 + fock0.T + numpy.diag(range(nmo))*2\n t1 = numpy.random.random((nocc,nvir))\n t2 = numpy.random.random((nocc,nocc,nvir,nvir))\n t2 = t2 + t2.transpose(1,0,3,2)\n l1 = numpy.random.random((nocc,nvir))\n l2 = numpy.random.random((nocc,nocc,nvir,nvir))\n l2 = l2 + l2.transpose(1,0,3,2)\n\n eris 
= lambda:None\n eris.oooo = eri0[:nocc,:nocc,:nocc,:nocc].copy()\n eris.ooov = eri0[:nocc,:nocc,:nocc,nocc:].copy()\n eris.ovoo = eri0[:nocc,nocc:,:nocc,:nocc].copy()\n eris.oovv = eri0[:nocc,:nocc,nocc:,nocc:].copy()\n eris.ovov = eri0[:nocc,nocc:,:nocc,nocc:].copy()\n idx = numpy.tril_indices(nvir)\n eris.ovvv = eri0[:nocc,nocc:,nocc:,nocc:][:,:,idx[0],idx[1]].copy()\n eris.vvvv = pyscf.ao2mo.restore(4,eri0[nocc:,nocc:,nocc:,nocc:],nvir)\n eris.fock = fock0\n\n saved = make_intermediates(mcc, t1, t2, eris)\n l1new, l2new = update_amps(mcc, t1, t2, l1, l2, eris, saved)\n print(abs(l1new).sum()-38172.7896467303)\n print(numpy.dot(l1new.flatten(), numpy.arange(35)) - 739312.005491083)\n print(numpy.dot(l1new.flatten(), numpy.sin(numpy.arange(35)))-7019.50937051188)\n print(numpy.dot(numpy.sin(l1new.flatten()), numpy.arange(35))-69.6652346635955)\n\n print(abs(l2new).sum()-72035.4931071527)\n print(abs(l2new-l2new.transpose(1,0,3,2)).sum())\n print(numpy.dot(l2new.flatten(), numpy.arange(35**2)) - 48427109.5409886)\n print(numpy.dot(l2new.flatten(), numpy.sin(numpy.arange(35**2)))-137.758016736487)\n print(numpy.dot(numpy.sin(l2new.flatten()), numpy.arange(35**2))-507.656936701192)\n\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.atom = [\n [8 , (0. , 0. , 0.)],\n [1 , (0. , -0.757 , 0.587)],\n [1 , (0. , 0.757 , 0.587)]]\n\n mol.basis = 'cc-pvdz'\n mol.build()\n rhf = scf.RHF(mol)\n rhf.conv_tol = 1e-16\n rhf.scf()\n\n mcc = ccsd.CCSD(rhf)\n mcc.conv_tol = 1e-12\n ecc, t1, t2 = mcc.kernel()\n\n nmo = rhf.mo_energy.size\n fock0 = numpy.diag(rhf.mo_energy)\n nocc = mol.nelectron // 2\n nvir = nmo - nocc\n\n eris = mcc.ao2mo()\n conv, l1, l2 = kernel(mcc, eris, t1, t2, tol=1e-8)\n print(numpy.linalg.norm(l1)-0.0132626841292)\n print(numpy.linalg.norm(l2)-0.212575609057)\n\n import ccsd_rdm\n dm1 = ccsd_rdm.make_rdm1(mcc, t1, t2, l1, l2)\n dm2 = ccsd_rdm.make_rdm2(mcc, t1, t2, l1, l2)\n h1 = reduce(numpy.dot, (rhf.mo_coeff.T, rhf.get_hcore(), rhf.mo_coeff))\n eri = pyscf.ao2mo.full(rhf._eri, rhf.mo_coeff)\n eri = pyscf.ao2mo.restore(1, eri, nmo).reshape((nmo,)*4)\n e1 = numpy.einsum('pq,pq', h1, dm1)\n e2 = numpy.einsum('pqrs,pqrs', eri, dm2) * .5\n print(e1+e2+mol.energy_nuc() - rhf.e_tot - ecc)\n",
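The tail of update_amps above finishes with a quasi-Newton step: the accumulated residual is divided by orbital-energy differences and the old amplitudes are added back. A minimal numpy sketch of that step, with synthetic stand-ins for the pyscf arrays (lib.direct_sum('i-j->ij', ...) is just a broadcasted outer difference):

import numpy

nocc, nvir = 3, 4
mo_e = numpy.sort(numpy.random.rand(nocc + nvir))   # fake orbital energies, occupied lowest
l1 = numpy.random.rand(nocc, nvir)                  # old amplitudes
residual = numpy.random.rand(nocc, nvir)            # plays the role of l1new before division

# eia[i,a] = e_i - e_a, i occupied, a virtual
eia = mo_e[:nocc, None] - mo_e[None, nocc:]
l1_next = residual / eia + l1                       # same as l1new /= eia; l1new += l1

# For l2 the denominator is pairwise: dab[a,b] = eia[i,a] + eia[j,b],
# built one (i, j) block at a time in the loop above
i, j = 1, 0
dab = eia[i][:, None] + eia[j][None, :]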
"#!/usr/bin/env python\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nXC functional, the interface to xcfun (https://github.com/dftlibs/xcfun)\nU. Ekstrom et al, J. Chem. Theory Comput., 6, 1971\n'''\n\nimport copy\nimport ctypes\nimport math\nimport numpy\nfrom pyscf import lib\n\n_itrf = lib.load_library('libxcfun_itrf')\n\nXC = XC_CODES = {\n'SLATERX' : 0, # Slater LDA exchange\n'VWN5C' : 1, # VWN5 LDA Correlation functional\n'BECKEX' : 2, # Becke 88 exchange\n'BECKECORRX' : 3, # Becke 88 exchange correction\n'BECKESRX' : 4, # Short range Becke 88 exchange\n'OPTX' : 5, # OPTX Handy & Cohen exchange\n'LYPC' : 6, # LYP correlation\n'PBEX' : 7, # PBE Exchange Functional\n'REVPBEX' : 8, # Revised PBE Exchange Functional\n'RPBEX' : 9, # RPBE Exchange Functional\n'PBEC' : 10, # PBE correlation functional\n'SPBEC' : 11, # sPBE correlation functional\n'VWN_PBEC' : 12, # PBE correlation functional using VWN LDA correlation.\n#'RANGESEP_MU' : 16, # Error function range separation parameter (1/a0)\n'KTX' : 17, # KT exchange GGA correction\n#'TFK' : 18, # Thomas-Fermi Kinetic Energy Functional\n'PW91X' : 19, # Perdew-Wang 1991 GGA Exchange Functional\n#'PW91K' : 20, # PW91 GGA Kinetic Energy Functional\n'PW92C' : 21, # PW92 LDA correlation\n'M05X' : 22, # M05 exchange\n'M05X2X' : 23, # M05-2X exchange\n'M06X' : 24, # M06 exchange\n'M06X2X' : 25, # M06-2X exchange\n'M06LX' : 26, # M06-L exchange\n'M06HFX' : 27, # M06-HF exchange\n'BRX' : 28, # BR exchange. Becke-Roussels exchange functional.\n'M05X2C' : 29, # M05-2X Correlation\n'M05C' : 30, # M05 Correlation\n'M06C' : 31, # M06 Correlation\n'M06LC' : 32, # M06-L Correlation\n'M06X2C' : 33, # M06-2X Correlation\n'TPSSC' : 34, # TPSS original correlation functional\n'TPSSX' : 35, # TPSS original exchange functional\n'REVTPSSC' : 36, # Revised TPSS correlation functional\n'REVTPSSX' : 37, # Reviewed TPSS exchange functional\n#\n# alias\n#\n'SLATER' : 0, # SLATERX\n'LDA' : 0, # SLATERX\n'VWN' : 1, # VWN5C\n'VWN5' : 1, # VWN5C\n'B88' : 2, # BECKEX\n'LYP' : 6, # LYP correlation\n'P86' : None,\n'BLYP' : 'BECKEX + LYP',\n'BP86' : None,\n'BPW91' : 'BECKEX + PW91C',\n'BPW92' : 'BECKEX + PW92C',\n'OLYP' : '2.4832*SLATER - 1.43169*OPTX + LYP', # CPL, 341, 319\n'KT1' : '1.006*SLATER - .006*KTX + VWN5', # JCP, 119, 3015\n'KT2' : '1.07773*SLATER - .006*KTX + 0.576727*VWN5', # JCP, 119, 3015\n'KT3' : '2.021452*SLATER - .004*KTX - .925452*OPTX + .864409*LYP', # JCP, 121, 5654\n'PBE0' : '.25*HF + .75*PBEX + PBEC', # JCP, 110, 6158\n'PBE1PBE' : 'PBE0',\n'B3PW91' : None,\n'B3P86' : None,\n# Note, use VWN5 for B3LYP. It is different to the libxc default B3LYP\n'B3LYP' : 'B3LYP5',\n'B3LYP5' : '.2*HF + .08*SLATER + .72*BECKE + .81*LYP + .19*VWN5',\n'B3LYPG' : None, # B3LYP-VWN3 used by Gaussian and libxc\n'O3LYP' : '.1161*HF + .1129*SLATER + .8133*OPTX + .81*LYP + .19*VWN5', # Mol. Phys. 
99 607\n'M062X' : 'M06X2X, M062XC',\n'CAMB3LYP' : None,\n}\n\nLDA_IDS = set([0, 1, 13, 14, 15, 16, 18, 21])\nGGA_IDS = set([2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 17, 19, 20])\nMGGA_IDS = set([22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 35, 36, 37])\nMLGGA_IDS = set([28])\nHYB_XC = set(('PBE0' , 'PBE1PBE' , 'B3PW91' , 'B3P86' , 'B3LYP' ,\n 'B3LYPG' , 'O3LYP' , 'M062X' , 'CAMB3LYP',))\nMAX_DERIV_ORDER = 3\n\ndef xc_type(xc_code):\n if isinstance(xc_code, str):\n hyb, fn_facs = parse_xc(xc_code)\n else:\n fn_facs = [(xc_code, 1)] # mimic fn_facs\n if not fn_facs:\n return 'HF'\n elif all(xid in LDA_IDS for xid, val in fn_facs):\n return 'LDA'\n elif any(xid in MGGA_IDS or xid in MLGGA_IDS for xid, val in fn_facs):\n return 'MGGA'\n else:\n # all((xid in GGA_IDS or xid in LDA_IDS for xid, val in fn_fns)):\n # include hybrid_xc\n return 'GGA'\n\ndef is_lda(xc_code):\n return xc_type(xc_code) == 'LDA'\n\ndef is_hybrid_xc(xc_code):\n if isinstance(xc_code, str):\n return ('HF' in xc_code or xc_code in HYB_XC or\n hybrid_coeff(xc_code) != 0)\n elif isinstance(xc_code, int):\n return False\n else:\n return any((is_hybrid_xc(x) for x in xc_code))\n\ndef is_meta_gga(xc_code):\n return xc_type(xc_code) == 'MGGA'\n\ndef is_gga(xc_code):\n return xc_type(xc_code) == 'GGA'\n\ndef max_deriv_order(xc_code):\n hyb, fn_facs = parse_xc(xc_code)\n return MAX_DERIV_ORDER\n\ndef test_deriv_order(xc_code, deriv, raise_error=False):\n support = deriv <= max_deriv_order(xc_code)\n if not support and raise_error:\n raise NotImplementedError('xcfun library does not support derivative '\n 'order %d for %s' % (deriv, xc_code))\n return support\n\ndef hybrid_coeff(xc_code, spin=0):\n return parse_xc(xc_code)[0]\n\ndef parse_xc_name(xc_name):\n fn_facs = parse_xc(xc_name)[1]\n return fn_facs[0][0], fn_facs[1][0]\n\ndef parse_xc(description):\n '''Rules to input functional description:\n\n * The given functional description must be a one-line string.\n * The functional description is case-insensitive.\n * The functional description string has two parts, separated by \",\". The\n first part describes the exchange functional, the second is the correlation\n functional.\n\n - If \",\" was not appeared in string, the entire string is considered as\n X functional.\n - To neglect X functional (just apply C functional), leave blank in the\n first part, eg description=',vwn' for pure VWN functional\n\n * The functional name can be placed in arbitrary order. Two name needs to\n be separated by operators \"+\" or \"-\". Blank spaces are ignored.\n NOTE the parser only reads operators \"+\" \"-\" \"*\". / is not in support.\n * A functional name is associated with one factor. If the factor is not\n given, it is assumed equaling 1.\n * String \"HF\" stands for exact exchange (HF K matrix). 
It is allowed to\n put in C functional part.\n * Be careful with the xcfun convention on GGA functional, in which the LDA\n contribution is included.\n '''\n\n if isinstance(description, int):\n return 0, ((description, 1.))\n elif not isinstance(description, str): #isinstance(description, (tuple,list)):\n return parse_xc('%s,%s' % tuple(description))\n\n if ',' in description:\n x_code, c_code = description.replace(' ','').replace('_','').upper().split(',')\n else:\n x_code, c_code = description.replace(' ','').replace('_','').upper(), ''\n\n hyb = [0]\n fn_facs = []\n def parse_token(token, suffix):\n if token:\n if '*' in token:\n fac, key = token.split('*')\n if fac[0].isalpha():\n fac, key = key, fac\n fac = float(fac)\n else:\n fac, key = 1, token\n if key == 'HF':\n hyb[0] += fac\n elif key.isdigit():\n fn_facs.append((int(key), fac))\n else:\n if key in XC_CODES:\n x_id = XC_CODES[key]\n elif key+suffix in XC_CODES:\n x_id = XC_CODES[key+suffix]\n else:\n raise KeyError('Unknown key %s' % key)\n if isinstance(x_id, str):\n hyb1, fn_facs1 = parse_xc(x_id)\n# Recursively scale the composed functional, to support '0.5*b3lyp'\n hyb[0] += hyb1 * fac\n fn_facs.extend([(xid, c*fac) for xid, c in fn_facs1])\n elif x_id is None:\n raise NotImplementedError(key)\n else:\n fn_facs.append((x_id, fac))\n def remove_dup(fn_facs):\n fn_ids = []\n facs = []\n n = 0\n for key, val in fn_facs:\n if key in fn_ids:\n facs[fn_ids.index(key)] += val\n else:\n fn_ids.append(key)\n facs.append(val)\n n += 1\n return list(zip(fn_ids, facs))\n\n for token in x_code.replace('-', '+-').split('+'):\n parse_token(token, 'X')\n for token in c_code.replace('-', '+-').split('+'):\n parse_token(token, 'C')\n return hyb[0], remove_dup(fn_facs)\n\n\ndef eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, verbose=None):\n r'''Interface to call xcfun library to evaluate XC functional, potential\n and functional derivatives.\n\n See also :func:`pyscf.dft.libxc.eval_xc`\n '''\n hyb, fn_facs = parse_xc(xc_code)\n return _eval_xc(fn_facs, rho, spin, relativity, deriv, verbose)\n\nXC_D0 = 0\nXC_D1 = 1\nXC_D2 = 2\nXC_D3 = 3\nXC_D4 = 4\n\nXC_D00 = 0\nXC_D10 = 1\nXC_D01 = 2\nXC_D20 = 3\nXC_D11 = 4\nXC_D02 = 5\nXC_D30 = 6\nXC_D21 = 7\nXC_D12 = 8\nXC_D03 = 9\nXC_D40 = 10\nXC_D31 = 11\nXC_D22 = 12\nXC_D13 = 13\nXC_D04 = 14\n\nXC_D000 = 0\nXC_D100 = 1\nXC_D010 = 2\nXC_D001 = 3\nXC_D200 = 4\nXC_D110 = 5\nXC_D101 = 6\nXC_D020 = 7\nXC_D011 = 8\nXC_D002 = 9\nXC_D300 = 10\nXC_D210 = 11\nXC_D201 = 12\nXC_D120 = 13\nXC_D111 = 14\nXC_D102 = 15\nXC_D030 = 16\nXC_D021 = 17\nXC_D012 = 18\nXC_D003 = 19\nXC_D400 = 20\nXC_D310 = 21\nXC_D301 = 22\nXC_D220 = 23\nXC_D211 = 24\nXC_D202 = 25\nXC_D130 = 26\nXC_D121 = 27\nXC_D112 = 28\nXC_D103 = 29\nXC_D040 = 30\nXC_D031 = 31\nXC_D022 = 32\nXC_D013 = 33\nXC_D004 = 34\n\nXC_D00000 = 0\nXC_D10000 = 1\nXC_D01000 = 2\nXC_D00100 = 3\nXC_D00010 = 4\nXC_D00001 = 5\nXC_D20000 = 6\nXC_D11000 = 7\nXC_D10100 = 8\nXC_D10010 = 9\nXC_D10001 = 10\nXC_D02000 = 11\nXC_D01100 = 12\nXC_D01010 = 13\nXC_D01001 = 14\nXC_D00200 = 15\nXC_D00110 = 16\nXC_D00101 = 17\nXC_D00020 = 18\nXC_D00011 = 19\nXC_D00002 = 20\nXC_D30000 = 21\nXC_D21000 = 22\nXC_D20100 = 23\nXC_D20010 = 24\nXC_D20001 = 25\nXC_D12000 = 26\nXC_D11100 = 27\nXC_D11010 = 28\nXC_D11001 = 29\nXC_D10200 = 30\nXC_D10110 = 31\nXC_D10101 = 32\nXC_D10020 = 33\nXC_D10011 = 34\nXC_D10002 = 35\nXC_D03000 = 36\nXC_D02100 = 37\nXC_D02010 = 38\nXC_D02001 = 39\nXC_D01200 = 40\nXC_D01110 = 41\nXC_D01101 = 42\nXC_D01020 = 43\nXC_D01011 = 44\nXC_D01002 = 45\nXC_D00300 = 
46\nXC_D00210 = 47\nXC_D00201 = 48\nXC_D00120 = 49\nXC_D00111 = 50\nXC_D00102 = 51\nXC_D00030 = 52\nXC_D00021 = 53\nXC_D00012 = 54\nXC_D00003 = 55\nXC_D40000 = 56\nXC_D31000 = 57\nXC_D30100 = 58\nXC_D30010 = 59\nXC_D30001 = 60\nXC_D22000 = 61\nXC_D21100 = 62\nXC_D21010 = 63\nXC_D21001 = 64\nXC_D20200 = 65\nXC_D20110 = 66\nXC_D20101 = 67\nXC_D20020 = 68\nXC_D20011 = 69\nXC_D20002 = 70\nXC_D13000 = 71\nXC_D12100 = 72\nXC_D12010 = 73\nXC_D12001 = 74\nXC_D11200 = 75\nXC_D11110 = 76\nXC_D11101 = 77\nXC_D11020 = 78\nXC_D11011 = 79\nXC_D11002 = 80\nXC_D10300 = 81\nXC_D10210 = 82\nXC_D10201 = 83\nXC_D10120 = 84\nXC_D10111 = 85\nXC_D10102 = 86\nXC_D10030 = 87\nXC_D10021 = 88\nXC_D10012 = 89\nXC_D10003 = 90\nXC_D04000 = 91\nXC_D03100 = 92\nXC_D03010 = 93\nXC_D03001 = 94\nXC_D02200 = 95\nXC_D02110 = 96\nXC_D02101 = 97\nXC_D02020 = 98\nXC_D02011 = 99\nXC_D02002 = 100\nXC_D01300 = 101\nXC_D01210 = 102\nXC_D01201 = 103\nXC_D01120 = 104\nXC_D01111 = 105\nXC_D01102 = 106\nXC_D01030 = 107\nXC_D01021 = 108\nXC_D01012 = 109\nXC_D01003 = 110\nXC_D00400 = 111\nXC_D00310 = 112\nXC_D00301 = 113\nXC_D00220 = 114\nXC_D00211 = 115\nXC_D00202 = 116\nXC_D00130 = 117\nXC_D00121 = 118\nXC_D00112 = 119\nXC_D00103 = 120\nXC_D00040 = 121\nXC_D00031 = 122\nXC_D00022 = 123\nXC_D00013 = 124\nXC_D00004 = 125\n\nXC_D0000000 = 0\nXC_D1000000 = 1\nXC_D0100000 = 2\nXC_D0010000 = 3\nXC_D0001000 = 4\nXC_D0000100 = 5\nXC_D0000010 = 6\nXC_D0000001 = 7\nXC_D2000000 = 8\nXC_D1100000 = 9\nXC_D1010000 = 10\nXC_D1001000 = 11\nXC_D1000100 = 12\nXC_D1000010 = 13\nXC_D1000001 = 14\nXC_D0200000 = 15\nXC_D0110000 = 16\nXC_D0101000 = 17\nXC_D0100100 = 18\nXC_D0100010 = 19\nXC_D0100001 = 20\nXC_D0020000 = 21\nXC_D0011000 = 22\nXC_D0010100 = 23\nXC_D0010010 = 24\nXC_D0010001 = 25\nXC_D0002000 = 26\nXC_D0001100 = 27\nXC_D0001010 = 28\nXC_D0001001 = 29\nXC_D0000200 = 30\nXC_D0000110 = 31\nXC_D0000101 = 32\nXC_D0000020 = 33\nXC_D0000011 = 34\nXC_D0000002 = 35\nXC_D3000000 = 36\nXC_D2100000 = 37\nXC_D2010000 = 38\nXC_D2001000 = 39\nXC_D2000100 = 40\nXC_D2000010 = 41\nXC_D2000001 = 42\nXC_D1200000 = 43\nXC_D1110000 = 44\nXC_D1101000 = 45\nXC_D1100100 = 46\nXC_D1100010 = 47\nXC_D1100001 = 48\nXC_D1020000 = 49\nXC_D1011000 = 50\nXC_D1010100 = 51\nXC_D1010010 = 52\nXC_D1010001 = 53\nXC_D1002000 = 54\nXC_D1001100 = 55\nXC_D1001010 = 56\nXC_D1001001 = 57\nXC_D1000200 = 58\nXC_D1000110 = 59\nXC_D1000101 = 60\nXC_D1000020 = 61\nXC_D1000011 = 62\nXC_D1000002 = 63\nXC_D0300000 = 64\nXC_D0210000 = 65\nXC_D0201000 = 66\nXC_D0200100 = 67\nXC_D0200010 = 68\nXC_D0200001 = 69\nXC_D0120000 = 70\nXC_D0111000 = 71\nXC_D0110100 = 72\nXC_D0110010 = 73\nXC_D0110001 = 74\nXC_D0102000 = 75\nXC_D0101100 = 76\nXC_D0101010 = 77\nXC_D0101001 = 78\nXC_D0100200 = 79\nXC_D0100110 = 80\nXC_D0100101 = 81\nXC_D0100020 = 82\nXC_D0100011 = 83\nXC_D0100002 = 84\nXC_D0030000 = 85\nXC_D0021000 = 86\nXC_D0020100 = 87\nXC_D0020010 = 88\nXC_D0020001 = 89\nXC_D0012000 = 90\nXC_D0011100 = 91\nXC_D0011010 = 92\nXC_D0011001 = 93\nXC_D0010200 = 94\nXC_D0010110 = 95\nXC_D0010101 = 96\nXC_D0010020 = 97\nXC_D0010011 = 98\nXC_D0010002 = 99\nXC_D0003000 = 100\nXC_D0002100 = 101\nXC_D0002010 = 102\nXC_D0002001 = 103\nXC_D0001200 = 104\nXC_D0001110 = 105\nXC_D0001101 = 106\nXC_D0001020 = 107\nXC_D0001011 = 108\nXC_D0001002 = 109\nXC_D0000300 = 110\nXC_D0000210 = 111\nXC_D0000201 = 112\nXC_D0000120 = 113\nXC_D0000111 = 114\nXC_D0000102 = 115\nXC_D0000030 = 116\nXC_D0000021 = 117\nXC_D0000012 = 118\nXC_D0000003 = 119\n\ndef _eval_xc(fn_facs, rho, spin=0, relativity=0, deriv=1, verbose=None):\n 
assert(deriv < 4)\n if spin == 0:\n rho_u = rho_d = numpy.asarray(rho, order='C')\n else:\n rho_u = numpy.asarray(rho[0], order='C')\n rho_d = numpy.asarray(rho[1], order='C')\n\n if rho_u.ndim == 2:\n ngrids = rho_u.shape[1]\n else:\n ngrids = len(rho_u)\n\n fn_ids = [x[0] for x in fn_facs]\n facs = [x[1] for x in fn_facs]\n if all((is_lda(x) for x in fn_ids)): # LDA\n if spin == 0:\n nvar = 1\n else:\n nvar = 2\n elif any((is_meta_gga(x) for x in fn_ids)):\n raise RuntimeError('xcfun MGGA interface not correct')\n if spin == 0:\n nvar = 3\n else:\n nvar = 7\n else: # GGA\n if spin == 0:\n nvar = 2\n else:\n nvar = 5\n outlen = (math.factorial(nvar+deriv) //\n (math.factorial(nvar) * math.factorial(deriv)))\n outbuf = numpy.empty((ngrids,outlen))\n\n n = len(fn_ids)\n _itrf.XCFUN_eval_xc(ctypes.c_int(n),\n (ctypes.c_int*n)(*fn_ids), (ctypes.c_double*n)(*facs),\n ctypes.c_int(spin),\n ctypes.c_int(deriv), ctypes.c_int(ngrids),\n rho_u.ctypes.data_as(ctypes.c_void_p),\n rho_d.ctypes.data_as(ctypes.c_void_p),\n outbuf.ctypes.data_as(ctypes.c_void_p))\n\n outbuf = outbuf.T\n exc = outbuf[0]\n vxc = fxc = kxc = None\n if nvar == 1:\n if deriv > 0:\n vxc = (outbuf[1], None, None, None)\n if deriv > 1:\n fxc = (outbuf[2],) + (None,)*9\n if deriv > 2:\n kxc = (outbuf[3], None, None, None)\n elif nvar == 2:\n if spin == 0: # GGA\n if deriv > 0:\n vxc = (outbuf[1], outbuf[2], None, None)\n if deriv > 1:\n fxc = (outbuf[3], outbuf[4], outbuf[5],) + (None,)*7\n if deriv > 2:\n kxc = outbuf[6:10]\n else: # LDA\n if deriv > 0:\n vxc = (outbuf[1:3].T, None, None, None)\n if deriv > 1:\n fxc = (outbuf[3:6].T,) + (None,)*9\n if deriv > 2:\n kxc = (outbuf[6:10].T, None, None, None)\n elif nvar == 5:\n if deriv > 0:\n vxc = (outbuf[1:3].T, outbuf[3:6].T, None, None)\n if deriv > 1:\n fxc = (outbuf[[XC_D20000,XC_D11000,XC_D02000]].T,\n outbuf[[XC_D10100,XC_D10010,XC_D10001,\n XC_D01100,XC_D01010,XC_D01001]].T,\n outbuf[[XC_D00200,XC_D00110,XC_D00101,XC_D00020,XC_D00011,XC_D00002]].T) + (None,)*7\n if deriv > 2:\n kxc = (outbuf[[XC_D30000,XC_D21000,XC_D12000,XC_D03000]].T,\n outbuf[[XC_D20100,XC_D20010,XC_D20001,\n XC_D11100,XC_D11010,XC_D11001,\n XC_D02100,XC_D02010,XC_D02001]].T,\n outbuf[[XC_D10200,XC_D10110,XC_D10101,XC_D10020,XC_D10011,XC_D10002,\n XC_D01200,XC_D01110,XC_D01101,XC_D01020,XC_D01011,XC_D01002]].T,\n outbuf[[XC_D00300,XC_D00210,XC_D00201,XC_D00120,XC_D00111,\n XC_D00102,XC_D00030,XC_D00021,XC_D00012,XC_D00003]].T)\n# MGGA/MLGGA: Note the MLGGA interface are not implemented. MGGA only needs 3\n# input arguments. 
To make the interface compatible with libxc, treat MGGA as\n# MLGGA\n elif nvar == 3:\n if deriv > 0:\n vxc = (outbuf[1], outbuf[2], numpy.zeros_like(outbuf[1]), outbuf[3])\n if deriv > 1:\n fxc = (outbuf[XC_D200], outbuf[XC_D110], outbuf[XC_D020],\n None, outbuf[XC_D002], None, outbuf[XC_D101], None, None, outbuf[XC_D011])\n if deriv > 2:\n kxc = (outbuf[XC_D300], outbuf[XC_D210], outbuf[XC_D120], outbuf[XC_D030],\n outbuf[XC_D201], outbuf[XC_D111], outbuf[XC_D102],\n outbuf[XC_D021], outbuf[XC_D012], outbuf[XC_D003])\n elif nvar == 7:\n if deriv > 0:\n vxc = (outbuf[1:3].T, outbuf[3:6].T, None, outbuf[6:8].T)\n if deriv > 1:\n fxc = (outbuf[[XC_D2000000,XC_D1100000,XC_D0200000]].T,\n outbuf[[XC_D1010000,XC_D1001000,XC_D1000100,\n XC_D0110000,XC_D0101000,XC_D0100100]].T,\n outbuf[[XC_D0020000,XC_D0011000,XC_D0010100,\n XC_D0002000,XC_D0001100,XC_D0000200]].T,\n None,\n outbuf[[XC_D0000020,XC_D0000011,XC_D0000002]].T,\n None,\n outbuf[[XC_D1000010,XC_D1000001,XC_D0100010,XC_D0100001]].T,\n None, None,\n outbuf[[XC_D0010010,XC_D0010001,XC_D0001010,XC_D0001001,\n XC_D0000110,XC_D0000101]].T)\n if deriv > 2:\n kxc = (outbuf[[XC_D3000000,XC_D2100000,XC_D1200000,XC_D0300000]].T,\n outbuf[[XC_D2010000,XC_D2001000,XC_D2000100,\n XC_D1110000,XC_D1101000,XC_D1100100,\n XC_D0210000,XC_D0201000,XC_D0200100]].T,\n outbuf[[XC_D1020000,XC_D1011000,XC_D1010100,XC_D1002000,XC_D1001100,XC_D1000200,\n XC_D0120000,XC_D0111000,XC_D0110100,XC_D0102000,XC_D0101100,XC_D0100200]].T,\n outbuf[[XC_D0030000,XC_D0021000,XC_D0020100,XC_D0012000,XC_D0011100,\n XC_D0010200,XC_D0003000,XC_D0002100,XC_D0001200,XC_D0000300]].T,\n outbuf[[XC_D2000010,XC_D2000001,XC_D1100010,XC_D1100001,XC_D0200010,XC_D0200001]].T,\n outbuf[[XC_D1010010,XC_D1010001,XC_D1001010,XC_D1001001,XC_D1000110,XC_D1000101,\n XC_D0110010,XC_D0110001,XC_D0101010,XC_D0101001,XC_D0100110,XC_D0100101]].T,\n outbuf[[XC_D1000020,XC_D1000011,XC_D1000002,XC_D0100020,XC_D0100011,XC_D0100002]].T,\n outbuf[[XC_D0020010,XC_D0020001,XC_D0011010,XC_D0011001,XC_D0010110,XC_D0010101,\n XC_D0002010,XC_D0002001,XC_D0001110,XC_D0001101,XC_D0000210,XC_D0000201]].T,\n outbuf[[XC_D0010020,XC_D0010011,XC_D0010002,\n XC_D0001020,XC_D0001011,XC_D0001002,\n XC_D0000120,XC_D0000111,XC_D0000102]].T,\n outbuf[[XC_D0000030,XC_D0000021,XC_D0000012,XC_D0000003]].T)\n return exc, vxc, fxc, kxc\n\n\ndef define_xc_(ni, description, xctype='LDA', hyb=0):\n '''Define XC functional. See also :func:`eval_xc` for the rules of input description.\n\n Args:\n ni : an instance of :class:`_NumInt`\n\n description : str\n A string to describe the linear combination of different XC functionals.\n The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.\n If \"HF\" was appeared in the string, it stands for the exact exchange.\n\n Examples:\n\n >>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz')\n >>> mf = dft.RKS(mol)\n >>> define_xc_(mf._numint, '.2*HF + .08*LDA + .72*B88, .81*LYP + .19*VWN')\n >>> mf.kernel()\n -76.3783361189611\n >>> define_xc_(mf._numint, 'LDA*.08 + .72*B88 + .2*HF, .81*LYP + .19*VWN')\n >>> mf.kernel()\n -76.3783361189611\n >>> def eval_xc(xc_code, rho, *args, **kwargs):\n ... exc = 0.01 * rho**2\n ... vrho = 0.01 * 2 * rho\n ... vxc = (vrho, None, None, None)\n ... fxc = None # 2nd order functional derivative\n ... kxc = None # 3rd order functional derivative\n ... 
return exc, vxc, fxc, kxc\n >>> define_xc_(mf._numint, eval_xc, xctype='LDA')\n >>> mf.kernel()\n 48.8525211046668\n '''\n if isinstance(description, str):\n ni.eval_xc = lambda xc_code, rho, *args, **kwargs: \\\n eval_xc(description, rho, *args, **kwargs)\n ni.hybrid_coeff = lambda *args, **kwargs: hybrid_coeff(description)\n ni._xc_type = lambda *args: xc_type(description)\n\n elif callable(description):\n ni.eval_xc = description\n ni.hybrid_coeff = lambda *args, **kwargs: hyb\n ni._xc_type = lambda *args: xctype\n else:\n raise RuntimeError('Unknown description %s' % description)\n return ni\n\ndef define_xc(ni, description):\n return define_xc_(copy.copy(ni), description)\ndefine_xc.__doc__ = define_xc_.__doc__\n\n\nif __name__ == '__main__':\n from pyscf import gto, dft\n mol = gto.M(\n atom = [\n [\"O\" , (0. , 0. , 0.)],\n [1 , (0. , -0.757 , 0.587)],\n [1 , (0. , 0.757 , 0.587)] ],\n basis = '6311g',)\n mf = dft.RKS(mol)\n mf._numint.libxc = dft.xcfun\n print(mf.kernel() - -75.8503877483363)\n\n mf.xc = 'b88,lyp'\n print(mf.kernel() - -76.3969707800463)\n\n mf.xc = 'b3lyp'\n print(mf.kernel() - -76.3969707800463)\n\n",
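parse_xc above folds the rules from its docstring into nested closures over XC_CODES. A stripped-down, self-contained sketch of the same tokenization (TOY_CODES is a made-up subset; the real parser also recurses into composite aliases such as 'B3LYP' and raises KeyError on unknown names):

TOY_CODES = {'SLATERX': 0, 'VWN5C': 1, 'BECKEX': 2, 'LYPC': 6}

def toy_parse_xc(description):
    # first part before the comma is exchange, second is correlation
    parts = (description.replace(' ', '').upper().split(',') + [''])[:2]
    hyb, fn_facs = 0.0, []
    for part, suffix in zip(parts, 'XC'):
        # '-' becomes '+-' so a single split on '+' keeps the sign with the token
        for token in part.replace('-', '+-').split('+'):
            if not token:
                continue
            if '*' in token:
                fac, key = token.split('*')
                if fac[0].isalpha():        # allow both '.08*LDA' and 'LDA*.08'
                    fac, key = key, fac
            else:
                fac, key = '1', token
            if key == 'HF':                 # exact exchange feeds the hybrid coefficient
                hyb += float(fac)
            else:
                xid = TOY_CODES.get(key, TOY_CODES.get(key + suffix))
                fn_facs.append((xid, float(fac)))
    return hyb, fn_facs

print(toy_parse_xc('.2*HF + .08*SLATER, .81*LYP'))   # (0.2, [(0, 0.08), (6, 0.81)])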
"#!/usr/bin/env python\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nAnalytic Fourier transformation AO-pair value for PBC\n'''\n\nimport ctypes\nimport numpy\nfrom pyscf import lib\nfrom pyscf import gto\nfrom pyscf.gto.ft_ao import ft_ao as mol_ft_ao\nfrom pyscf.pbc.lib.kpts_helper import is_zero, gamma_point\n\nlibpbc = lib.load_library('libpbc')\n\n#\n# \\int mu*nu*exp(-ik*r) dr\n#\ndef ft_aopair(cell, Gv, shls_slice=None, aosym='s1',\n b=None, gxyz=None, Gvbase=None, kpti_kptj=numpy.zeros((2,3)),\n q=None, intor='GTO_ft_ovlp_sph', comp=1, verbose=None):\n r'''\n FT transform AO pair\n \\sum_T exp(-i k_j * T) \\int exp(-i(G+q)r) i(r) j(r-T) dr^3\n '''\n kpti, kptj = kpti_kptj\n if q is None:\n q = kptj - kpti\n val = _ft_aopair_kpts(cell, Gv, shls_slice, aosym, b, gxyz, Gvbase,\n q, kptj.reshape(1,3), intor, comp)\n return val[0]\n\n# NOTE buffer out must be initialized to 0\n# gxyz is the index for Gvbase\ndef _ft_aopair_kpts(cell, Gv, shls_slice=None, aosym='s1',\n b=None, gxyz=None, Gvbase=None, q=numpy.zeros(3),\n kptjs=numpy.zeros((1,3)), intor='GTO_ft_ovlp_sph', comp=1,\n out=None):\n r'''\n FT transform AO pair\n \\sum_T exp(-i k_j * T) \\int exp(-i(G+q)r) i(r) j(r-T) dr^3\n\n The return array holds the AO pair\n corresponding to the kpoints given by kptjs\n '''\n q = numpy.reshape(q, 3)\n kptjs = numpy.asarray(kptjs, order='C').reshape(-1,3)\n nGv = Gv.shape[0]\n GvT = numpy.asarray(Gv.T, order='C')\n GvT += q.reshape(-1,1)\n\n if (gxyz is None or b is None or Gvbase is None or (abs(q).sum() > 1e-9)\n# backward compatibility for pyscf-1.2, in which the argument Gvbase is gs\n or (Gvbase is not None and isinstance(Gvbase[0], (int, numpy.integer)))):\n p_gxyzT = lib.c_null_ptr()\n p_gs = (ctypes.c_int*3)(0,0,0)\n p_b = (ctypes.c_double*1)(0)\n eval_gz = 'GTO_Gv_general'\n else:\n if abs(b-numpy.diag(b.diagonal())).sum() < 1e-8:\n eval_gz = 'GTO_Gv_orth'\n else:\n eval_gz = 'GTO_Gv_nonorth'\n gxyzT = numpy.asarray(gxyz.T, order='C', dtype=numpy.int32)\n p_gxyzT = gxyzT.ctypes.data_as(ctypes.c_void_p)\n b = numpy.hstack((b.ravel(), q) + Gvbase)\n p_b = b.ctypes.data_as(ctypes.c_void_p)\n p_gs = (ctypes.c_int*3)(*[len(x) for x in Gvbase])\n\n Ls = cell.get_lattice_Ls()\n expkL = numpy.exp(1j * numpy.dot(kptjs, Ls.T))\n\n atm, bas, env = gto.conc_env(cell._atm, cell._bas, cell._env,\n cell._atm, cell._bas, cell._env)\n ao_loc = gto.moleintor.make_loc(bas, intor)\n if shls_slice is None:\n shls_slice = (0, cell.nbas, cell.nbas, cell.nbas*2)\n else:\n shls_slice = (shls_slice[0], shls_slice[1],\n cell.nbas+shls_slice[2], cell.nbas+shls_slice[3])\n ni = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]\n nj = ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]\n nkpts = len(kptjs)\n nimgs = len(Ls)\n shape = (nkpts, comp, ni, nj, nGv)\n\n# Theoretically, hermitian symmetry can be also found for kpti == kptj:\n# f_ji(G) = \\int f_ji exp(-iGr) = \\int f_ij^* exp(-iGr) = [f_ij(-G)]^*\n# hermi operation needs reordering the axis-0. 
It is inefficient.\n if aosym == 's1hermi': # Symmetry for Gamma point\n assert(is_zero(q) and is_zero(kptjs) and ni == nj)\n elif aosym == 's2':\n i0 = ao_loc[shls_slice[0]]\n i1 = ao_loc[shls_slice[1]]\n nij = i1*(i1+1)//2 - i0*(i0+1)//2\n shape = (nkpts, comp, nij, nGv)\n\n drv = libpbc.PBC_ft_latsum_drv\n intor = getattr(libpbc, intor)\n eval_gz = getattr(libpbc, eval_gz)\n if nkpts == 1:\n fill = getattr(libpbc, 'PBC_ft_fill_nk1'+aosym)\n else:\n fill = getattr(libpbc, 'PBC_ft_fill_k'+aosym)\n out = numpy.ndarray(shape, dtype=numpy.complex128, buffer=out)\n\n drv(intor, eval_gz, fill, out.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(nkpts), ctypes.c_int(comp), ctypes.c_int(nimgs),\n Ls.ctypes.data_as(ctypes.c_void_p), expkL.ctypes.data_as(ctypes.c_void_p),\n (ctypes.c_int*4)(*shls_slice), ao_loc.ctypes.data_as(ctypes.c_void_p),\n GvT.ctypes.data_as(ctypes.c_void_p), p_b, p_gxyzT, p_gs, ctypes.c_int(nGv),\n atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(cell.natm),\n bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(cell.nbas),\n env.ctypes.data_as(ctypes.c_void_p))\n\n if aosym == 's1hermi':\n for i in range(1,ni):\n out[:,:,:i,i] = out[:,:,i,:i]\n out = numpy.rollaxis(out, -1, 2)\n if comp == 1:\n out = out[:,0]\n return out\n\ndef ft_ao(mol, Gv, shls_slice=None, b=None,\n gxyz=None, Gvbase=None, kpt=numpy.zeros(3), verbose=None):\n if gamma_point(kpt):\n return mol_ft_ao(mol, Gv, shls_slice, b, gxyz, Gvbase, verbose)\n else:\n kG = Gv + kpt\n return mol_ft_ao(mol, kG, shls_slice, None, None, None, verbose)\n\nif __name__ == '__main__':\n import pyscf.pbc.gto as pgto\n import pyscf.dft.numint\n from pyscf.pbc import tools\n\n L = 5.\n n = 10\n cell = pgto.Cell()\n cell.a = numpy.diag([L,L,L])\n cell.gs = numpy.array([n,n,n])\n\n cell.atom = '''C 1.3 .2 .3\n C .1 .1 1.1\n '''\n cell.basis = 'ccpvdz'\n #cell.basis = {'C': [[0, (2.4, .1, .6), (1.0,.8, .4)], [1, (1.1, 1)]]}\n #cell.basis = {'C': [[0, (2.4, 1)]]}\n cell.unit = 'B'\n #cell.verbose = 4\n cell.build(0,0)\n #cell.nimgs = (2,2,2)\n\n ao2 = ft_aopair(cell, cell.Gv)\n nao = cell.nao_nr()\n coords = pyscf.pbc.dft.gen_grid.gen_uniform_grids(cell)\n aoR = pyscf.pbc.dft.numint.eval_ao(cell, coords)\n aoR2 = numpy.einsum('ki,kj->kij', aoR.conj(), aoR)\n ngs = aoR.shape[0]\n\n for i in range(nao):\n for j in range(nao):\n ao2ref = tools.fft(aoR2[:,i,j], cell.gs) * cell.vol/ngs\n print(i, j, numpy.linalg.norm(ao2ref - ao2[:,i,j]))\n\n aoG = ft_ao(cell, cell.Gv)\n for i in range(nao):\n aoref = tools.fft(aoR[:,i], cell.gs) * cell.vol/ngs\n print(i, numpy.linalg.norm(aoref - aoG[:,i]))\n\n"
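The __main__ block above validates the analytic ft_aopair/ft_ao values against a brute-force FFT of grid data. The same consistency argument in one dimension, using a Gaussian whose continuous Fourier transform is known in closed form (self-contained numpy, no pyscf; the box and grid parameters are arbitrary):

import numpy

a, L, N = 1.0, 30.0, 256                 # Gaussian exponent, box length, grid points
dx = L / N
x = -L / 2 + dx * numpy.arange(N)
f = numpy.exp(-a * x**2)

# angular frequencies of the discrete grid, and the grid estimate of the FT:
# int f(x) exp(-i k x) dx  ~=  dx * exp(-i k x0) * FFT(f)
k = 2 * numpy.pi * numpy.fft.fftfreq(N, d=dx)
ft_grid = dx * numpy.exp(-1j * k * x[0]) * numpy.fft.fft(f)

# closed form: the FT of exp(-a x^2) is sqrt(pi/a) exp(-k^2 / (4a))
ft_exact = numpy.sqrt(numpy.pi / a) * numpy.exp(-k**2 / (4 * a))
print(abs(ft_grid - ft_exact).max())     # ~1e-14 for this box and resolution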
] | [
[
"numpy.empty"
],
[
"numpy.rollaxis",
"numpy.asarray",
"numpy.ndarray",
"numpy.all",
"numpy.empty"
],
[
"numpy.random.random",
"numpy.einsum",
"numpy.random.seed",
"numpy.tril_indices",
"numpy.cumsum",
"numpy.linalg.norm",
"numpy.all",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.dot",
"numpy.random.random",
"numpy.allclose",
"numpy.einsum",
"numpy.random.seed",
"numpy.tril_indices",
"numpy.eye",
"numpy.linalg.norm"
],
[
"numpy.allclose"
],
[
"numpy.random.random",
"numpy.einsum",
"numpy.asarray",
"numpy.array",
"numpy.zeros"
],
[
"numpy.diag",
"numpy.random.random",
"numpy.random.seed",
"numpy.einsum",
"numpy.tril_indices",
"numpy.arange",
"numpy.linalg.norm",
"numpy.zeros_like",
"numpy.array",
"numpy.empty"
],
[
"numpy.asarray",
"numpy.zeros_like",
"numpy.empty"
],
[
"numpy.rollaxis",
"numpy.diag",
"numpy.dot",
"numpy.reshape",
"numpy.asarray",
"numpy.ndarray",
"numpy.linalg.norm",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mcflugen/plume | [
"7fc65ba9461fece372eef4b2bee9ba6e72f42d19"
] | [
"setup.py"
] | [
"from setuptools import setup, find_packages\nfrom distutils.extension import Extension\n\nimport numpy as np\nimport cython_gsl\nimport versioneer\n\n\ndef read_requirements():\n import os\n\n path = os.path.dirname(os.path.abspath(__file__))\n requirements_file = os.path.join(path, 'requirements.txt')\n try:\n with open(requirements_file, 'r') as req_fp:\n requires = req_fp.read().split()\n except IOError:\n return []\n else:\n return [require.split() for require in requires]\n\n\nsetup(name='plume',\n version=versioneer.get_version(),\n description='A hypopycnal sediment-carrying plume entering the ocean',\n author='Eric Hutton',\n author_email='[email protected]',\n url='http://csdms.colorado.edu',\n install_requires=read_requirements(),\n setup_requires=['setuptools', ],\n packages=find_packages(),\n include_dirs = [np.get_include(), cython_gsl.get_include()],\n entry_points={\n 'console_scripts': [\n 'plume=plume.cli:main',\n ],\n },\n ext_modules = [\n Extension('plume.ext.centerline',\n ['plume/ext/centerline.pyx'],\n extra_compile_args=['-O3'],\n libraries=cython_gsl.get_libraries(),\n library_dirs=[cython_gsl.get_library_dir()],\n include_dirs=[cython_gsl.get_cython_include_dir()])],\n cmdclass=versioneer.get_cmdclass(),\n)\n"
] | [
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ru-Xiang/x-deeplearning | [
"781545783a4e2bbbda48fc64318fb2c6d8bbb3cc",
"781545783a4e2bbbda48fc64318fb2c6d8bbb3cc",
"781545783a4e2bbbda48fc64318fb2c6d8bbb3cc"
] | [
"xdl-algorithm-solution/Rocket/script/rnn.py",
"xdl/test/python/unit_test/test_constant.py",
"blaze/thirdparty/onnx/onnx-1.2.2/onnx/backend/test/case/node/pool_op_common.py"
] | [
"# Copyright (C) 2016-2018 Alibaba Group Holding Limited\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"RNN helpers for TensorFlow models.\n\n\n@@bidirectional_dynamic_rnn\n@@dynamic_rnn\n@@raw_rnn\n@@static_rnn\n@@static_state_saving_rnn\n@@static_bidirectional_rnn\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.util import nest\n\n\n# pylint: disable=protected-access\n_concat = rnn_cell_impl._concat\nassert_like_rnncell = rnn_cell_impl.assert_like_rnncell\n# pylint: enable=protected-access\n\n\ndef _transpose_batch_time(x):\n \"\"\"Transpose the batch and time dimensions of a Tensor.\n\n Retains as much of the static shape information as possible.\n\n Args:\n x: A tensor of rank 2 or higher.\n\n Returns:\n x transposed along the first two dimensions.\n\n Raises:\n ValueError: if `x` is rank 1 or lower.\n \"\"\"\n x_static_shape = x.get_shape()\n if x_static_shape.ndims is not None and x_static_shape.ndims < 2:\n raise ValueError(\n \"Expected input tensor %s to have rank at least 2, but saw shape: %s\" %\n (x, x_static_shape))\n x_rank = array_ops.rank(x)\n x_t = array_ops.transpose(\n x, array_ops.concat(\n ([1, 0], math_ops.range(2, x_rank)), axis=0))\n x_t.set_shape(\n tensor_shape.TensorShape([\n x_static_shape[1].value, x_static_shape[0].value\n ]).concatenate(x_static_shape[2:]))\n return x_t\n\n\ndef _best_effort_input_batch_size(flat_input):\n \"\"\"Get static input batch size if available, with fallback to the dynamic one.\n\n Args:\n flat_input: An iterable of time major input Tensors of shape [max_time,\n batch_size, ...]. 
All inputs should have compatible batch sizes.\n\n Returns:\n The batch size in Python integer if available, or a scalar Tensor otherwise.\n\n Raises:\n ValueError: if there is any input with an invalid shape.\n \"\"\"\n for input_ in flat_input:\n shape = input_.shape\n if shape.ndims is None:\n continue\n if shape.ndims < 2:\n raise ValueError(\n \"Expected input tensor %s to have rank at least 2\" % input_)\n batch_size = shape[1].value\n if batch_size is not None:\n return batch_size\n # Fallback to the dynamic batch size of the first input.\n return array_ops.shape(flat_input[0])[1]\n\n\ndef _infer_state_dtype(explicit_dtype, state):\n \"\"\"Infer the dtype of an RNN state.\n\n Args:\n explicit_dtype: explicitly declared dtype or None.\n state: RNN's hidden state. Must be a Tensor or a nested iterable containing\n Tensors.\n\n Returns:\n dtype: inferred dtype of hidden state.\n\n Raises:\n ValueError: if `state` has heterogeneous dtypes or is empty.\n \"\"\"\n if explicit_dtype is not None:\n return explicit_dtype\n elif nest.is_sequence(state):\n inferred_dtypes = [element.dtype for element in nest.flatten(state)]\n if not inferred_dtypes:\n raise ValueError(\"Unable to infer dtype from empty state.\")\n all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])\n if not all_same:\n raise ValueError(\n \"State has tensors of different inferred_dtypes. Unable to infer a \"\n \"single representative dtype.\")\n return inferred_dtypes[0]\n else:\n return state.dtype\n\n\n# pylint: disable=unused-argument\ndef _rnn_step(\n time, sequence_length, min_sequence_length, max_sequence_length,\n zero_output, state, call_cell, state_size, skip_conditionals=False):\n \"\"\"Calculate one step of a dynamic RNN minibatch.\n\n Returns an (output, state) pair conditioned on the sequence_lengths.\n When skip_conditionals=False, the pseudocode is something like:\n\n if t >= max_sequence_length:\n return (zero_output, state)\n if t < min_sequence_length:\n return call_cell()\n\n # Selectively output zeros or output, old state or new state depending\n # on if we've finished calculating each row.\n new_output, new_state = call_cell()\n final_output = np.vstack([\n zero_output if time >= sequence_lengths[r] else new_output_r\n for r, new_output_r in enumerate(new_output)\n ])\n final_state = np.vstack([\n state[r] if time >= sequence_lengths[r] else new_state_r\n for r, new_state_r in enumerate(new_state)\n ])\n return (final_output, final_state)\n\n Args:\n time: Python int, the current time step\n sequence_length: int32 `Tensor` vector of size [batch_size]\n min_sequence_length: int32 `Tensor` scalar, min of sequence_length\n max_sequence_length: int32 `Tensor` scalar, max of sequence_length\n zero_output: `Tensor` vector of shape [output_size]\n state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,\n or a list/tuple of such tensors.\n call_cell: lambda returning tuple of (new_output, new_state) where\n new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.\n new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.\n state_size: The `cell.state_size` associated with the state.\n skip_conditionals: Python bool, whether to skip using the conditional\n calculations. 
This is useful for `dynamic_rnn`, where the input tensor\n matches `max_sequence_length`, and using conditionals just slows\n everything down.\n\n Returns:\n A tuple of (`final_output`, `final_state`) as given by the pseudocode above:\n final_output is a `Tensor` matrix of shape [batch_size, output_size]\n final_state is either a single `Tensor` matrix, or a tuple of such\n matrices (matching length and shapes of input `state`).\n\n Raises:\n ValueError: If the cell returns a state tuple whose length does not match\n that returned by `state_size`.\n \"\"\"\n\n # Convert state to a list for ease of use\n flat_state = nest.flatten(state)\n flat_zero_output = nest.flatten(zero_output)\n\n def _copy_one_through(output, new_output):\n # If the state contains a scalar value we simply pass it through.\n if output.shape.ndims == 0:\n return new_output\n copy_cond = (time >= sequence_length)\n with ops.colocate_with(new_output):\n return array_ops.where(copy_cond, output, new_output)\n\n def _copy_some_through(flat_new_output, flat_new_state):\n # Use broadcasting select to determine which values should get\n # the previous state & zero output, and which values should get\n # a calculated state & output.\n flat_new_output = [\n _copy_one_through(zero_output, new_output)\n for zero_output, new_output in zip(flat_zero_output, flat_new_output)]\n flat_new_state = [\n _copy_one_through(state, new_state)\n for state, new_state in zip(flat_state, flat_new_state)]\n return flat_new_output + flat_new_state\n\n def _maybe_copy_some_through():\n \"\"\"Run RNN step. Pass through either no or some past state.\"\"\"\n new_output, new_state = call_cell()\n\n nest.assert_same_structure(state, new_state)\n\n flat_new_state = nest.flatten(new_state)\n flat_new_output = nest.flatten(new_output)\n return control_flow_ops.cond(\n # if t < min_seq_len: calculate and return everything\n time < min_sequence_length, lambda: flat_new_output + flat_new_state,\n # else copy some of it through\n lambda: _copy_some_through(flat_new_output, flat_new_state))\n\n # TODO(ebrevdo): skipping these conditionals may cause a slowdown,\n # but benefits from removing cond() and its gradient. We should\n # profile with and without this switch here.\n if skip_conditionals:\n # Instead of using conditionals, perform the selective copy at all time\n # steps. 
This is faster when max_seq_len is equal to the number of unrolls\n # (which is typical for dynamic_rnn).\n new_output, new_state = call_cell()\n nest.assert_same_structure(state, new_state)\n new_state = nest.flatten(new_state)\n new_output = nest.flatten(new_output)\n final_output_and_state = _copy_some_through(new_output, new_state)\n else:\n empty_update = lambda: flat_zero_output + flat_state\n final_output_and_state = control_flow_ops.cond(\n # if t >= max_seq_len: copy all state through, output zeros\n time >= max_sequence_length, empty_update,\n # otherwise calculation is required: copy some or all of it through\n _maybe_copy_some_through)\n\n if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):\n raise ValueError(\"Internal error: state and output were not concatenated \"\n \"correctly.\")\n final_output = final_output_and_state[:len(flat_zero_output)]\n final_state = final_output_and_state[len(flat_zero_output):]\n\n for output, flat_output in zip(final_output, flat_zero_output):\n output.set_shape(flat_output.get_shape())\n for substate, flat_substate in zip(final_state, flat_state):\n substate.set_shape(flat_substate.get_shape())\n\n final_output = nest.pack_sequence_as(\n structure=zero_output, flat_sequence=final_output)\n final_state = nest.pack_sequence_as(\n structure=state, flat_sequence=final_state)\n\n return final_output, final_state\n\n\ndef _reverse_seq(input_seq, lengths):\n \"\"\"Reverse a list of Tensors up to specified lengths.\n\n Args:\n input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)\n or nested tuples of tensors.\n lengths: A `Tensor` of dimension batch_size, containing lengths for each\n sequence in the batch. If \"None\" is specified, simply reverses\n the list.\n\n Returns:\n time-reversed sequence\n \"\"\"\n if lengths is None:\n return list(reversed(input_seq))\n\n flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)\n\n flat_results = [[] for _ in range(len(input_seq))]\n for sequence in zip(*flat_input_seq):\n input_shape = tensor_shape.unknown_shape(\n ndims=sequence[0].get_shape().ndims)\n for input_ in sequence:\n input_shape.merge_with(input_.get_shape())\n input_.set_shape(input_shape)\n\n # Join into (time, batch_size, depth)\n s_joined = array_ops.stack(sequence)\n\n # Reverse along dimension 0\n s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)\n # Split again into list\n result = array_ops.unstack(s_reversed)\n for r, flat_result in zip(result, flat_results):\n r.set_shape(input_shape)\n flat_result.append(r)\n\n results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)\n for input_, flat_result in zip(input_seq, flat_results)]\n return results\n\n\ndef bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,\n initial_state_fw=None, initial_state_bw=None,\n dtype=None, parallel_iterations=None,\n swap_memory=False, time_major=False, scope=None):\n \"\"\"Creates a dynamic version of bidirectional recurrent neural network.\n\n Takes input and builds independent forward and backward RNNs. The input_size\n of forward and backward cell must match. 
The initial state for both directions\n is zero by default (but can be set optionally) and no intermediate states are\n ever returned -- the network is fully unrolled for the given (passed in)\n length(s) of the sequence(s) or completely unrolled if length(s) is not\n given.\n\n Args:\n cell_fw: An instance of RNNCell, to be used for forward direction.\n cell_bw: An instance of RNNCell, to be used for backward direction.\n inputs: The RNN inputs.\n If time_major == False (default), this must be a tensor of shape:\n `[batch_size, max_time, ...]`, or a nested tuple of such elements.\n If time_major == True, this must be a tensor of shape:\n `[max_time, batch_size, ...]`, or a nested tuple of such elements.\n sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,\n containing the actual lengths for each of the sequences in the batch.\n If not provided, all batch entries are assumed to be full sequences; and\n time reversal is applied from time `0` to `max_time` for each sequence.\n initial_state_fw: (optional) An initial state for the forward RNN.\n This must be a tensor of appropriate type and shape\n `[batch_size, cell_fw.state_size]`.\n If `cell_fw.state_size` is a tuple, this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.\n initial_state_bw: (optional) Same as for `initial_state_fw`, but using\n the corresponding properties of `cell_bw`.\n dtype: (optional) The data type for the initial states and expected output.\n Required if initial_states are not provided or RNN states have a\n heterogeneous dtype.\n parallel_iterations: (Default: 32). The number of iterations to run in\n parallel. Those operations which do not have any temporal dependency\n and can be run in parallel, will be. This parameter trades off\n time for space. Values >> 1 use more memory but take less time,\n while smaller values use less memory but computations take longer.\n swap_memory: Transparently swap the tensors produced in forward inference\n but needed for back prop from GPU to CPU. This allows training RNNs\n which would typically not fit on a single GPU, with very minimal (or no)\n performance penalty.\n time_major: The shape format of the `inputs` and `outputs` Tensors.\n If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.\n If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.\n Using `time_major = True` is a bit more efficient because it avoids\n transposes at the beginning and end of the RNN calculation. However,\n most TensorFlow data is batch-major, so by default this function\n accepts input and emits output in batch-major form.\n scope: VariableScope for the created subgraph; defaults to\n \"bidirectional_rnn\"\n\n Returns:\n A tuple (outputs, output_states) where:\n outputs: A tuple (output_fw, output_bw) containing the forward and\n the backward rnn output `Tensor`.\n If time_major == False (default),\n output_fw will be a `Tensor` shaped:\n `[batch_size, max_time, cell_fw.output_size]`\n and output_bw will be a `Tensor` shaped:\n `[batch_size, max_time, cell_bw.output_size]`.\n If time_major == True,\n output_fw will be a `Tensor` shaped:\n `[max_time, batch_size, cell_fw.output_size]`\n and output_bw will be a `Tensor` shaped:\n `[max_time, batch_size, cell_bw.output_size]`.\n It returns a tuple instead of a single concatenated `Tensor`, unlike\n in the `bidirectional_rnn`. 
If the concatenated one is preferred,\n the forward and backward outputs can be concatenated as\n `tf.concat(outputs, 2)`.\n output_states: A tuple (output_state_fw, output_state_bw) containing\n the forward and the backward final states of bidirectional rnn.\n\n Raises:\n TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.\n \"\"\"\n\n \n assert_like_rnncell(\"cell_fw\", cell_fw)\n assert_like_rnncell(\"cell_bw\", cell_bw)\n with vs.variable_scope(scope or \"bidirectional_rnn\"):\n # Forward direction\n with vs.variable_scope(\"fw\") as fw_scope:\n output_fw, output_state_fw = dynamic_rnn(\n cell=cell_fw, inputs=inputs, sequence_length=sequence_length,\n initial_state=initial_state_fw, dtype=dtype,\n parallel_iterations=parallel_iterations, swap_memory=swap_memory,\n time_major=time_major, scope=fw_scope)\n\n # Backward direction\n if not time_major:\n time_dim = 1\n batch_dim = 0\n else:\n time_dim = 0\n batch_dim = 1\n\n def _reverse(input_, seq_lengths, seq_dim, batch_dim):\n if seq_lengths is not None:\n return array_ops.reverse_sequence(\n input=input_, seq_lengths=seq_lengths,\n seq_dim=seq_dim, batch_dim=batch_dim)\n else:\n return array_ops.reverse(input_, axis=[seq_dim])\n\n with vs.variable_scope(\"bw\") as bw_scope:\n inputs_reverse = _reverse(\n inputs, seq_lengths=sequence_length,\n seq_dim=time_dim, batch_dim=batch_dim)\n tmp, output_state_bw = dynamic_rnn(\n cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,\n initial_state=initial_state_bw, dtype=dtype,\n parallel_iterations=parallel_iterations, swap_memory=swap_memory,\n time_major=time_major, scope=bw_scope)\n\n output_bw = _reverse(\n tmp, seq_lengths=sequence_length,\n seq_dim=time_dim, batch_dim=batch_dim)\n\n outputs = (output_fw, output_bw)\n output_states = (output_state_fw, output_state_bw)\n\n return (outputs, output_states)\n\n\ndef dynamic_rnn(cell, inputs, att_scores=None, sequence_length=None, initial_state=None,\n dtype=None, parallel_iterations=None, swap_memory=False,\n time_major=False, scope=None):\n \"\"\"Creates a recurrent neural network specified by RNNCell `cell`.\n\n Performs fully dynamic unrolling of `inputs`.\n\n Example:\n\n ```python\n # create a BasicRNNCell\n rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)\n\n # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]\n\n # defining initial state\n initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)\n\n # 'state' is a tensor of shape [batch_size, cell_state_size]\n outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data,\n initial_state=initial_state,\n dtype=tf.float32)\n ```\n\n ```python\n # create 2 LSTMCells\n rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]\n\n # create a RNN cell composed sequentially of a number of RNNCells\n multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)\n\n # 'outputs' is a tensor of shape [batch_size, max_time, 256]\n # 'state' is a N-tuple where N is the number of LSTMCells containing a\n # tf.contrib.rnn.LSTMStateTuple for each cell\n outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,\n inputs=data,\n dtype=tf.float32)\n ```\n\n\n Args:\n cell: An instance of RNNCell.\n inputs: The RNN inputs.\n If `time_major == False` (default), this must be a `Tensor` of shape:\n `[batch_size, max_time, ...]`, or a nested tuple of such\n elements.\n If `time_major == True`, this must be a `Tensor` of shape:\n `[max_time, batch_size, ...]`, or a nested tuple of such\n elements.\n This may also be a (possibly nested) tuple of 
Tensors satisfying\n this property. The first two dimensions must match across all the inputs,\n but otherwise the ranks and other shape components may differ.\n In this case, input to `cell` at each time-step will replicate the\n structure of these tuples, except for the time dimension (from which the\n time is taken).\n The input to `cell` at each time step will be a `Tensor` or (possibly\n nested) tuple of Tensors each with dimensions `[batch_size, ...]`.\n sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.\n Used to copy-through state and zero-out outputs when past a batch\n element's sequence length. So it's more for correctness than performance.\n initial_state: (optional) An initial state for the RNN.\n If `cell.state_size` is an integer, this must be\n a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.\n If `cell.state_size` is a tuple, this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell.state_size`.\n dtype: (optional) The data type for the initial state and expected output.\n Required if initial_state is not provided or RNN state has a heterogeneous\n dtype.\n parallel_iterations: (Default: 32). The number of iterations to run in\n parallel. Those operations which do not have any temporal dependency\n and can be run in parallel, will be. This parameter trades off\n time for space. Values >> 1 use more memory but take less time,\n while smaller values use less memory but computations take longer.\n swap_memory: Transparently swap the tensors produced in forward inference\n but needed for back prop from GPU to CPU. This allows training RNNs\n which would typically not fit on a single GPU, with very minimal (or no)\n performance penalty.\n time_major: The shape format of the `inputs` and `outputs` Tensors.\n If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.\n If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.\n Using `time_major = True` is a bit more efficient because it avoids\n transposes at the beginning and end of the RNN calculation. However,\n most TensorFlow data is batch-major, so by default this function\n accepts input and emits output in batch-major form.\n scope: VariableScope for the created subgraph; defaults to \"rnn\".\n\n Returns:\n A pair (outputs, state) where:\n\n outputs: The RNN output `Tensor`.\n\n If time_major == False (default), this will be a `Tensor` shaped:\n `[batch_size, max_time, cell.output_size]`.\n\n If time_major == True, this will be a `Tensor` shaped:\n `[max_time, batch_size, cell.output_size]`.\n\n Note, if `cell.output_size` is a (possibly nested) tuple of integers\n or `TensorShape` objects, then `outputs` will be a tuple having the\n same structure as `cell.output_size`, containing Tensors having shapes\n corresponding to the shape data in `cell.output_size`.\n\n state: The final state. If `cell.state_size` is an int, this\n will be shaped `[batch_size, cell.state_size]`. If it is a\n `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.\n If it is a (possibly nested) tuple of ints or `TensorShape`, this will\n be a tuple having the corresponding shapes. 
If cells are `LSTMCells`\n `state` will be a tuple containing a `LSTMStateTuple` for each cell.\n\n Raises:\n TypeError: If `cell` is not an instance of RNNCell.\n ValueError: If inputs is None or an empty list.\n \"\"\"\n\n assert_like_rnncell(\"cell\", cell)\n\n # By default, time_major==False and inputs are batch-major: shaped\n # [batch, time, depth]\n # For internal calculations, we transpose to [time, batch, depth]\n flat_input = nest.flatten(inputs)\n\n if not time_major:\n # (B,T,D) => (T,B,D)\n flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]\n flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)\n\n parallel_iterations = parallel_iterations or 32\n if sequence_length is not None:\n sequence_length = math_ops.to_int32(sequence_length)\n if sequence_length.get_shape().ndims not in (None, 1):\n raise ValueError(\n \"sequence_length must be a vector of length batch_size, \"\n \"but saw shape: %s\" % sequence_length.get_shape())\n sequence_length = array_ops.identity( # Just to find it in the graph.\n sequence_length, name=\"sequence_length\")\n\n # Create a new scope in which the caching device is either\n # determined by the parent scope, or is set to place the cached\n # Variable using the same placement as for the rest of the RNN.\n with vs.variable_scope(scope or \"rnn\") as varscope:\n if varscope.caching_device is None:\n varscope.set_caching_device(lambda op: op.device)\n batch_size = _best_effort_input_batch_size(flat_input)\n\n if initial_state is not None:\n state = initial_state\n else:\n if not dtype:\n raise ValueError(\"If there is no initial_state, you must give a dtype.\")\n state = cell.zero_state(batch_size, dtype)\n\n def _assert_has_shape(x, shape):\n x_shape = array_ops.shape(x)\n packed_shape = array_ops.stack(shape)\n return control_flow_ops.Assert(\n math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),\n [\"Expected shape for Tensor %s is \" % x.name,\n packed_shape, \" but saw shape: \", x_shape])\n\n if sequence_length is not None:\n # Perform some shape validation\n with ops.control_dependencies(\n [_assert_has_shape(sequence_length, [batch_size])]):\n sequence_length = array_ops.identity(\n sequence_length, name=\"CheckSeqLen\")\n\n inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)\n\n (outputs, final_state) = _dynamic_rnn_loop(\n cell,\n inputs,\n state,\n parallel_iterations=parallel_iterations,\n swap_memory=swap_memory,\n att_scores = att_scores,\n sequence_length=sequence_length,\n dtype=dtype)\n\n # Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].\n # If we are performing batch-major calculations, transpose output back\n # to shape [batch, time, depth]\n if not time_major:\n # (T,B,D) => (B,T,D)\n outputs = nest.map_structure(_transpose_batch_time, outputs)\n\n return (outputs, final_state)\n\n\ndef _dynamic_rnn_loop(cell,\n inputs,\n initial_state,\n parallel_iterations,\n swap_memory,\n att_scores = None,\n sequence_length=None,\n dtype=None):\n \"\"\"Internal implementation of Dynamic RNN.\n\n Args:\n cell: An instance of RNNCell.\n inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested\n tuple of such elements.\n initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if\n `cell.state_size` is a tuple, then this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell.state_size`.\n parallel_iterations: Positive Python int.\n swap_memory: A Python boolean\n sequence_length: (optional) An `int32` `Tensor` of 
shape [batch_size].\n    dtype: (optional) Expected dtype of output. If not specified, inferred from\n      initial_state.\n\n  Returns:\n    Tuple `(final_outputs, final_state)`.\n    final_outputs:\n      A `Tensor` of shape `[time, batch_size, cell.output_size]`.  If\n      `cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`\n      objects, then this returns a (possibly nested) tuple of Tensors matching\n      the corresponding shapes.\n    final_state:\n      A `Tensor`, or possibly nested tuple of Tensors, matching in length\n      and shapes to `initial_state`.\n\n  Raises:\n    ValueError: If the input depth cannot be inferred via shape inference\n      from the inputs.\n  \"\"\"\n  state = initial_state\n  assert isinstance(parallel_iterations, int), \"parallel_iterations must be int\"\n\n  state_size = cell.state_size\n\n  flat_input = nest.flatten(inputs)\n  flat_output_size = nest.flatten(cell.output_size)\n\n  # Construct an initial output\n  input_shape = array_ops.shape(flat_input[0])\n  time_steps = input_shape[0]\n  batch_size = _best_effort_input_batch_size(flat_input)\n\n  inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)\n                           for input_ in flat_input)\n\n  const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]\n\n  for shape in inputs_got_shape:\n    if not shape[2:].is_fully_defined():\n      raise ValueError(\n          \"Input size (depth of inputs) must be accessible via shape inference,\"\n          \" but saw value None.\")\n    got_time_steps = shape[0].value\n    got_batch_size = shape[1].value\n    if const_time_steps != got_time_steps:\n      raise ValueError(\n          \"Time steps is not the same for all the elements in the input in a \"\n          \"batch.\")\n    if const_batch_size != got_batch_size:\n      raise ValueError(\n          \"Batch_size is not the same for all the elements in the input.\")\n\n  # Prepare dynamic conditional copying of state & output\n  def _create_zero_arrays(size):\n    size = _concat(batch_size, size)\n    return array_ops.zeros(\n        array_ops.stack(size), _infer_state_dtype(dtype, state))\n\n  flat_zero_output = tuple(_create_zero_arrays(output)\n                           for output in flat_output_size)\n  zero_output = nest.pack_sequence_as(structure=cell.output_size,\n                                      flat_sequence=flat_zero_output)\n\n  if sequence_length is not None:\n    min_sequence_length = math_ops.reduce_min(sequence_length)\n    max_sequence_length = math_ops.reduce_max(sequence_length)\n\n  time = array_ops.constant(0, dtype=dtypes.int32, name=\"time\")\n\n  with ops.name_scope(\"dynamic_rnn\") as scope:\n    base_name = scope\n\n  def _create_ta(name, dtype):\n    return tensor_array_ops.TensorArray(dtype=dtype,\n                                        size=time_steps,\n                                        tensor_array_name=base_name + name)\n\n  output_ta = tuple(_create_ta(\"output_%d\" % i,\n                               _infer_state_dtype(dtype, state))\n                    for i in range(len(flat_output_size)))\n  input_ta = tuple(_create_ta(\"input_%d\" % i, flat_input[i].dtype)\n                   for i in range(len(flat_input)))\n\n  input_ta = tuple(ta.unstack(input_)\n                   for ta, input_ in zip(input_ta, flat_input))\n\n  def _time_step(time, output_ta_t, state, att_scores=None):\n    \"\"\"Take a time step of the dynamic RNN.\n\n    Args:\n      time: int32 scalar Tensor.\n      output_ta_t: List of `TensorArray`s that represent the output.\n      state: nested tuple of vector tensors that represent the state.\n\n    Returns:\n      The tuple (time + 1, output_ta_t with updated flow, new_state).\n    \"\"\"\n\n    input_t = tuple(ta.read(time) for ta in input_ta)\n    # Restore some shape information\n    for input_, shape in zip(input_t, inputs_got_shape):\n      input_.set_shape(shape[1:])\n\n    input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)\n    if 
att_scores is not None:\n        att_score = att_scores[:, time, :]\n        call_cell = lambda: cell(input_t, state, att_score)\n    else:\n        call_cell = lambda: cell(input_t, state)\n\n    if sequence_length is not None:\n      (output, new_state) = _rnn_step(\n          time=time,\n          sequence_length=sequence_length,\n          min_sequence_length=min_sequence_length,\n          max_sequence_length=max_sequence_length,\n          zero_output=zero_output,\n          state=state,\n          call_cell=call_cell,\n          state_size=state_size,\n          skip_conditionals=True)\n    else:\n      (output, new_state) = call_cell()\n\n    # Pack state if using state tuples\n    output = nest.flatten(output)\n\n    output_ta_t = tuple(\n        ta.write(time, out) for ta, out in zip(output_ta_t, output))\n    if att_scores is not None:\n        return (time + 1, output_ta_t, new_state, att_scores)\n    else:\n        return (time + 1, output_ta_t, new_state)\n\n  if att_scores is not None:  \n      _, output_final_ta, final_state, _ = control_flow_ops.while_loop(\n          cond=lambda time, *_: time < time_steps,\n          body=_time_step,\n          loop_vars=(time, output_ta, state, att_scores),\n          parallel_iterations=parallel_iterations,\n          swap_memory=swap_memory)\n  else:\n      _, output_final_ta, final_state = control_flow_ops.while_loop(\n          cond=lambda time, *_: time < time_steps,\n          body=_time_step,\n          loop_vars=(time, output_ta, state),\n          parallel_iterations=parallel_iterations,\n          swap_memory=swap_memory)\n\n  # Unpack final output if not using output tuples.\n  final_outputs = tuple(ta.stack() for ta in output_final_ta)\n\n  # Restore some shape information\n  for output, output_size in zip(final_outputs, flat_output_size):\n    shape = _concat(\n        [const_time_steps, const_batch_size], output_size, static=True)\n    output.set_shape(shape)\n\n  final_outputs = nest.pack_sequence_as(\n      structure=cell.output_size, flat_sequence=final_outputs)\n\n  return (final_outputs, final_state)\n\n\ndef raw_rnn(cell, loop_fn,\n            parallel_iterations=None, swap_memory=False, scope=None):\n  \"\"\"Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.\n\n  **NOTE: This method is still in testing, and the API may change.**\n\n  This function is a more primitive version of `dynamic_rnn` that provides\n  more direct access to the inputs each iteration. 
It also provides more\n control over when to start and finish reading the sequence, and\n what to emit for the output.\n\n For example, it can be used to implement the dynamic decoder of a seq2seq\n model.\n\n Instead of working with `Tensor` objects, most operations work with\n `TensorArray` objects directly.\n\n The operation of `raw_rnn`, in pseudo-code, is basically the following:\n\n ```python\n time = tf.constant(0, dtype=tf.int32)\n (finished, next_input, initial_state, _, loop_state) = loop_fn(\n time=time, cell_output=None, cell_state=None, loop_state=None)\n emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)\n state = initial_state\n while not all(finished):\n (output, cell_state) = cell(next_input, state)\n (next_finished, next_input, next_state, emit, loop_state) = loop_fn(\n time=time + 1, cell_output=output, cell_state=cell_state,\n loop_state=loop_state)\n # Emit zeros and copy forward state for minibatch entries that are finished.\n state = tf.where(finished, state, next_state)\n emit = tf.where(finished, tf.zeros_like(emit), emit)\n emit_ta = emit_ta.write(time, emit)\n # If any new minibatch entries are marked as finished, mark these.\n finished = tf.logical_or(finished, next_finished)\n time += 1\n return (emit_ta, state, loop_state)\n ```\n\n with the additional properties that output and state may be (possibly nested)\n tuples, as determined by `cell.output_size` and `cell.state_size`, and\n as a result the final `state` and `emit_ta` may themselves be tuples.\n\n A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:\n\n ```python\n inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),\n dtype=tf.float32)\n sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)\n inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)\n inputs_ta = inputs_ta.unstack(inputs)\n\n cell = tf.contrib.rnn.LSTMCell(num_units)\n\n def loop_fn(time, cell_output, cell_state, loop_state):\n emit_output = cell_output # == None for time == 0\n if cell_output is None: # time == 0\n next_cell_state = cell.zero_state(batch_size, tf.float32)\n else:\n next_cell_state = cell_state\n elements_finished = (time >= sequence_length)\n finished = tf.reduce_all(elements_finished)\n next_input = tf.cond(\n finished,\n lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),\n lambda: inputs_ta.read(time))\n next_loop_state = None\n return (elements_finished, next_input, next_cell_state,\n emit_output, next_loop_state)\n\n outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)\n outputs = outputs_ta.stack()\n ```\n\n Args:\n cell: An instance of RNNCell.\n loop_fn: A callable that takes inputs\n `(time, cell_output, cell_state, loop_state)`\n and returns the tuple\n `(finished, next_input, next_cell_state, emit_output, next_loop_state)`.\n Here `time` is an int32 scalar `Tensor`, `cell_output` is a\n `Tensor` or (possibly nested) tuple of tensors as determined by\n `cell.output_size`, and `cell_state` is a `Tensor`\n or (possibly nested) tuple of tensors, as determined by the `loop_fn`\n on its first call (and should match `cell.state_size`).\n The outputs are: `finished`, a boolean `Tensor` of\n shape `[batch_size]`, `next_input`: the next input to feed to `cell`,\n `next_cell_state`: the next state to feed to `cell`,\n and `emit_output`: the output to store for this iteration.\n\n Note that `emit_output` should be a `Tensor` or (possibly nested)\n tuple of tensors with shapes and structure matching `cell.output_size`\n and `cell_output` 
above. The parameter `cell_state` and output\n `next_cell_state` may be either a single or (possibly nested) tuple\n of tensors. The parameter `loop_state` and\n output `next_loop_state` may be either a single or (possibly nested) tuple\n of `Tensor` and `TensorArray` objects. This last parameter\n may be ignored by `loop_fn` and the return value may be `None`. If it\n is not `None`, then the `loop_state` will be propagated through the RNN\n loop, for use purely by `loop_fn` to keep track of its own state.\n The `next_loop_state` parameter returned may be `None`.\n\n The first call to `loop_fn` will be `time = 0`, `cell_output = None`,\n `cell_state = None`, and `loop_state = None`. For this call:\n The `next_cell_state` value should be the value with which to initialize\n the cell's state. It may be a final state from a previous RNN or it\n may be the output of `cell.zero_state()`. It should be a\n (possibly nested) tuple structure of tensors.\n If `cell.state_size` is an integer, this must be\n a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.\n If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of\n appropriate type and shape `[batch_size] + cell.state_size`.\n If `cell.state_size` is a (possibly nested) tuple of ints or\n `TensorShape`, this will be a tuple having the corresponding shapes.\n The `emit_output` value may be either `None` or a (possibly nested)\n tuple structure of tensors, e.g.,\n `(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.\n If this first `emit_output` return value is `None`,\n then the `emit_ta` result of `raw_rnn` will have the same structure and\n dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same\n structure, shapes (prepended with a `batch_size` dimension), and dtypes\n as `emit_output`. The actual values returned for `emit_output` at this\n initializing call are ignored. Note, this emit structure must be\n consistent across all time steps.\n\n parallel_iterations: (Default: 32). The number of iterations to run in\n parallel. Those operations which do not have any temporal dependency\n and can be run in parallel, will be. This parameter trades off\n time for space. Values >> 1 use more memory but take less time,\n while smaller values use less memory but computations take longer.\n swap_memory: Transparently swap the tensors produced in forward inference\n but needed for back prop from GPU to CPU. This allows training RNNs\n which would typically not fit on a single GPU, with very minimal (or no)\n performance penalty.\n scope: VariableScope for the created subgraph; defaults to \"rnn\".\n\n Returns:\n A tuple `(emit_ta, final_state, final_loop_state)` where:\n\n `emit_ta`: The RNN output `TensorArray`.\n If `loop_fn` returns a (possibly nested) set of Tensors for\n `emit_output` during initialization, (inputs `time = 0`,\n `cell_output = None`, and `loop_state = None`), then `emit_ta` will\n have the same structure, dtypes, and shapes as `emit_output` instead.\n If `loop_fn` returns `emit_output = None` during this call,\n the structure of `cell.output_size` is used:\n If `cell.output_size` is a (possibly nested) tuple of integers\n or `TensorShape` objects, then `emit_ta` will be a tuple having the\n same structure as `cell.output_size`, containing TensorArrays whose\n elements' shapes correspond to the shape data in `cell.output_size`.\n\n `final_state`: The final cell state. If `cell.state_size` is an int, this\n will be shaped `[batch_size, cell.state_size]`. 
If it is a\n `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.\n If it is a (possibly nested) tuple of ints or `TensorShape`, this will\n be a tuple having the corresponding shapes.\n\n `final_loop_state`: The final loop state as returned by `loop_fn`.\n\n Raises:\n TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not\n a `callable`.\n \"\"\"\n\n assert_like_rnncell(\"cell\", cell)\n if not callable(loop_fn):\n raise TypeError(\"loop_fn must be a callable\")\n\n parallel_iterations = parallel_iterations or 32\n\n # Create a new scope in which the caching device is either\n # determined by the parent scope, or is set to place the cached\n # Variable using the same placement as for the rest of the RNN.\n with vs.variable_scope(scope or \"rnn\") as varscope:\n if varscope.caching_device is None:\n varscope.set_caching_device(lambda op: op.device)\n\n time = constant_op.constant(0, dtype=dtypes.int32)\n (elements_finished, next_input, initial_state, emit_structure,\n init_loop_state) = loop_fn(\n time, None, None, None) # time, cell_output, cell_state, loop_state\n flat_input = nest.flatten(next_input)\n\n # Need a surrogate loop state for the while_loop if none is available.\n loop_state = (init_loop_state if init_loop_state is not None\n else constant_op.constant(0, dtype=dtypes.int32))\n\n input_shape = [input_.get_shape() for input_ in flat_input]\n static_batch_size = input_shape[0][0]\n\n for input_shape_i in input_shape:\n # Static verification that batch sizes all match\n static_batch_size.merge_with(input_shape_i[0])\n\n batch_size = static_batch_size.value\n if batch_size is None:\n batch_size = array_ops.shape(flat_input[0])[0]\n\n nest.assert_same_structure(initial_state, cell.state_size)\n state = initial_state\n flat_state = nest.flatten(state)\n flat_state = [ops.convert_to_tensor(s) for s in flat_state]\n state = nest.pack_sequence_as(structure=state,\n flat_sequence=flat_state)\n\n if emit_structure is not None:\n flat_emit_structure = nest.flatten(emit_structure)\n flat_emit_size = [emit.shape if emit.shape.is_fully_defined() else\n array_ops.shape(emit) for emit in flat_emit_structure]\n flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]\n else:\n emit_structure = cell.output_size\n flat_emit_size = nest.flatten(emit_structure)\n flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)\n\n flat_emit_ta = [\n tensor_array_ops.TensorArray(\n dtype=dtype_i, dynamic_size=True, size=0, name=\"rnn_output_%d\" % i)\n for i, dtype_i in enumerate(flat_emit_dtypes)]\n emit_ta = nest.pack_sequence_as(structure=emit_structure,\n flat_sequence=flat_emit_ta)\n flat_zero_emit = [\n array_ops.zeros(_concat(batch_size, size_i), dtype_i)\n for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]\n zero_emit = nest.pack_sequence_as(structure=emit_structure,\n flat_sequence=flat_zero_emit)\n\n def condition(unused_time, elements_finished, *_):\n return math_ops.logical_not(math_ops.reduce_all(elements_finished))\n\n def body(time, elements_finished, current_input,\n emit_ta, state, loop_state):\n \"\"\"Internal while loop body for raw_rnn.\n\n Args:\n time: time scalar.\n elements_finished: batch-size vector.\n current_input: possibly nested tuple of input tensors.\n emit_ta: possibly nested tuple of output TensorArrays.\n state: possibly nested tuple of state tensors.\n loop_state: possibly nested tuple of loop state tensors.\n\n Returns:\n Tuple having the same size as Args but with updated values.\n \"\"\"\n (next_output, 
cell_state) = cell(current_input, state)\n\n nest.assert_same_structure(state, cell_state)\n nest.assert_same_structure(cell.output_size, next_output)\n\n next_time = time + 1\n (next_finished, next_input, next_state, emit_output,\n next_loop_state) = loop_fn(\n next_time, next_output, cell_state, loop_state)\n\n nest.assert_same_structure(state, next_state)\n nest.assert_same_structure(current_input, next_input)\n nest.assert_same_structure(emit_ta, emit_output)\n\n # If loop_fn returns None for next_loop_state, just reuse the\n # previous one.\n loop_state = loop_state if next_loop_state is None else next_loop_state\n\n def _copy_some_through(current, candidate):\n \"\"\"Copy some tensors through via array_ops.where.\"\"\"\n def copy_fn(cur_i, cand_i):\n with ops.colocate_with(cand_i):\n return array_ops.where(elements_finished, cur_i, cand_i)\n return nest.map_structure(copy_fn, current, candidate)\n\n emit_output = _copy_some_through(zero_emit, emit_output)\n next_state = _copy_some_through(state, next_state)\n\n emit_ta = nest.map_structure(\n lambda ta, emit: ta.write(time, emit), emit_ta, emit_output)\n\n elements_finished = math_ops.logical_or(elements_finished, next_finished)\n\n return (next_time, elements_finished, next_input,\n emit_ta, next_state, loop_state)\n\n returned = control_flow_ops.while_loop(\n condition, body, loop_vars=[\n time, elements_finished, next_input,\n emit_ta, state, loop_state],\n parallel_iterations=parallel_iterations,\n swap_memory=swap_memory)\n\n (emit_ta, final_state, final_loop_state) = returned[-3:]\n\n if init_loop_state is None:\n final_loop_state = None\n\n return (emit_ta, final_state, final_loop_state)\n\n\ndef static_rnn(cell,\n inputs,\n initial_state=None,\n dtype=None,\n sequence_length=None,\n scope=None):\n \"\"\"Creates a recurrent neural network specified by RNNCell `cell`.\n\n The simplest form of RNN network generated is:\n\n ```python\n state = cell.zero_state(...)\n outputs = []\n for input_ in inputs:\n output, state = cell(input_, state)\n outputs.append(output)\n return (outputs, state)\n ```\n However, a few other options are available:\n\n An initial state can be provided.\n If the sequence_length vector is provided, dynamic calculation is performed.\n This method of calculation does not compute the RNN steps past the maximum\n sequence length of the minibatch (thus saving computational time),\n and properly propagates the state at an example's sequence length\n to the final state output.\n\n The dynamic calculation performed is, at time `t` for batch row `b`,\n\n ```python\n (output, state)(b, t) =\n (t >= sequence_length(b))\n ? 
(zeros(cell.output_size), states(b, sequence_length(b) - 1))\n : cell(input(b, t), state(b, t - 1))\n ```\n\n Args:\n cell: An instance of RNNCell.\n inputs: A length T list of inputs, each a `Tensor` of shape\n `[batch_size, input_size]`, or a nested tuple of such elements.\n initial_state: (optional) An initial state for the RNN.\n If `cell.state_size` is an integer, this must be\n a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.\n If `cell.state_size` is a tuple, this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell.state_size`.\n dtype: (optional) The data type for the initial state and expected output.\n Required if initial_state is not provided or RNN state has a heterogeneous\n dtype.\n sequence_length: Specifies the length of each sequence in inputs.\n An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.\n scope: VariableScope for the created subgraph; defaults to \"rnn\".\n\n Returns:\n A pair (outputs, state) where:\n\n - outputs is a length T list of outputs (one for each input), or a nested\n tuple of such elements.\n - state is the final state\n\n Raises:\n TypeError: If `cell` is not an instance of RNNCell.\n ValueError: If `inputs` is `None` or an empty list, or if the input depth\n (column size) cannot be inferred from inputs via shape inference.\n \"\"\"\n\n assert_like_rnncell(\"cell\", cell)\n if not nest.is_sequence(inputs):\n raise TypeError(\"inputs must be a sequence\")\n if not inputs:\n raise ValueError(\"inputs must not be empty\")\n\n outputs = []\n # Create a new scope in which the caching device is either\n # determined by the parent scope, or is set to place the cached\n # Variable using the same placement as for the rest of the RNN.\n with vs.variable_scope(scope or \"rnn\") as varscope:\n if varscope.caching_device is None:\n varscope.set_caching_device(lambda op: op.device)\n\n # Obtain the first sequence of the input\n first_input = inputs\n while nest.is_sequence(first_input):\n first_input = first_input[0]\n\n # Temporarily avoid EmbeddingWrapper and seq2seq badness\n # TODO(lukaszkaiser): remove EmbeddingWrapper\n if first_input.get_shape().ndims != 1:\n\n input_shape = first_input.get_shape().with_rank_at_least(2)\n fixed_batch_size = input_shape[0]\n\n flat_inputs = nest.flatten(inputs)\n for flat_input in flat_inputs:\n input_shape = flat_input.get_shape().with_rank_at_least(2)\n batch_size, input_size = input_shape[0], input_shape[1:]\n fixed_batch_size.merge_with(batch_size)\n for i, size in enumerate(input_size):\n if size.value is None:\n raise ValueError(\n \"Input size (dimension %d of inputs) must be accessible via \"\n \"shape inference, but saw value None.\" % i)\n else:\n fixed_batch_size = first_input.get_shape().with_rank_at_least(1)[0]\n\n if fixed_batch_size.value:\n batch_size = fixed_batch_size.value\n else:\n batch_size = array_ops.shape(first_input)[0]\n if initial_state is not None:\n state = initial_state\n else:\n if not dtype:\n raise ValueError(\"If no initial_state is provided, \"\n \"dtype must be specified\")\n state = cell.zero_state(batch_size, dtype)\n\n if sequence_length is not None: # Prepare variables\n sequence_length = ops.convert_to_tensor(\n sequence_length, name=\"sequence_length\")\n if sequence_length.get_shape().ndims not in (None, 1):\n raise ValueError(\n \"sequence_length must be a vector of length batch_size\")\n\n def _create_zero_output(output_size):\n # convert int to TensorShape if necessary\n size = _concat(batch_size, 
output_size)\n output = array_ops.zeros(\n array_ops.stack(size), _infer_state_dtype(dtype, state))\n shape = _concat(fixed_batch_size.value, output_size, static=True)\n output.set_shape(tensor_shape.TensorShape(shape))\n return output\n\n output_size = cell.output_size\n flat_output_size = nest.flatten(output_size)\n flat_zero_output = tuple(\n _create_zero_output(size) for size in flat_output_size)\n zero_output = nest.pack_sequence_as(\n structure=output_size, flat_sequence=flat_zero_output)\n\n sequence_length = math_ops.to_int32(sequence_length)\n min_sequence_length = math_ops.reduce_min(sequence_length)\n max_sequence_length = math_ops.reduce_max(sequence_length)\n\n for time, input_ in enumerate(inputs):\n if time > 0:\n varscope.reuse_variables()\n # pylint: disable=cell-var-from-loop\n call_cell = lambda: cell(input_, state)\n # pylint: enable=cell-var-from-loop\n if sequence_length is not None:\n (output, state) = _rnn_step(\n time=time,\n sequence_length=sequence_length,\n min_sequence_length=min_sequence_length,\n max_sequence_length=max_sequence_length,\n zero_output=zero_output,\n state=state,\n call_cell=call_cell,\n state_size=cell.state_size)\n else:\n (output, state) = call_cell()\n\n outputs.append(output)\n\n return (outputs, state)\n\n\ndef static_state_saving_rnn(cell,\n inputs,\n state_saver,\n state_name,\n sequence_length=None,\n scope=None):\n \"\"\"RNN that accepts a state saver for time-truncated RNN calculation.\n\n Args:\n cell: An instance of `RNNCell`.\n inputs: A length T list of inputs, each a `Tensor` of shape\n `[batch_size, input_size]`.\n state_saver: A state saver object with methods `state` and `save_state`.\n state_name: Python string or tuple of strings. The name to use with the\n state_saver. If the cell returns tuples of states (i.e.,\n `cell.state_size` is a tuple) then `state_name` should be a tuple of\n strings having the same length as `cell.state_size`. Otherwise it should\n be a single string.\n sequence_length: (optional) An int32/int64 vector size [batch_size].\n See the documentation for rnn() for more details about sequence_length.\n scope: VariableScope for the created subgraph; defaults to \"rnn\".\n\n Returns:\n A pair (outputs, state) where:\n outputs is a length T list of outputs (one for each input)\n states is the final state\n\n Raises:\n TypeError: If `cell` is not an instance of RNNCell.\n ValueError: If `inputs` is `None` or an empty list, or if the arity and\n type of `state_name` does not match that of `cell.state_size`.\n \"\"\"\n state_size = cell.state_size\n state_is_tuple = nest.is_sequence(state_size)\n state_name_tuple = nest.is_sequence(state_name)\n\n if state_is_tuple != state_name_tuple:\n raise ValueError(\"state_name should be the same type as cell.state_size. \"\n \"state_name: %s, cell.state_size: %s\" % (str(state_name),\n str(state_size)))\n\n if state_is_tuple:\n state_name_flat = nest.flatten(state_name)\n state_size_flat = nest.flatten(state_size)\n\n if len(state_name_flat) != len(state_size_flat):\n raise ValueError(\"#elems(state_name) != #elems(state_size): %d vs. 
%d\" %\n (len(state_name_flat), len(state_size_flat)))\n\n initial_state = nest.pack_sequence_as(\n structure=state_size,\n flat_sequence=[state_saver.state(s) for s in state_name_flat])\n else:\n initial_state = state_saver.state(state_name)\n\n (outputs, state) = static_rnn(\n cell,\n inputs,\n initial_state=initial_state,\n sequence_length=sequence_length,\n scope=scope)\n\n if state_is_tuple:\n flat_state = nest.flatten(state)\n state_name = nest.flatten(state_name)\n save_state = [\n state_saver.save_state(name, substate)\n for name, substate in zip(state_name, flat_state)\n ]\n else:\n save_state = [state_saver.save_state(state_name, state)]\n\n with ops.control_dependencies(save_state):\n last_output = outputs[-1]\n flat_last_output = nest.flatten(last_output)\n flat_last_output = [\n array_ops.identity(output) for output in flat_last_output\n ]\n outputs[-1] = nest.pack_sequence_as(\n structure=last_output, flat_sequence=flat_last_output)\n\n return (outputs, state)\n\n\ndef static_bidirectional_rnn(cell_fw,\n cell_bw,\n inputs,\n initial_state_fw=None,\n initial_state_bw=None,\n dtype=None,\n sequence_length=None,\n scope=None):\n \"\"\"Creates a bidirectional recurrent neural network.\n\n Similar to the unidirectional case above (rnn) but takes input and builds\n independent forward and backward RNNs with the final forward and backward\n outputs depth-concatenated, such that the output will have the format\n [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of\n forward and backward cell must match. The initial state for both directions\n is zero by default (but can be set optionally) and no intermediate states are\n ever returned -- the network is fully unrolled for the given (passed in)\n length(s) of the sequence(s) or completely unrolled if length(s) is not given.\n\n Args:\n cell_fw: An instance of RNNCell, to be used for forward direction.\n cell_bw: An instance of RNNCell, to be used for backward direction.\n inputs: A length T list of inputs, each a tensor of shape\n [batch_size, input_size], or a nested tuple of such elements.\n initial_state_fw: (optional) An initial state for the forward RNN.\n This must be a tensor of appropriate type and shape\n `[batch_size, cell_fw.state_size]`.\n If `cell_fw.state_size` is a tuple, this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.\n initial_state_bw: (optional) Same as for `initial_state_fw`, but using\n the corresponding properties of `cell_bw`.\n dtype: (optional) The data type for the initial state. 
Required if\n either of the initial states are not provided.\n sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,\n containing the actual lengths for each of the sequences.\n scope: VariableScope for the created subgraph; defaults to\n \"bidirectional_rnn\"\n\n Returns:\n A tuple (outputs, output_state_fw, output_state_bw) where:\n outputs is a length `T` list of outputs (one for each input), which\n are depth-concatenated forward and backward outputs.\n output_state_fw is the final state of the forward rnn.\n output_state_bw is the final state of the backward rnn.\n\n Raises:\n TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.\n ValueError: If inputs is None or an empty list.\n \"\"\"\n\n if not _like_rnncell(cell_fw):\n raise TypeError(\"cell_fw must be an instance of RNNCell\")\n if not _like_rnncell(cell_bw):\n raise TypeError(\"cell_bw must be an instance of RNNCell\")\n if not nest.is_sequence(inputs):\n raise TypeError(\"inputs must be a sequence\")\n if not inputs:\n raise ValueError(\"inputs must not be empty\")\n\n with vs.variable_scope(scope or \"bidirectional_rnn\"):\n # Forward direction\n with vs.variable_scope(\"fw\") as fw_scope:\n output_fw, output_state_fw = static_rnn(\n cell_fw,\n inputs,\n initial_state_fw,\n dtype,\n sequence_length,\n scope=fw_scope)\n\n # Backward direction\n with vs.variable_scope(\"bw\") as bw_scope:\n reversed_inputs = _reverse_seq(inputs, sequence_length)\n tmp, output_state_bw = static_rnn(\n cell_bw,\n reversed_inputs,\n initial_state_bw,\n dtype,\n sequence_length,\n scope=bw_scope)\n\n output_bw = _reverse_seq(tmp, sequence_length)\n # Concat each of the forward/backward outputs\n flat_output_fw = nest.flatten(output_fw)\n flat_output_bw = nest.flatten(output_bw)\n\n flat_outputs = tuple(\n array_ops.concat([fw, bw], 1)\n for fw, bw in zip(flat_output_fw, flat_output_bw))\n\n outputs = nest.pack_sequence_as(\n structure=output_fw, flat_sequence=flat_outputs)\n\n return (outputs, output_state_fw, output_state_bw)\n",
"# Copyright 2018 Alibaba Group. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport xdl\nimport unittest\nimport numpy as np\n\nclass TestConstant(unittest.TestCase):\n def test_constant(self):\n a = xdl.convert_to_tensor(1)\n b = xdl.convert_to_tensor([10, 20])\n c = xdl.convert_to_tensor(np.array([30, 40]))\n a, b, c = xdl.execute([a, b, c])\n self.assertTrue(a == 1)\n self.assertTrue((b == np.array([10, 20])).all())\n self.assertTrue((c == np.array([30, 40])).all())\n\n def test_constant_gpu(self):\n with xdl.device(\"GPU\"):\n a = xdl.convert_to_tensor(1)\n b = xdl.convert_to_tensor([10, 20])\n c = xdl.convert_to_tensor(np.array([30, 40]))\n a, b, c = xdl.execute([a, b, c])\n self.assertTrue(a == 1)\n self.assertTrue((b == np.array([10, 20])).all())\n self.assertTrue((c == np.array([30, 40])).all())\n\ndef suite():\n return unittest.TestLoader().loadTestsFromTestCase(TestConstant)\n\nif __name__ == '__main__':\n unittest.TextTestRunner().run(suite())\n",
"import numpy as np # type: ignore\nimport itertools\nfrom typing import Text, Sequence\n\n\ndef get_pad_shape(auto_pad, # type: Text\n input_spatial_shape, # type: np.ndarray\n kernel_spatial_shape, # type: np.ndarray\n strides_spatial, # type: Sequence[int]\n output_spatial_shape # type: Sequence[int]\n ): # type: (...) -> Sequence[int]\n pad_shape = [0] * len(input_spatial_shape)\n if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):\n for i in range(len(input_spatial_shape)):\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial[i] + kernel_spatial_shape[i] - \\\n input_spatial_shape[i]\n elif auto_pad == 'VALID':\n pass\n return pad_shape\n\n\ndef get_output_shape(auto_pad, # type: Text\n input_spatial_shape, # type: np.ndarray\n kernel_spatial_shape, # type: np.ndarray\n strides_spatial # type: Sequence[int]\n ): # type: (...) -> Sequence[int]\n out_shape = [0] * len(input_spatial_shape)\n if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):\n for i in range(len(input_spatial_shape)):\n out_shape[i] = int(np.ceil(float(input_spatial_shape[i]) / float(strides_spatial[i])))\n elif auto_pad == 'VALID':\n for i in range(len(input_spatial_shape)):\n out_shape[i] = int(\n np.ceil(float(input_spatial_shape[i] - (kernel_spatial_shape[i] - 1)) / float(strides_spatial[i])))\n return out_shape\n\n\ndef pool(padded, # type: np.ndarray\n x_shape, # type: np.ndarray\n kernel_shape, # type: Sequence[int]\n strides_shape, # type: Sequence[int]\n out_shape, # type: Sequence[int]\n pad_shape, # type: Sequence[int]\n pooling_type # type: Text\n ): # type: (...) -> np.ndarray\n spatial_size = len(x_shape) - 2\n y = np.zeros([x_shape[0], x_shape[1]] + list(out_shape))\n\n for shape in itertools.product(range(x_shape[0]),\n range(x_shape[1]),\n *[range(\n int((x_shape[i + 2] + pad_shape[i] - kernel_shape[i]) / strides_shape[i] + 1))\n for i in range(spatial_size)]):\n window = padded[shape[0], shape[1]]\n window_vals = np.array([window[i] for i in list(\n itertools.product(\n *[range(strides_shape[i] * shape[i + 2], strides_shape[i] * shape[i + 2] + kernel_shape[i]) for i in\n range(spatial_size)])\n )])\n if pooling_type == 'AVG':\n f = np.average\n elif pooling_type == 'MAX':\n f = np.max\n else:\n raise NotImplementedError('Pooling type {} does not support. Should be AVG, MAX'.format(pooling_type))\n y[shape] = f(window_vals[np.where(~np.isnan(window_vals))])\n return y.astype(np.float32)\n"
] | [
[
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.ops.math_ops.to_int32",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.python.ops.math_ops.reduce_min",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.math_ops.logical_or",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.ops.tensor_array_ops.TensorArray",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.ops.array_ops.reverse_sequence",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.ops.array_ops.reverse",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
],
[
"numpy.array"
],
[
"numpy.isnan"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.4",
"2.9",
"1.5",
"1.7",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ankurankan/game_of_life | [
"81cf2f7f70a05019e78206d1ee7a8205aa590186"
] | [
"main.py"
] | [
"from time import sleep\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef get_initial_state(size):\n return np.random.choice([0, 1], size)\n\ndef compute_next_state(state):\n new_state = np.zeros(state.shape, dtype=int)\n for i in range(state.shape[0]):\n for j in range(state.shape[1]):\n low_x, high_x = max(0, i-1), min(i+2, state.shape[0])\n low_y, high_y = max(0, j-1), min(j+2, state.shape[1])\n n_live = np.sum(state[low_x: high_x, low_y: high_y]) - state[i, j]\n\n if (state[i, j] == 1) and (n_live < 2):\n new_state[i, j] = 0\n elif (state[i, j] == 1) and (2 <= n_live <= 3):\n new_state[i, j] = 1\n elif (state[i, j] == 1) and (n_live > 3):\n new_state[i, j] = 0\n elif (state[i, j] == 0) and (n_live == 3):\n new_state[i, j] = 1\n else:\n new_state[i, j] = state[i, j]\n\n return new_state\n\n\ndef start(initial_state=None, loop_delay=1, size=(200, 200)):\n if initial_state is None:\n state = get_initial_state(size)\n else:\n state = initial_state\n size = state.shape\n\n age = np.zeros(size, dtype=int)\n counter = 0\n\n while True:\n new_state = compute_next_state(state)\n age += new_state\n age = age * new_state\n counter += 1\n plt.imshow(age, cmap='Greys')\n plt.xlim(right=size[1], left=0)\n plt.ylim(top=0, bottom=size[0])\n plt.pause(loop_delay)\n\n if (np.sum(new_state) == 0) or (new_state == state).all():\n print(counter)\n state = get_initial_state(size)\n age = np.zeros(size, dtype=int)\n counter = 0\n\n else:\n state = new_state\n\nif __name__ == \"__main__\":\n start()\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.sum",
"numpy.random.choice",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.zeros",
"matplotlib.pyplot.pause"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ioangatop/srVAE | [
"dfee765c53f11f4653e7c6e7118a339832656867"
] | [
"src/utils/args.py"
] | [
"import torch\nimport argparse\n\n# ----- Parser -----\n\ndef parser():\n PARSER = argparse.ArgumentParser(description='Training parameters.')\n\n # Dataset\n PARSER.add_argument('--dataset', default='CIFAR10', type=str,\n choices=['CIFAR10', 'CelebA', 'Imagenette', 'ImageNet32', 'ImageNet64'],\n help=\"Data to be used.\")\n PARSER.add_argument('--img_resize', default=32, type=int,\n help='Change image resolution.')\n\n # Model\n PARSER.add_argument('--model', default='VAE', type=str,\n choices=['VAE', 'srVAE'],\n help=\"Model to be used.\")\n PARSER.add_argument('--network', default='densenet32', type=str,\n choices=['densenet32', 'densenet16x32'],\n help=\"Neural Network architecture to be used.\")\n\n # Prior\n PARSER.add_argument('--prior', default='MixtureOfGaussians', type=str,\n choices=['StandardNormal', 'MixtureOfGaussians', 'RealNVP'],\n help='Prior type.')\n PARSER.add_argument('--z_dim', default=1024, type=int,\n help='Dimensionality of z latent space.')\n PARSER.add_argument('--u_dim', default=1024, type=int,\n help='Dimensionality of z latent space.')\n\n # data likelihood\n PARSER.add_argument('--likelihood', default='dmol', type=str,\n choices=['dmol'],\n help=\"Type of likelihood.\")\n PARSER.add_argument('--iw_test', default=512, type=int,\n help=\"Number of Importance Weighting samples used for approximating the test log-likelihood.\")\n\n # Training Parameters\n PARSER.add_argument('--batch_size', default=32, type=int,\n help='Batch size.')\n PARSER.add_argument('--epochs', default=2000, type=int,\n help='Number of training epochs.')\n\n # General Configs\n PARSER.add_argument('--seed', default=None, type=int,\n help='Fix random seed.')\n PARSER.add_argument('--n_samples', default=8, type=int,\n help='Number of generated samples.')\n PARSER.add_argument('--log_interval', default=True, type=bool,\n help='Print progress on every batch.')\n PARSER.add_argument('--device', default=None, type=str,\n choices=['cpu', 'cuda'],\n help='Device to run the experiment.')\n\n PARSER.add_argument('--use_tb', default=True, type=bool,\n help='Use TensorBoard.')\n PARSER.add_argument('--tags', default='logs', type=str,\n help='Run tags.')\n\n\n ARGS = PARSER.parse_args()\n\n # Check device\n if ARGS.device is None:\n ARGS.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n return ARGS\n\n\nargs = parser()\n\n\nif __name__ == \"__main__\":\n pass\n"
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KanHatakeyama/annealing_project | [
"eac2dfe65c480450a5d12b09db2c1c9f83d03389"
] | [
"lib/composite/LiPolymerDataScaler.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom DataUtility import get_column_names\n\n\nclass LiPolymerDataScaler:\n \"\"\"\n a special class to scale the lithium polymer database\n \"\"\"\n\n def __init__(self):\n self.scaling_dict = {}\n self.main_val_params = [\"SMILES_wt\", \"wt_ratio\", \"inorg_contain_ratio\"]\n self.main_txt_params = [\"structureList\", \"inorg_name\"]\n self.main_params = self.main_val_params+self.main_txt_params\n self.target_param = \"Conductivity\"\n\n def mutual_process(self, df):\n \"\"\"\n convert values (to log, etc)\n \"\"\"\n df[\"Conductivity\"] = np.log10(df[\"Conductivity\"].astype('float'))\n df[\"Temperature\"] = np.log10(df[\"Temperature\"].astype('float')+273)\n\n # fill Nan by zero\n for c in self.main_params:\n target_columns = get_column_names(df, c)\n df[target_columns] = df[target_columns].fillna(0)\n\n # convert molecular weight\n self.mw_column_list = get_column_names(df, \"MWList\")\n for c in self.mw_column_list:\n df[c] = np.log10(df[c].astype('float'))\n\n return df\n\n def fit_transform(self, original_df):\n \"\"\"\n scaling data, etc\n\n Parameters\n ----------------\n original_df: dataframe\n dataframe to be scaled\n\n Returns\n ----------------\n df: dataframe\n scaled dataframe\n \"\"\"\n df = original_df.copy()\n df = self.mutual_process(df)\n\n # fill lacking Molecular weight with average value\n self.average_mw = sum(df[self.mw_column_list].sum()) / \\\n sum(df[self.mw_column_list].count())\n\n for c in self.mw_column_list:\n df[c] = df[c].fillna(self.average_mw)\n\n # scaling\n for v in self.main_val_params + [\"Conductivity\", \"Temperature\"]+self.mw_column_list:\n for c in get_column_names(df, v):\n sc = StandardScaler()\n df[c] = sc.fit_transform(\n df[c].astype('float').values.reshape(-1, 1))\n self.scaling_dict[c] = sc\n\n # onehot encoding\n for v in self.main_txt_params:\n df = pd.get_dummies(df, columns=get_column_names(df, v))\n\n self.use_columns = []\n\n for c in [\"Conductivity\", \"Temperature\"]+self.main_params + self.mw_column_list+[\"fp_list\"]:\n self.use_columns.extend(get_column_names(df, c))\n\n \"\"\" \n **********************************************************\n delete some columns for easiness of machine learning\n following parameters can be useful for machine learning (10.1021/jacs.9b11442), but ignored in this project.\n \"\"\"\n for remove_targets in [\"MWList\", \"wt_ratio\", \"inorg\", \"structure\", \"Temperature\"]:\n del_columns = get_column_names(df, remove_targets)\n for i in del_columns:\n self.use_columns.remove(i)\n\n self.tr_df = df\n return df\n\n def transform(self, original_df):\n \"\"\"\n scaling data, etc\n\n Parameters\n ----------------\n original_df: dataframe\n dataframe to be scaled\n\n Returns\n ----------------\n df: dataframe\n scaled dataframe\n \"\"\"\n df = original_df.copy()\n df = self.mutual_process(df)\n\n for c in self.mw_column_list:\n df[c] = df[c].fillna(self.average_mw)\n\n # scaling\n for v in self.main_val_params + [\"Conductivity\", \"Temperature\"]+self.mw_column_list:\n for c in get_column_names(df, v):\n df[c] = self.scaling_dict[c].transform(\n df[c].astype('float').values.reshape(-1, 1))\n\n # onehot encoding\n for v in self.main_txt_params:\n df = pd.get_dummies(df, columns=get_column_names(df, v))\n\n # for lacking columns, add the most frequent vals\n lacking_columns = set(self.use_columns)-set(df.columns)\n\n for i in lacking_columns:\n df[i] = self.tr_df[i].mode()\n\n return df\n"
] | [
[
"sklearn.preprocessing.StandardScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
danielschulz/MONAI | [
"54ef6e9e700f0de3d50184c0148f953be871a58e"
] | [
"monai/metrics/surface_distance.py"
] | [
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#     http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom typing import Union\n\nimport numpy as np\nimport torch\n\nfrom monai.metrics.utils import *\nfrom monai.utils import MetricReduction\n\n\nclass SurfaceDistanceMetric:\n    \"\"\"\n    Compute Surface Distance between two tensors. It can support both multi-class and multi-label tasks.\n    It supports both symmetric and asymmetric surface distance calculation.\n    Input `y_pred` (BNHW[D] where N is number of classes) is compared with ground truth `y` (BNHW[D]).\n    `y_preds` is expected to have binarized predictions and `y` should be in one-hot format.\n    You can use suitable transforms in ``monai.transforms.post`` first to achieve binarized values.\n\n    Args:\n        include_background: whether to skip distance computation on the first channel of\n            the predicted output. Defaults to ``False``.\n        symmetric: whether to calculate the symmetric average surface distance between\n            `seg_pred` and `seg_gt`. Defaults to ``False``.\n        distance_metric: [``\"euclidean\"``, ``\"chessboard\"``, ``\"taxicab\"``]\n            the metric used to compute surface distance. Defaults to ``\"euclidean\"``.\n        reduction: {``\"none\"``, ``\"mean\"``, ``\"sum\"``, ``\"mean_batch\"``, ``\"sum_batch\"``,\n            ``\"mean_channel\"``, ``\"sum_channel\"``}\n            Define the mode to reduce computation result of 1 batch data. Defaults to ``\"mean\"``.\n\n    \"\"\"\n\n    def __init__(\n        self,\n        include_background: bool = False,\n        symmetric: bool = False,\n        distance_metric: str = \"euclidean\",\n        reduction: Union[MetricReduction, str] = MetricReduction.MEAN,\n    ) -> None:\n        super().__init__()\n        self.include_background = include_background\n        self.distance_metric = distance_metric\n        self.symmetric = symmetric\n        self.reduction = reduction\n\n    def __call__(self, y_pred: torch.Tensor, y: torch.Tensor):\n        \"\"\"\n        Args:\n            y_pred: input data to compute, typical segmentation model output.\n                It must be one-hot format and first dim is batch, example shape: [16, 3, 32, 32]. The values\n                should be binarized.\n            y: ground truth to compute the distance. 
It must be one-hot format and first dim is batch.\n                The values should be binarized.\n\n        Raises:\n            ValueError: when `y` is not a binarized tensor.\n            ValueError: when `y_pred` has less than three dimensions.\n        \"\"\"\n        if not torch.all(y_pred.byte() == y_pred):\n            warnings.warn(\"y_pred is not a binarized tensor here!\")\n        if not torch.all(y.byte() == y):\n            raise ValueError(\"y should be a binarized tensor.\")\n        dims = y_pred.ndimension()\n        if dims < 3:\n            raise ValueError(\"y_pred should have at least three dimensions.\")\n        # compute (BxC) for each channel for each batch\n        f = compute_average_surface_distance(\n            y_pred=y_pred,\n            y=y,\n            include_background=self.include_background,\n            symmetric=self.symmetric,\n            distance_metric=self.distance_metric,\n        )\n\n        # do metric reduction\n        f, not_nans = do_metric_reduction(f, self.reduction)\n        return f, not_nans\n\n\ndef compute_average_surface_distance(\n    y_pred: Union[np.ndarray, torch.Tensor],\n    y: Union[np.ndarray, torch.Tensor],\n    include_background: bool = False,\n    symmetric: bool = False,\n    distance_metric: str = \"euclidean\",\n):\n    \"\"\"\n    This function is used to compute the Average Surface Distance from `y_pred` to `y`\n    under the default setting.\n    In addition, if ``symmetric = True`` is set, the average symmetric surface distance between\n    these two inputs will be returned.\n\n    Args:\n        y_pred: input data to compute, typical segmentation model output.\n            It must be one-hot format and first dim is batch, example shape: [16, 3, 32, 32]. The values\n            should be binarized.\n        y: ground truth to compute the mean distance. It must be one-hot format and first dim is batch.\n            The values should be binarized.\n        include_background: whether to skip distance computation on the first channel of\n            the predicted output. Defaults to ``False``.\n        symmetric: whether to calculate the symmetric average surface distance between\n            `seg_pred` and `seg_gt`. Defaults to ``False``.\n        distance_metric: [``\"euclidean\"``, ``\"chessboard\"``, ``\"taxicab\"``]\n            the metric used to compute surface distance. Defaults to ``\"euclidean\"``.\n    \"\"\"\n\n    if not include_background:\n        y_pred, y = ignore_background(\n            y_pred=y_pred,\n            y=y,\n        )\n\n    y = y.float()\n    y_pred = y_pred.float()\n\n    if y.shape != y_pred.shape:\n        raise ValueError(\"y_pred and y should have same shapes.\")\n\n    batch_size, n_class = y_pred.shape[:2]\n    asd = np.empty((batch_size, n_class))\n\n    for b, c in np.ndindex(batch_size, n_class):\n        (edges_pred, edges_gt) = get_mask_edges(y_pred[b, c], y[b, c])\n        surface_distance = get_surface_distance(edges_pred, edges_gt, distance_metric=distance_metric)\n        if surface_distance.shape == (0,):\n            avg_surface_distance = np.nan\n        else:\n            avg_surface_distance = surface_distance.mean()\n        if not symmetric:\n            asd[b, c] = avg_surface_distance\n        else:\n            surface_distance_2 = get_surface_distance(edges_gt, edges_pred, distance_metric=distance_metric)\n            if surface_distance_2.shape == (0,):\n                avg_surface_distance_2 = np.nan\n            else:\n                avg_surface_distance_2 = surface_distance_2.mean()\n            asd[b, c] = np.mean((avg_surface_distance, avg_surface_distance_2))\n\n    return torch.from_numpy(asd)\n"
] | [
[
"numpy.ndindex",
"numpy.mean",
"torch.from_numpy",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
maxi-marufo/my-scipy | [
"be6c2597fcee86419592ac512319301c7ddfc118",
"be6c2597fcee86419592ac512319301c7ddfc118",
"be6c2597fcee86419592ac512319301c7ddfc118",
"be6c2597fcee86419592ac512319301c7ddfc118",
"be6c2597fcee86419592ac512319301c7ddfc118"
] | [
"scipy/integrate/quadrature.py",
"scipy/signal/signaltools.py",
"scipy/sparse/tests/test_construct.py",
"scipy/integrate/_ode.py",
"scipy/spatial/tests/test_spherical_voronoi.py"
] | [
"import functools\nimport numpy as np\nimport math\nimport types\nimport warnings\n\n# trapz is a public function for scipy.integrate,\n# even though it's actually a NumPy function.\nfrom numpy import trapz\nfrom scipy.special import roots_legendre\nfrom scipy.special import gammaln\n\n__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',\n           'cumtrapz', 'newton_cotes']\n\n\n# Make See Also linking for our local copy work properly\ndef _copy_func(f):\n    \"\"\"Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)\"\"\"\n    g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,\n                           argdefs=f.__defaults__, closure=f.__closure__)\n    g = functools.update_wrapper(g, f)\n    g.__kwdefaults__ = f.__kwdefaults__\n    return g\n\n\ntrapz = _copy_func(trapz)\nif trapz.__doc__:\n    trapz.__doc__ = trapz.__doc__.replace('sum, cumsum', 'numpy.cumsum')\n\n\nclass AccuracyWarning(Warning):\n    pass\n\n\ndef _cached_roots_legendre(n):\n    \"\"\"\n    Cache roots_legendre results to speed up calls of the fixed_quad\n    function.\n    \"\"\"\n    if n in _cached_roots_legendre.cache:\n        return _cached_roots_legendre.cache[n]\n\n    _cached_roots_legendre.cache[n] = roots_legendre(n)\n    return _cached_roots_legendre.cache[n]\n\n\n_cached_roots_legendre.cache = dict()\n\n\ndef fixed_quad(func, a, b, args=(), n=5):\n    \"\"\"\n    Compute a definite integral using fixed-order Gaussian quadrature.\n\n    Integrate `func` from `a` to `b` using Gaussian quadrature of\n    order `n`.\n\n    Parameters\n    ----------\n    func : callable\n        A Python function or method to integrate (must accept vector inputs).\n        If integrating a vector-valued function, the returned array must have\n        shape ``(..., len(x))``.\n    a : float\n        Lower limit of integration.\n    b : float\n        Upper limit of integration.\n    args : tuple, optional\n        Extra arguments to pass to function, if any.\n    n : int, optional\n        Order of quadrature integration. 
Default is 5.\n\n Returns\n -------\n val : float\n Gaussian quadrature approximation to the integral\n none : None\n Statically returned value of None\n\n\n See Also\n --------\n quad : adaptive quadrature using QUADPACK\n dblquad : double integrals\n tplquad : triple integrals\n romberg : adaptive Romberg quadrature\n quadrature : adaptive Gaussian quadrature\n romb : integrators for sampled data\n simps : integrators for sampled data\n cumtrapz : cumulative integration for sampled data\n ode : ODE integrator\n odeint : ODE integrator\n\n Examples\n --------\n >>> from scipy import integrate\n >>> f = lambda x: x**8\n >>> integrate.fixed_quad(f, 0.0, 1.0, n=4)\n (0.1110884353741496, None)\n >>> integrate.fixed_quad(f, 0.0, 1.0, n=5)\n (0.11111111111111102, None)\n >>> print(1/9.0) # analytical result\n 0.1111111111111111\n\n >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4)\n (0.9999999771971152, None)\n >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5)\n (1.000000000039565, None)\n >>> np.sin(np.pi/2)-np.sin(0) # analytical result\n 1.0\n\n \"\"\"\n x, w = _cached_roots_legendre(n)\n x = np.real(x)\n if np.isinf(a) or np.isinf(b):\n raise ValueError(\"Gaussian quadrature is only available for \"\n \"finite limits.\")\n y = (b-a)*(x+1)/2.0 + a\n return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None\n\n\ndef vectorize1(func, args=(), vec_func=False):\n \"\"\"Vectorize the call to a function.\n\n This is an internal utility function used by `romberg` and\n `quadrature` to create a vectorized version of a function.\n\n If `vec_func` is True, the function `func` is assumed to take vector\n arguments.\n\n Parameters\n ----------\n func : callable\n User defined function.\n args : tuple, optional\n Extra arguments for the function.\n vec_func : bool, optional\n True if the function func takes vector arguments.\n\n Returns\n -------\n vfunc : callable\n A function that will take a vector argument and return the\n result.\n\n \"\"\"\n if vec_func:\n def vfunc(x):\n return func(x, *args)\n else:\n def vfunc(x):\n if np.isscalar(x):\n return func(x, *args)\n x = np.asarray(x)\n # call with first point to get output type\n y0 = func(x[0], *args)\n n = len(x)\n dtype = getattr(y0, 'dtype', type(y0))\n output = np.empty((n,), dtype=dtype)\n output[0] = y0\n for i in range(1, n):\n output[i] = func(x[i], *args)\n return output\n return vfunc\n\n\ndef quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,\n vec_func=True, miniter=1):\n \"\"\"\n Compute a definite integral using fixed-tolerance Gaussian quadrature.\n\n Integrate `func` from `a` to `b` using Gaussian quadrature\n with absolute tolerance `tol`.\n\n Parameters\n ----------\n func : function\n A Python function or method to integrate.\n a : float\n Lower limit of integration.\n b : float\n Upper limit of integration.\n args : tuple, optional\n Extra arguments to pass to function.\n tol, rtol : float, optional\n Iteration stops when error between last two iterates is less than\n `tol` OR the relative change is less than `rtol`.\n maxiter : int, optional\n Maximum order of Gaussian quadrature.\n vec_func : bool, optional\n True or False if func handles arrays as arguments (is\n a \"vector\" function). 
Default is True.\n miniter : int, optional\n Minimum order of Gaussian quadrature.\n\n Returns\n -------\n val : float\n Gaussian quadrature approximation (within tolerance) to integral.\n err : float\n Difference between last two estimates of the integral.\n\n See also\n --------\n romberg: adaptive Romberg quadrature\n fixed_quad: fixed-order Gaussian quadrature\n quad: adaptive quadrature using QUADPACK\n dblquad: double integrals\n tplquad: triple integrals\n romb: integrator for sampled data\n simps: integrator for sampled data\n cumtrapz: cumulative integration for sampled data\n ode: ODE integrator\n odeint: ODE integrator\n\n Examples\n --------\n >>> from scipy import integrate\n >>> f = lambda x: x**8\n >>> integrate.quadrature(f, 0.0, 1.0)\n (0.11111111111111106, 4.163336342344337e-17)\n >>> print(1/9.0) # analytical result\n 0.1111111111111111\n\n >>> integrate.quadrature(np.cos, 0.0, np.pi/2)\n (0.9999999999999536, 3.9611425250996035e-11)\n >>> np.sin(np.pi/2)-np.sin(0) # analytical result\n 1.0\n\n \"\"\"\n if not isinstance(args, tuple):\n args = (args,)\n vfunc = vectorize1(func, args, vec_func=vec_func)\n val = np.inf\n err = np.inf\n maxiter = max(miniter+1, maxiter)\n for n in range(miniter, maxiter+1):\n newval = fixed_quad(vfunc, a, b, (), n)[0]\n err = abs(newval-val)\n val = newval\n\n if err < tol or err < rtol*abs(val):\n break\n else:\n warnings.warn(\n \"maxiter (%d) exceeded. Latest difference = %e\" % (maxiter, err),\n AccuracyWarning)\n return val, err\n\n\ndef tupleset(t, i, value):\n l = list(t)\n l[i] = value\n return tuple(l)\n\n\ndef cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):\n \"\"\"\n Cumulatively integrate y(x) using the composite trapezoidal rule.\n\n Parameters\n ----------\n y : array_like\n Values to integrate.\n x : array_like, optional\n The coordinate to integrate along. If None (default), use spacing `dx`\n between consecutive elements in `y`.\n dx : float, optional\n Spacing between elements of `y`. Only used if `x` is None.\n axis : int, optional\n Specifies the axis to cumulate. Default is -1 (last axis).\n initial : scalar, optional\n If given, insert this value at the beginning of the returned result.\n Typically this value should be 0. Default is None, which means no\n value at ``x[0]`` is returned and `res` has one element less than `y`\n along the axis of integration.\n\n Returns\n -------\n res : ndarray\n The result of cumulative integration of `y` along `axis`.\n If `initial` is None, the shape is such that the axis of integration\n has one less value than `y`. 
If `initial` is given, the shape is equal\n    to that of `y`.\n\n    See Also\n    --------\n    numpy.cumsum, numpy.cumprod\n    quad: adaptive quadrature using QUADPACK\n    romberg: adaptive Romberg quadrature\n    quadrature: adaptive Gaussian quadrature\n    fixed_quad: fixed-order Gaussian quadrature\n    dblquad: double integrals\n    tplquad: triple integrals\n    romb: integrators for sampled data\n    ode: ODE integrators\n    odeint: ODE integrators\n\n    Examples\n    --------\n    >>> from scipy import integrate\n    >>> import matplotlib.pyplot as plt\n\n    >>> x = np.linspace(-2, 2, num=20)\n    >>> y = x\n    >>> y_int = integrate.cumtrapz(y, x, initial=0)\n    >>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')\n    >>> plt.show()\n\n    \"\"\"\n    y = np.asarray(y)\n    if x is None:\n        d = dx\n    else:\n        x = np.asarray(x)\n        if x.ndim == 1:\n            d = np.diff(x)\n            # reshape to correct shape\n            shape = [1] * y.ndim\n            shape[axis] = -1\n            d = d.reshape(shape)\n        elif len(x.shape) != len(y.shape):\n            raise ValueError(\"If given, shape of x must be 1-D or the \"\n                             \"same as y.\")\n        else:\n            d = np.diff(x, axis=axis)\n\n        if d.shape[axis] != y.shape[axis] - 1:\n            raise ValueError(\"If given, length of x along axis must be the \"\n                             \"same as y.\")\n\n    nd = len(y.shape)\n    slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))\n    slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))\n    res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)\n\n    if initial is not None:\n        if not np.isscalar(initial):\n            raise ValueError(\"`initial` parameter should be a scalar.\")\n\n        shape = list(res.shape)\n        shape[axis] = 1\n        res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],\n                             axis=axis)\n\n    return res\n\n\ndef _basic_simps(y, start, stop, x, dx, axis):\n    nd = len(y.shape)\n    if start is None:\n        start = 0\n    step = 2\n    slice_all = (slice(None),)*nd\n    slice0 = tupleset(slice_all, axis, slice(start, stop, step))\n    slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))\n    slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))\n\n    if x is None:  # Evenly spaced Simpson's rule.\n        result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),\n                        axis=axis)\n    else:\n        # Account for possibly different spacings.\n        #    Simpson's rule changes a bit.\n        h = np.diff(x, axis=axis)\n        sl0 = tupleset(slice_all, axis, slice(start, stop, step))\n        sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))\n        h0 = h[sl0]\n        h1 = h[sl1]\n        hsum = h0 + h1\n        hprod = h0 * h1\n        h0divh1 = h0 / h1\n        tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +\n                          y[slice1]*hsum*hsum/hprod +\n                          y[slice2]*(2-h0divh1))\n        result = np.sum(tmp, axis=axis)\n    return result\n\n\ndef simps(y, x=None, dx=1, axis=-1, even='avg'):\n    \"\"\"\n    Integrate y(x) using samples along the given axis and the composite\n    Simpson's rule. If x is None, spacing of dx is assumed.\n\n    If there are an even number of samples, N, then there are an odd\n    number of intervals (N-1), but Simpson's rule requires an even number\n    of intervals. The parameter 'even' controls how this is handled.\n\n    Parameters\n    ----------\n    y : array_like\n        Array to be integrated.\n    x : array_like, optional\n        If given, the points at which `y` is sampled.\n    dx : float, optional\n        Spacing of integration points along axis of `y`. Only used when\n        `x` is None. Default is 1.\n    axis : int, optional\n        Axis along which to integrate.
Default is the last axis.\n even : str {'avg', 'first', 'last'}, optional\n 'avg' : Average two results:1) use the first N-2 intervals with\n a trapezoidal rule on the last interval and 2) use the last\n N-2 intervals with a trapezoidal rule on the first interval.\n\n 'first' : Use Simpson's rule for the first N-2 intervals with\n a trapezoidal rule on the last interval.\n\n 'last' : Use Simpson's rule for the last N-2 intervals with a\n trapezoidal rule on the first interval.\n\n See Also\n --------\n quad: adaptive quadrature using QUADPACK\n romberg: adaptive Romberg quadrature\n quadrature: adaptive Gaussian quadrature\n fixed_quad: fixed-order Gaussian quadrature\n dblquad: double integrals\n tplquad: triple integrals\n romb: integrators for sampled data\n cumtrapz: cumulative integration for sampled data\n ode: ODE integrators\n odeint: ODE integrators\n\n Notes\n -----\n For an odd number of samples that are equally spaced the result is\n exact if the function is a polynomial of order 3 or less. If\n the samples are not equally spaced, then the result is exact only\n if the function is a polynomial of order 2 or less.\n\n Examples\n --------\n >>> from scipy import integrate\n >>> x = np.arange(0, 10)\n >>> y = np.arange(0, 10)\n\n >>> integrate.simps(y, x)\n 40.5\n\n >>> y = np.power(x, 3)\n >>> integrate.simps(y, x)\n 1642.5\n >>> integrate.quad(lambda x: x**3, 0, 9)[0]\n 1640.25\n\n >>> integrate.simps(y, x, even='first')\n 1644.5\n\n \"\"\"\n y = np.asarray(y)\n nd = len(y.shape)\n N = y.shape[axis]\n last_dx = dx\n first_dx = dx\n returnshape = 0\n if x is not None:\n x = np.asarray(x)\n if len(x.shape) == 1:\n shapex = [1] * nd\n shapex[axis] = x.shape[0]\n saveshape = x.shape\n returnshape = 1\n x = x.reshape(tuple(shapex))\n elif len(x.shape) != len(y.shape):\n raise ValueError(\"If given, shape of x must be 1-D or the \"\n \"same as y.\")\n if x.shape[axis] != N:\n raise ValueError(\"If given, length of x along axis must be the \"\n \"same as y.\")\n if N % 2 == 0:\n val = 0.0\n result = 0.0\n slice1 = (slice(None),)*nd\n slice2 = (slice(None),)*nd\n if even not in ['avg', 'last', 'first']:\n raise ValueError(\"Parameter 'even' must be \"\n \"'avg', 'last', or 'first'.\")\n # Compute using Simpson's rule on first intervals\n if even in ['avg', 'first']:\n slice1 = tupleset(slice1, axis, -1)\n slice2 = tupleset(slice2, axis, -2)\n if x is not None:\n last_dx = x[slice1] - x[slice2]\n val += 0.5*last_dx*(y[slice1]+y[slice2])\n result = _basic_simps(y, 0, N-3, x, dx, axis)\n # Compute using Simpson's rule on last set of intervals\n if even in ['avg', 'last']:\n slice1 = tupleset(slice1, axis, 0)\n slice2 = tupleset(slice2, axis, 1)\n if x is not None:\n first_dx = x[tuple(slice2)] - x[tuple(slice1)]\n val += 0.5*first_dx*(y[slice2]+y[slice1])\n result += _basic_simps(y, 1, N-2, x, dx, axis)\n if even == 'avg':\n val /= 2.0\n result /= 2.0\n result = result + val\n else:\n result = _basic_simps(y, 0, N-2, x, dx, axis)\n if returnshape:\n x = x.reshape(saveshape)\n return result\n\n\ndef romb(y, dx=1.0, axis=-1, show=False):\n \"\"\"\n Romberg integration using samples of a function.\n\n Parameters\n ----------\n y : array_like\n A vector of ``2**k + 1`` equally-spaced samples of a function.\n dx : float, optional\n The sample spacing. Default is 1.\n axis : int, optional\n The axis along which to integrate. 
Default is -1 (last axis).\n show : bool, optional\n When `y` is a single 1-D array, then if this argument is True\n print the table showing Richardson extrapolation from the\n samples. Default is False.\n\n Returns\n -------\n romb : ndarray\n The integrated result for `axis`.\n\n See also\n --------\n quad : adaptive quadrature using QUADPACK\n romberg : adaptive Romberg quadrature\n quadrature : adaptive Gaussian quadrature\n fixed_quad : fixed-order Gaussian quadrature\n dblquad : double integrals\n tplquad : triple integrals\n simps : integrators for sampled data\n cumtrapz : cumulative integration for sampled data\n ode : ODE integrators\n odeint : ODE integrators\n\n Examples\n --------\n >>> from scipy import integrate\n >>> x = np.arange(10, 14.25, 0.25)\n >>> y = np.arange(3, 12)\n\n >>> integrate.romb(y)\n 56.0\n\n >>> y = np.sin(np.power(x, 2.5))\n >>> integrate.romb(y)\n -0.742561336672229\n\n >>> integrate.romb(y, show=True)\n Richardson Extrapolation Table for Romberg Integration\n ====================================================================\n -0.81576\n 4.63862 6.45674\n -1.10581 -3.02062 -3.65245\n -2.57379 -3.06311 -3.06595 -3.05664\n -1.34093 -0.92997 -0.78776 -0.75160 -0.74256\n ====================================================================\n -0.742561336672229\n \"\"\"\n y = np.asarray(y)\n nd = len(y.shape)\n Nsamps = y.shape[axis]\n Ninterv = Nsamps-1\n n = 1\n k = 0\n while n < Ninterv:\n n <<= 1\n k += 1\n if n != Ninterv:\n raise ValueError(\"Number of samples must be one plus a \"\n \"non-negative power of 2.\")\n\n R = {}\n slice_all = (slice(None),) * nd\n slice0 = tupleset(slice_all, axis, 0)\n slicem1 = tupleset(slice_all, axis, -1)\n h = Ninterv * np.asarray(dx, dtype=float)\n R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h\n slice_R = slice_all\n start = stop = step = Ninterv\n for i in range(1, k+1):\n start >>= 1\n slice_R = tupleset(slice_R, axis, slice(start, stop, step))\n step >>= 1\n R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))\n for j in range(1, i+1):\n prev = R[(i, j-1)]\n R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)\n h /= 2.0\n\n if show:\n if not np.isscalar(R[(0, 0)]):\n print(\"*** Printing table only supported for integrals\" +\n \" of a single data set.\")\n else:\n try:\n precis = show[0]\n except (TypeError, IndexError):\n precis = 5\n try:\n width = show[1]\n except (TypeError, IndexError):\n width = 8\n formstr = \"%%%d.%df\" % (width, precis)\n\n title = \"Richardson Extrapolation Table for Romberg Integration\"\n print(\"\", title.center(68), \"=\" * 68, sep=\"\\n\", end=\"\\n\")\n for i in range(k+1):\n for j in range(i+1):\n print(formstr % R[(i, j)], end=\" \")\n print()\n print(\"=\" * 68)\n print()\n\n return R[(k, k)]\n\n# Romberg quadratures for numeric integration.\n#\n# Written by Scott M. Ransom <[email protected]>\n# last revision: 14 Nov 98\n#\n# Cosmetic changes by Konrad Hinsen <[email protected]>\n# last revision: 1999-7-21\n#\n# Adapted to SciPy by Travis Oliphant <[email protected]>\n# last revision: Dec 2001\n\n\ndef _difftrap(function, interval, numtraps):\n \"\"\"\n Perform part of the trapezoidal rule to integrate a function.\n Assume that we had called difftrap with all lower powers-of-2\n starting with 1. Calling difftrap only returns the summation\n of the new ordinates. It does _not_ multiply by the width\n of the trapezoids. 
This must be performed by the caller.\n 'function' is the function to evaluate (must accept vector arguments).\n 'interval' is a sequence with lower and upper limits\n of integration.\n 'numtraps' is the number of trapezoids to use (must be a\n power-of-2).\n \"\"\"\n if numtraps <= 0:\n raise ValueError(\"numtraps must be > 0 in difftrap().\")\n elif numtraps == 1:\n return 0.5*(function(interval[0])+function(interval[1]))\n else:\n numtosum = numtraps/2\n h = float(interval[1]-interval[0])/numtosum\n lox = interval[0] + 0.5 * h\n points = lox + h * np.arange(numtosum)\n s = np.sum(function(points), axis=0)\n return s\n\n\ndef _romberg_diff(b, c, k):\n \"\"\"\n Compute the differences for the Romberg quadrature corrections.\n See Forman Acton's \"Real Computing Made Real,\" p 143.\n \"\"\"\n tmp = 4.0**k\n return (tmp * c - b)/(tmp - 1.0)\n\n\ndef _printresmat(function, interval, resmat):\n # Print the Romberg result matrix.\n i = j = 0\n print('Romberg integration of', repr(function), end=' ')\n print('from', interval)\n print('')\n print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))\n for i in range(len(resmat)):\n print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')\n for j in range(i+1):\n print('%9f' % (resmat[i][j]), end=' ')\n print('')\n print('')\n print('The final result is', resmat[i][j], end=' ')\n print('after', 2**(len(resmat)-1)+1, 'function evaluations.')\n\n\ndef romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,\n divmax=10, vec_func=False):\n \"\"\"\n Romberg integration of a callable function or method.\n\n Returns the integral of `function` (a function of one variable)\n over the interval (`a`, `b`).\n\n If `show` is 1, the triangular array of the intermediate results\n will be printed. If `vec_func` is True (default is False), then\n `function` is assumed to support vector arguments.\n\n Parameters\n ----------\n function : callable\n Function to be integrated.\n a : float\n Lower limit of integration.\n b : float\n Upper limit of integration.\n\n Returns\n -------\n results : float\n Result of the integration.\n\n Other Parameters\n ----------------\n args : tuple, optional\n Extra arguments to pass to function. Each element of `args` will\n be passed as a single argument to `func`. Default is to pass no\n extra arguments.\n tol, rtol : float, optional\n The desired absolute and relative tolerances. Defaults are 1.48e-8.\n show : bool, optional\n Whether to print the results. Default is False.\n divmax : int, optional\n Maximum order of extrapolation. Default is 10.\n vec_func : bool, optional\n Whether `func` handles arrays as arguments (i.e., whether it is a\n \"vector\" function). Default is False.\n\n See Also\n --------\n fixed_quad : Fixed-order Gaussian quadrature.\n quad : Adaptive quadrature using QUADPACK.\n dblquad : Double integrals.\n tplquad : Triple integrals.\n romb : Integrators for sampled data.\n simps : Integrators for sampled data.\n cumtrapz : Cumulative integration for sampled data.\n ode : ODE integrator.\n odeint : ODE integrator.\n\n References\n ----------\n .. 
[1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method\n\n Examples\n --------\n Integrate a gaussian from 0 to 1 and compare to the error function.\n\n >>> from scipy import integrate\n >>> from scipy.special import erf\n >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)\n >>> result = integrate.romberg(gaussian, 0, 1, show=True)\n Romberg integration of <function vfunc at ...> from [0, 1]\n\n ::\n\n Steps StepSize Results\n 1 1.000000 0.385872\n 2 0.500000 0.412631 0.421551\n 4 0.250000 0.419184 0.421368 0.421356\n 8 0.125000 0.420810 0.421352 0.421350 0.421350\n 16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350\n 32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350\n\n The final result is 0.421350396475 after 33 function evaluations.\n\n >>> print(\"%g %g\" % (2*result, erf(1)))\n 0.842701 0.842701\n\n \"\"\"\n if np.isinf(a) or np.isinf(b):\n raise ValueError(\"Romberg integration only available \"\n \"for finite limits.\")\n vfunc = vectorize1(function, args, vec_func=vec_func)\n n = 1\n interval = [a, b]\n intrange = b - a\n ordsum = _difftrap(vfunc, interval, n)\n result = intrange * ordsum\n resmat = [[result]]\n err = np.inf\n last_row = resmat[0]\n for i in range(1, divmax+1):\n n *= 2\n ordsum += _difftrap(vfunc, interval, n)\n row = [intrange * ordsum / n]\n for k in range(i):\n row.append(_romberg_diff(last_row[k], row[k], k+1))\n result = row[i]\n lastresult = last_row[i-1]\n if show:\n resmat.append(row)\n err = abs(result - lastresult)\n if err < tol or err < rtol * abs(result):\n break\n last_row = row\n else:\n warnings.warn(\n \"divmax (%d) exceeded. Latest difference = %e\" % (divmax, err),\n AccuracyWarning)\n\n if show:\n _printresmat(vfunc, interval, resmat)\n return result\n\n\n# Coefficients for Newton-Cotes quadrature\n#\n# These are the points being used\n# to construct the local interpolating polynomial\n# a are the weights for Newton-Cotes integration\n# B is the error coefficient.\n# error in these coefficients grows as N gets larger.\n# or as samples are closer and closer together\n\n# You can use maxima to find these rational coefficients\n# for equally spaced data using the commands\n# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);\n# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));\n# Bo(N) := N^(N+1)/(N+1)! 
* (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));\n# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));\n#\n# pre-computed for equally-spaced weights\n#\n# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]\n#\n# a = num_a*array(int_a)/den_a\n# B = num_B*1.0 / den_B\n#\n# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)\n# where k = N // 2\n#\n_builtincoeffs = {\n 1: (1,2,[1,1],-1,12),\n 2: (1,3,[1,4,1],-1,90),\n 3: (3,8,[1,3,3,1],-3,80),\n 4: (2,45,[7,32,12,32,7],-8,945),\n 5: (5,288,[19,75,50,50,75,19],-275,12096),\n 6: (1,140,[41,216,27,272,27,216,41],-9,1400),\n 7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),\n 8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],\n -2368,467775),\n 9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,\n 15741,2857], -4671, 394240),\n 10: (5,299376,[16067,106300,-48525,272400,-260550,427368,\n -260550,272400,-48525,106300,16067],\n -673175, 163459296),\n 11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,\n 15493566,15493566,-9595542,25226685,-3237113,\n 13486539,2171465], -2224234463, 237758976000),\n 12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,\n 87516288,-87797136,87516288,-51491295,35725120,\n -7587864,9903168,1364651], -3012, 875875),\n 13: (13, 402361344000,[8181904909, 56280729661, -31268252574,\n 156074417954,-151659573325,206683437987,\n -43111992612,-43111992612,206683437987,\n -151659573325,156074417954,-31268252574,\n 56280729661,8181904909], -2639651053,\n 344881152000),\n 14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,\n -6625093363,12630121616,-16802270373,19534438464,\n -16802270373,12630121616,-6625093363,3501442784,\n -770720657,710986864,90241897], -3740727473,\n 1275983280000)\n }\n\n\ndef newton_cotes(rn, equal=0):\n r\"\"\"\n Return weights and error coefficient for Newton-Cotes integration.\n\n Suppose we have (N+1) samples of f at the positions\n x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the\n integral between x_0 and x_N is:\n\n :math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)\n + B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`\n\n where :math:`\\xi \\in [x_0,x_N]`\n and :math:`\\Delta x = \\frac{x_N-x_0}{N}` is the average samples spacing.\n\n If the samples are equally-spaced and N is even, then the error\n term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.\n\n Parameters\n ----------\n rn : int\n The integer order for equally-spaced data or the relative positions of\n the samples with the first sample at 0 and the last at N, where N+1 is\n the length of `rn`. N is the order of the Newton-Cotes integration.\n equal : int, optional\n Set to 1 to enforce equally spaced data.\n\n Returns\n -------\n an : ndarray\n 1-D array of weights to apply to the function at the provided sample\n positions.\n B : float\n Error coefficient.\n\n Examples\n --------\n Compute the integral of sin(x) in [0, :math:`\\pi`]:\n\n >>> from scipy.integrate import newton_cotes\n >>> def f(x):\n ... return np.sin(x)\n >>> a = 0\n >>> b = np.pi\n >>> exact = 2\n >>> for N in [2, 4, 6, 8, 10]:\n ... x = np.linspace(a, b, N + 1)\n ... an, B = newton_cotes(N, 1)\n ... dx = (b - a) / N\n ... quad = dx * np.sum(an * f(x))\n ... error = abs(quad - exact)\n ... 
print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error))\n ...\n 2 2.094395102 9.43951e-02\n 4 1.998570732 1.42927e-03\n 6 2.000017814 1.78136e-05\n 8 1.999999835 1.64725e-07\n 10 2.000000001 1.14677e-09\n\n Notes\n -----\n Normally, the Newton-Cotes rules are used on smaller integration\n regions and a composite rule is used to return the total integral.\n\n \"\"\"\n try:\n N = len(rn)-1\n if equal:\n rn = np.arange(N+1)\n elif np.all(np.diff(rn) == 1):\n equal = 1\n except Exception:\n N = rn\n rn = np.arange(N+1)\n equal = 1\n\n if equal and N in _builtincoeffs:\n na, da, vi, nb, db = _builtincoeffs[N]\n an = na * np.array(vi, dtype=float) / da\n return an, float(nb)/db\n\n if (rn[0] != 0) or (rn[-1] != N):\n raise ValueError(\"The sample positions must start at 0\"\n \" and end at N\")\n yi = rn / float(N)\n ti = 2 * yi - 1\n nvec = np.arange(N+1)\n C = ti ** nvec[:, np.newaxis]\n Cinv = np.linalg.inv(C)\n # improve precision of result\n for i in range(2):\n Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)\n vec = 2.0 / (nvec[::2]+1)\n ai = Cinv[:, ::2].dot(vec) * (N / 2.)\n\n if (N % 2 == 0) and equal:\n BN = N/(N+3.)\n power = N+2\n else:\n BN = N/(N+2.)\n power = N+1\n\n BN = BN - np.dot(yi**power, ai)\n p1 = power+1\n fac = power*math.log(N) - gammaln(p1)\n fac = math.exp(fac)\n return ai, BN*fac\n",
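A minimal usage sketch follows (an editorial addition, not part of the SciPy module above; it assumes only NumPy and a SciPy version that still exports these routines from `scipy.integrate`). It confirms that `newton_cotes(2)` reproduces the Simpson weights ``[1, 4, 1]/3`` tabulated in `_builtincoeffs`, and that `romb` integrates ``2**k + 1`` equally spaced samples of ``sin`` over ``[0, pi]`` to the exact value 2.

import numpy as np
from scipy.integrate import newton_cotes, romb

# Simpson's rule is the N = 2 Newton-Cotes formula: weights [1, 4, 1]/3.
an, B = newton_cotes(2, equal=1)
print(an)   # -> [0.33333333 1.33333333 0.33333333]
print(B)    # -> -0.0111..., i.e. the -1/90 error coefficient for N = 2

# romb needs 2**k + 1 equally spaced samples; here k = 4 gives 17 points.
x = np.linspace(0.0, np.pi, 2**4 + 1)
print(romb(np.sin(x), dx=x[1] - x[0]))   # -> ~2.0, the exact integral of sin on [0, pi]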
"# Author: Travis Oliphant\n# 1999 -- 2002\n\nimport operator\nimport math\nimport timeit\nfrom scipy.spatial import cKDTree\nfrom . import sigtools, dlti\nfrom ._upfirdn import upfirdn, _output_len, _upfirdn_modes\nfrom scipy import linalg, fft as sp_fft\nfrom scipy.fft._helper import _init_nd_shape_and_axes\nimport numpy as np\nfrom scipy.special import lambertw\nfrom .windows import get_window\nfrom ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext\nfrom .filter_design import cheby1, _validate_sos\nfrom .fir_filter_design import firwin\nfrom ._sosfilt import _sosfilt\nimport warnings\n\n\n__all__ = ['correlate', 'correlate2d',\n 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve',\n 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',\n 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',\n 'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',\n 'residuez', 'resample', 'resample_poly', 'detrend',\n 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',\n 'filtfilt', 'decimate', 'vectorstrength']\n\n\n_modedict = {'valid': 0, 'same': 1, 'full': 2}\n\n_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,\n 'symmetric': 1, 'reflect': 4}\n\n\ndef _valfrommode(mode):\n try:\n return _modedict[mode]\n except KeyError:\n raise ValueError(\"Acceptable mode flags are 'valid',\"\n \" 'same', or 'full'.\")\n\n\ndef _bvalfromboundary(boundary):\n try:\n return _boundarydict[boundary] << 2\n except KeyError:\n raise ValueError(\"Acceptable boundary flags are 'fill', 'circular' \"\n \"(or 'wrap'), and 'symmetric' (or 'symm').\")\n\n\ndef _inputs_swap_needed(mode, shape1, shape2, axes=None):\n \"\"\"Determine if inputs arrays need to be swapped in `\"valid\"` mode.\n\n If in `\"valid\"` mode, returns whether or not the input arrays need to be\n swapped depending on whether `shape1` is at least as large as `shape2` in\n every calculated dimension.\n\n This is important for some of the correlation and convolution\n implementations in this module, where the larger array input needs to come\n before the smaller array input when operating in this mode.\n\n Note that if the mode provided is not 'valid', False is immediately\n returned.\n\n \"\"\"\n if mode != 'valid':\n return False\n\n if not shape1:\n return False\n\n if axes is None:\n axes = range(len(shape1))\n\n ok1 = all(shape1[i] >= shape2[i] for i in axes)\n ok2 = all(shape2[i] >= shape1[i] for i in axes)\n\n if not (ok1 or ok2):\n raise ValueError(\"For 'valid' mode, one must be at least \"\n \"as large as the other in every dimension\")\n\n return not ok1\n\n\ndef correlate(in1, in2, mode='full', method='auto'):\n r\"\"\"\n Cross-correlate two N-dimensional arrays.\n\n Cross-correlate `in1` and `in2`, with the output size determined by the\n `mode` argument.\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear cross-correlation\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. 
In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n method : str {'auto', 'direct', 'fft'}, optional\n A string indicating which method to use to calculate the correlation.\n\n ``direct``\n The correlation is determined directly from sums, the definition of\n correlation.\n ``fft``\n The Fast Fourier Transform is used to perform the correlation more\n quickly (only available for numerical arrays.)\n ``auto``\n Automatically chooses direct or Fourier method based on an estimate\n of which is faster (default). See `convolve` Notes for more detail.\n\n .. versionadded:: 0.19.0\n\n Returns\n -------\n correlate : array\n An N-dimensional array containing a subset of the discrete linear\n cross-correlation of `in1` with `in2`.\n\n See Also\n --------\n choose_conv_method : contains more documentation on `method`.\n\n Notes\n -----\n The correlation z of two d-dimensional arrays x and y is defined as::\n\n z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])\n\n This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')``\n then\n\n .. math::\n\n z[k] = (x * y)(k - N + 1)\n = \\sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}\n\n for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`\n\n where :math:`||x||` is the length of ``x``, :math:`N = \\max(||x||,||y||)`,\n and :math:`y_m` is 0 when m is outside the range of y.\n\n ``method='fft'`` only works for numerical arrays as it relies on\n `fftconvolve`. In certain cases (i.e., arrays of objects or when\n rounding integers can lose precision), ``method='direct'`` is always used.\n\n When using \"same\" mode with even-length inputs, the outputs of `correlate`\n and `correlate2d` differ: There is a 1-index offset between them.\n\n Examples\n --------\n Implement a matched filter using cross-correlation, to recover a signal\n that has passed through a noisy channel.\n\n >>> from scipy import signal\n >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)\n >>> sig_noise = sig + np.random.randn(len(sig))\n >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128\n\n >>> import matplotlib.pyplot as plt\n >>> clock = np.arange(64, len(sig), 128)\n >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)\n >>> ax_orig.plot(sig)\n >>> ax_orig.plot(clock, sig[clock], 'ro')\n >>> ax_orig.set_title('Original signal')\n >>> ax_noise.plot(sig_noise)\n >>> ax_noise.set_title('Signal with noise')\n >>> ax_corr.plot(corr)\n >>> ax_corr.plot(clock, corr[clock], 'ro')\n >>> ax_corr.axhline(0.5, ls=':')\n >>> ax_corr.set_title('Cross-correlated with rectangular pulse')\n >>> ax_orig.margins(0, 0.1)\n >>> fig.tight_layout()\n >>> fig.show()\n\n \"\"\"\n in1 = np.asarray(in1)\n in2 = np.asarray(in2)\n\n if in1.ndim == in2.ndim == 0:\n return in1 * in2.conj()\n elif in1.ndim != in2.ndim:\n raise ValueError(\"in1 and in2 should have the same dimensionality\")\n\n # Don't use _valfrommode, since correlate should not accept numeric modes\n try:\n val = _modedict[mode]\n except KeyError:\n raise ValueError(\"Acceptable mode flags are 'valid',\"\n \" 'same', or 'full'.\")\n\n # this either calls fftconvolve or this function with method=='direct'\n if method in ('fft', 'auto'):\n return convolve(in1, _reverse_and_conj(in2), mode, method)\n\n elif method == 'direct':\n # fastpath to faster numpy.correlate for 1d inputs when possible\n if _np_conv_ok(in1, in2, mode):\n return 
np.correlate(in1, in2, mode)\n\n        # _correlateND is far slower when in2.size > in1.size, so swap them\n        # and then undo the effect afterward if mode == 'full'.  Also, it fails\n        # with 'valid' mode if in2 is larger than in1, so swap those, too.\n        # Don't swap inputs for 'same' mode, since shape of in1 matters.\n        swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or\n                          _inputs_swap_needed(mode, in1.shape, in2.shape))\n\n        if swapped_inputs:\n            in1, in2 = in2, in1\n\n        if mode == 'valid':\n            ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]\n            out = np.empty(ps, in1.dtype)\n\n            z = sigtools._correlateND(in1, in2, out, val)\n\n        else:\n            ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]\n\n            # zero pad input\n            in1zpadded = np.zeros(ps, in1.dtype)\n            sc = tuple(slice(0, i) for i in in1.shape)\n            in1zpadded[sc] = in1.copy()\n\n            if mode == 'full':\n                out = np.empty(ps, in1.dtype)\n            elif mode == 'same':\n                out = np.empty(in1.shape, in1.dtype)\n\n            z = sigtools._correlateND(in1zpadded, in2, out, val)\n\n        if swapped_inputs:\n            # Reverse and conjugate to undo the effect of swapping inputs\n            z = _reverse_and_conj(z)\n\n        return z\n\n    else:\n        raise ValueError(\"Acceptable method flags are 'auto',\"\n                         \" 'direct', or 'fft'.\")\n\n\ndef _centered(arr, newshape):\n    # Return the center newshape portion of the array.\n    newshape = np.asarray(newshape)\n    currshape = np.array(arr.shape)\n    startind = (currshape - newshape) // 2\n    endind = startind + newshape\n    myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]\n    return arr[tuple(myslice)]\n\n\ndef _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False):\n    \"\"\"Handle the axes argument for frequency-domain convolution.\n\n    Returns the inputs and axes in a standard form, eliminating redundant axes,\n    swapping the inputs if necessary, and checking for various potential\n    errors.\n\n    Parameters\n    ----------\n    in1 : array\n        First input.\n    in2 : array\n        Second input.\n    mode : str {'full', 'valid', 'same'}, optional\n        A string indicating the size of the output.\n        See the documentation `fftconvolve` for more information.\n    axes : list of ints\n        Axes over which to compute the FFTs.\n    sorted_axes : bool, optional\n        If `True`, sort the axes.\n        Default is `False`, do not sort.\n\n    Returns\n    -------\n    in1 : array\n        The first input, possibly swapped with the second input.\n    in2 : array\n        The second input, possibly swapped with the first input.\n    axes : list of ints\n        Axes over which to compute the FFTs.\n\n    \"\"\"\n    s1 = in1.shape\n    s2 = in2.shape\n    noaxes = axes is None\n\n    _, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes)\n\n    if not noaxes and not len(axes):\n        raise ValueError(\"when provided, axes cannot be empty\")\n\n    # Axes of length 1 can rely on broadcasting rules for multiply,\n    # no fft needed.\n    axes = [a for a in axes if s1[a] != 1 and s2[a] != 1]\n\n    if sorted_axes:\n        axes.sort()\n\n    if not all(s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1\n               for a in range(in1.ndim) if a not in axes):\n        raise ValueError(\"incompatible shapes for in1 and in2:\"\n                         \" {0} and {1}\".format(s1, s2))\n\n    # Check that input sizes are compatible with 'valid' mode.\n    if _inputs_swap_needed(mode, s1, s2, axes=axes):\n        # Convolution is commutative; order doesn't have any effect on output.\n        in1, in2 = in2, in1\n\n    return in1, in2, axes\n\n\ndef _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False):\n    \"\"\"Convolve two arrays in the frequency domain.\n\n    This function implements only the base FFT-related operations.\n    Specifically, it converts
the signals to the frequency domain, multiplies\n them, then converts them back to the time domain. Calculations of axes,\n shapes, convolution mode, etc. are implemented in higher level-functions,\n such as `fftconvolve` and `oaconvolve`. Those functions should be used\n instead of this one.\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. Should have the same number of dimensions as `in1`.\n axes : array_like of ints\n Axes over which to compute the FFTs.\n shape : array_like of ints\n The sizes of the FFTs.\n calc_fast_len : bool, optional\n If `True`, set each value of `shape` to the next fast FFT length.\n Default is `False`, use `axes` as-is.\n\n Returns\n -------\n out : array\n An N-dimensional array containing the discrete linear convolution of\n `in1` with `in2`.\n\n \"\"\"\n if not len(axes):\n return in1 * in2\n\n complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c')\n\n if calc_fast_len:\n # Speed up FFT by padding to optimal size.\n fshape = [\n sp_fft.next_fast_len(shape[a], not complex_result) for a in axes]\n else:\n fshape = shape\n\n if not complex_result:\n fft, ifft = sp_fft.rfftn, sp_fft.irfftn\n else:\n fft, ifft = sp_fft.fftn, sp_fft.ifftn\n\n sp1 = fft(in1, fshape, axes=axes)\n sp2 = fft(in2, fshape, axes=axes)\n\n ret = ifft(sp1 * sp2, fshape, axes=axes)\n\n if calc_fast_len:\n fslice = tuple([slice(sz) for sz in shape])\n ret = ret[fslice]\n\n return ret\n\n\ndef _apply_conv_mode(ret, s1, s2, mode, axes):\n \"\"\"Calculate the convolution result shape based on the `mode` argument.\n\n Returns the result sliced to the correct size for the given mode.\n\n Parameters\n ----------\n ret : array\n The result array, with the appropriate shape for the 'full' mode.\n s1 : list of int\n The shape of the first input.\n s2 : list of int\n The shape of the second input.\n mode : str {'full', 'valid', 'same'}\n A string indicating the size of the output.\n See the documentation `fftconvolve` for more information.\n axes : list of ints\n Axes over which to compute the convolution.\n\n Returns\n -------\n ret : array\n A copy of `res`, sliced to the correct size for the given `mode`.\n\n \"\"\"\n if mode == \"full\":\n return ret.copy()\n elif mode == \"same\":\n return _centered(ret, s1).copy()\n elif mode == \"valid\":\n shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1\n for a in range(ret.ndim)]\n return _centered(ret, shape_valid).copy()\n else:\n raise ValueError(\"acceptable mode flags are 'valid',\"\n \" 'same', or 'full'\")\n\n\ndef fftconvolve(in1, in2, mode=\"full\", axes=None):\n \"\"\"Convolve two N-dimensional arrays using FFT.\n\n Convolve `in1` and `in2` using the fast Fourier transform method, with\n the output size determined by the `mode` argument.\n\n This is generally much faster than `convolve` for large arrays (n > ~500),\n but can be slower when only a few output values are needed, and can only\n output float arrays (int or object array inputs will be cast to float).\n\n As of v0.19, `convolve` automatically chooses this method or the direct\n method based on an estimation of which is faster.\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear convolution\n of the inputs. 
(Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n axes : int or array_like of ints or None, optional\n Axes over which to compute the convolution.\n The default is over all axes.\n\n Returns\n -------\n out : array\n An N-dimensional array containing a subset of the discrete linear\n convolution of `in1` with `in2`.\n\n See Also\n --------\n convolve : Uses the direct convolution or FFT convolution algorithm\n depending on which is faster.\n oaconvolve : Uses the overlap-add method to do convolution, which is\n generally faster when the input arrays are large and\n significantly different in size.\n\n Examples\n --------\n Autocorrelation of white noise is an impulse.\n\n >>> from scipy import signal\n >>> sig = np.random.randn(1000)\n >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')\n\n >>> import matplotlib.pyplot as plt\n >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)\n >>> ax_orig.plot(sig)\n >>> ax_orig.set_title('White noise')\n >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)\n >>> ax_mag.set_title('Autocorrelation')\n >>> fig.tight_layout()\n >>> fig.show()\n\n Gaussian blur implemented using FFT convolution. Notice the dark borders\n around the image, due to the zero-padding beyond its boundaries.\n The `convolve2d` function allows for other types of image boundaries,\n but is far slower.\n\n >>> from scipy import misc\n >>> face = misc.face(gray=True)\n >>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))\n >>> blurred = signal.fftconvolve(face, kernel, mode='same')\n\n >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,\n ... 
figsize=(6, 15))\n    >>> ax_orig.imshow(face, cmap='gray')\n    >>> ax_orig.set_title('Original')\n    >>> ax_orig.set_axis_off()\n    >>> ax_kernel.imshow(kernel, cmap='gray')\n    >>> ax_kernel.set_title('Gaussian kernel')\n    >>> ax_kernel.set_axis_off()\n    >>> ax_blurred.imshow(blurred, cmap='gray')\n    >>> ax_blurred.set_title('Blurred')\n    >>> ax_blurred.set_axis_off()\n    >>> fig.show()\n\n    \"\"\"\n    in1 = np.asarray(in1)\n    in2 = np.asarray(in2)\n\n    if in1.ndim == in2.ndim == 0:  # scalar inputs\n        return in1 * in2\n    elif in1.ndim != in2.ndim:\n        raise ValueError(\"in1 and in2 should have the same dimensionality\")\n    elif in1.size == 0 or in2.size == 0:  # empty arrays\n        return np.array([])\n\n    in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,\n                                          sorted_axes=False)\n\n    s1 = in1.shape\n    s2 = in2.shape\n\n    shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1\n             for i in range(in1.ndim)]\n\n    ret = _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True)\n\n    return _apply_conv_mode(ret, s1, s2, mode, axes)\n\n\ndef _calc_oa_lens(s1, s2):\n    \"\"\"Calculate the optimal FFT lengths for overlap-add convolution.\n\n    The calculation is done for a single dimension.\n\n    Parameters\n    ----------\n    s1 : int\n        Size of the dimension for the first array.\n    s2 : int\n        Size of the dimension for the second array.\n\n    Returns\n    -------\n    block_size : int\n        The size of the FFT blocks.\n    overlap : int\n        The amount of overlap between two blocks.\n    in1_step : int\n        The size of each step for the first array.\n    in2_step : int\n        The size of each step for the second array.\n\n    \"\"\"\n    # Set up the arguments for the conventional FFT approach.\n    fallback = (s1+s2-1, None, s1, s2)\n\n    # Use conventional FFT convolve if sizes are the same.\n    if s1 == s2 or s1 == 1 or s2 == 1:\n        return fallback\n\n    if s2 > s1:\n        s1, s2 = s2, s1\n        swapped = True\n    else:\n        swapped = False\n\n    # There cannot be a useful block size if s2 is more than half of s1.\n    if s2 >= s1/2:\n        return fallback\n\n    # Derivation of optimal block length\n    # For original formula see:\n    # https://en.wikipedia.org/wiki/Overlap-add_method\n    #\n    # Formula:\n    # K = overlap = s2-1\n    # N = block_size\n    # C = complexity\n    # e = exponential, exp(1)\n    #\n    # C = (N*(log2(N)+1))/(N-K)\n    # C = (N*log2(2N))/(N-K)\n    # C = N/(N-K) * log2(2N)\n    # C1 = N/(N-K)\n    # C2 = log2(2N) = ln(2N)/ln(2)\n    #\n    # dC1/dN = (1*(N-K)-N)/(N-K)^2 = -K/(N-K)^2\n    # dC2/dN = 2/(2*N*ln(2)) = 1/(N*ln(2))\n    #\n    # dC/dN = dC1/dN*C2 + dC2/dN*C1\n    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + N/(N*ln(2)*(N-K))\n    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + 1/(ln(2)*(N-K))\n    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + (N-K)/(ln(2)*(N-K)^2)\n    # dC/dN = (-K*ln(2N) + (N-K))/(ln(2)*(N-K)^2)\n    # dC/dN = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)\n    #\n    # Solve for minimum, where dC/dN = 0\n    # 0 = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)\n    # 0 * ln(2)*(N-K)^2 = N - K*ln(2N) - K\n    # 0 = N - K*ln(2N) - K\n    # 0 = N - K*(ln(2N) + 1)\n    # 0 = N - K*ln(2Ne)\n    # N = K*ln(2Ne)\n    # N/K = ln(2Ne)\n    #\n    # e^(N/K) = e^ln(2Ne)\n    # e^(N/K) = 2Ne\n    # 1/e^(N/K) = 1/(2*N*e)\n    # e^(N/-K) = 1/(2*N*e)\n    # e^(N/-K) = K/N*1/(2*K*e)\n    # N/K*e^(N/-K) = 1/(2*e*K)\n    # N/-K*e^(N/-K) = -1/(2*e*K)\n    #\n    # Using Lambert W function\n    # https://en.wikipedia.org/wiki/Lambert_W_function\n    # x = W(y) It is the solution to y = x*e^x\n    # x = N/-K\n    # y = -1/(2*e*K)\n    #\n    # N/-K = W(-1/(2*e*K))\n    #\n    # N = -K*W(-1/(2*e*K))\n    overlap = s2-1\n    opt_size = -overlap*lambertw(-1/(2*math.e*overlap), k=-1).real\n    block_size = sp_fft.next_fast_len(math.ceil(opt_size))\n\n    # Use conventional
FFT convolve if there is only going to be one block.\n if block_size >= s1:\n return fallback\n\n if not swapped:\n in1_step = block_size-s2+1\n in2_step = s2\n else:\n in1_step = s2\n in2_step = block_size-s2+1\n\n return block_size, overlap, in1_step, in2_step\n\n\ndef oaconvolve(in1, in2, mode=\"full\", axes=None):\n \"\"\"Convolve two N-dimensional arrays using the overlap-add method.\n\n Convolve `in1` and `in2` using the overlap-add method, with\n the output size determined by the `mode` argument.\n\n This is generally much faster than `convolve` for large arrays (n > ~500),\n and generally much faster than `fftconvolve` when one array is much\n larger than the other, but can be slower when only a few output values are\n needed or when the arrays are very similar in shape, and can only\n output float arrays (int or object array inputs will be cast to float).\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear convolution\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n axes : int or array_like of ints or None, optional\n Axes over which to compute the convolution.\n The default is over all axes.\n\n Returns\n -------\n out : array\n An N-dimensional array containing a subset of the discrete linear\n convolution of `in1` with `in2`.\n\n See Also\n --------\n convolve : Uses the direct convolution or FFT convolution algorithm\n depending on which is faster.\n fftconvolve : An implementation of convolution using FFT.\n\n Notes\n -----\n .. versionadded:: 1.4.0\n\n Examples\n --------\n Convolve a 100,000 sample signal with a 512-sample filter.\n\n >>> from scipy import signal\n >>> sig = np.random.randn(100000)\n >>> filt = signal.firwin(512, 0.01)\n >>> fsig = signal.oaconvolve(sig, filt)\n\n >>> import matplotlib.pyplot as plt\n >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)\n >>> ax_orig.plot(sig)\n >>> ax_orig.set_title('White noise')\n >>> ax_mag.plot(fsig)\n >>> ax_mag.set_title('Filtered noise')\n >>> fig.tight_layout()\n >>> fig.show()\n\n References\n ----------\n .. [1] Wikipedia, \"Overlap-add_method\".\n https://en.wikipedia.org/wiki/Overlap-add_method\n .. [2] Richard G. Lyons. Understanding Digital Signal Processing,\n Third Edition, 2011. 
Chapter 13.10.\n ISBN 13: 978-0137-02741-5\n\n \"\"\"\n in1 = np.asarray(in1)\n in2 = np.asarray(in2)\n\n if in1.ndim == in2.ndim == 0: # scalar inputs\n return in1 * in2\n elif in1.ndim != in2.ndim:\n raise ValueError(\"in1 and in2 should have the same dimensionality\")\n elif in1.size == 0 or in2.size == 0: # empty arrays\n return np.array([])\n elif in1.shape == in2.shape: # Equivalent to fftconvolve\n return fftconvolve(in1, in2, mode=mode, axes=axes)\n\n in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,\n sorted_axes=True)\n\n s1 = in1.shape\n s2 = in2.shape\n\n if not axes:\n ret = in1 * in2\n return _apply_conv_mode(ret, s1, s2, mode, axes)\n\n # Calculate this now since in1 is changed later\n shape_final = [None if i not in axes else\n s1[i] + s2[i] - 1 for i in range(in1.ndim)]\n\n # Calculate the block sizes for the output, steps, first and second inputs.\n # It is simpler to calculate them all together than doing them in separate\n # loops due to all the special cases that need to be handled.\n optimal_sizes = ((-1, -1, s1[i], s2[i]) if i not in axes else\n _calc_oa_lens(s1[i], s2[i]) for i in range(in1.ndim))\n block_size, overlaps, \\\n in1_step, in2_step = zip(*optimal_sizes)\n\n # Fall back to fftconvolve if there is only one block in every dimension.\n if in1_step == s1 and in2_step == s2:\n return fftconvolve(in1, in2, mode=mode, axes=axes)\n\n # Figure out the number of steps and padding.\n # This would get too complicated in a list comprehension.\n nsteps1 = []\n nsteps2 = []\n pad_size1 = []\n pad_size2 = []\n for i in range(in1.ndim):\n if i not in axes:\n pad_size1 += [(0, 0)]\n pad_size2 += [(0, 0)]\n continue\n\n if s1[i] > in1_step[i]:\n curnstep1 = math.ceil((s1[i]+1)/in1_step[i])\n if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]:\n curnstep1 += 1\n\n curpad1 = curnstep1*in1_step[i] - s1[i]\n else:\n curnstep1 = 1\n curpad1 = 0\n\n if s2[i] > in2_step[i]:\n curnstep2 = math.ceil((s2[i]+1)/in2_step[i])\n if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]:\n curnstep2 += 1\n\n curpad2 = curnstep2*in2_step[i] - s2[i]\n else:\n curnstep2 = 1\n curpad2 = 0\n\n nsteps1 += [curnstep1]\n nsteps2 += [curnstep2]\n pad_size1 += [(0, curpad1)]\n pad_size2 += [(0, curpad2)]\n\n # Pad the array to a size that can be reshaped to the desired shape\n # if necessary.\n if not all(curpad == (0, 0) for curpad in pad_size1):\n in1 = np.pad(in1, pad_size1, mode='constant', constant_values=0)\n\n if not all(curpad == (0, 0) for curpad in pad_size2):\n in2 = np.pad(in2, pad_size2, mode='constant', constant_values=0)\n\n # Reshape the overlap-add parts to input block sizes.\n split_axes = [iax+i for i, iax in enumerate(axes)]\n fft_axes = [iax+1 for iax in split_axes]\n\n # We need to put each new dimension before the corresponding dimension\n # being reshaped in order to get the data in the right layout at the end.\n reshape_size1 = list(in1_step)\n reshape_size2 = list(in2_step)\n for i, iax in enumerate(split_axes):\n reshape_size1.insert(iax, nsteps1[i])\n reshape_size2.insert(iax, nsteps2[i])\n\n in1 = in1.reshape(*reshape_size1)\n in2 = in2.reshape(*reshape_size2)\n\n # Do the convolution.\n fft_shape = [block_size[i] for i in axes]\n ret = _freq_domain_conv(in1, in2, fft_axes, fft_shape, calc_fast_len=False)\n\n # Do the overlap-add.\n for ax, ax_fft, ax_split in zip(axes, fft_axes, split_axes):\n overlap = overlaps[ax]\n if overlap is None:\n continue\n\n ret, overpart = np.split(ret, [-overlap], ax_fft)\n overpart = np.split(overpart, [-1], 
ax_split)[0]\n\n ret_overpart = np.split(ret, [overlap], ax_fft)[0]\n ret_overpart = np.split(ret_overpart, [1], ax_split)[1]\n ret_overpart += overpart\n\n # Reshape back to the correct dimensionality.\n shape_ret = [ret.shape[i] if i not in fft_axes else\n ret.shape[i]*ret.shape[i-1]\n for i in range(ret.ndim) if i not in split_axes]\n ret = ret.reshape(*shape_ret)\n\n # Slice to the correct size.\n slice_final = tuple([slice(islice) for islice in shape_final])\n ret = ret[slice_final]\n\n return _apply_conv_mode(ret, s1, s2, mode, axes)\n\n\ndef _numeric_arrays(arrays, kinds='buifc'):\n \"\"\"\n See if a list of arrays are all numeric.\n\n Parameters\n ----------\n ndarrays : array or list of arrays\n arrays to check if numeric.\n numeric_kinds : string-like\n The dtypes of the arrays to be checked. If the dtype.kind of\n the ndarrays are not in this string the function returns False and\n otherwise returns True.\n \"\"\"\n if type(arrays) == np.ndarray:\n return arrays.dtype.kind in kinds\n for array_ in arrays:\n if array_.dtype.kind not in kinds:\n return False\n return True\n\n\ndef _prod(iterable):\n \"\"\"\n Product of a list of numbers.\n Faster than np.prod for short lists like array shapes.\n \"\"\"\n product = 1\n for x in iterable:\n product *= x\n return product\n\n\ndef _conv_ops(x_shape, h_shape, mode):\n \"\"\"\n Find the number of operations required for direct/fft methods of\n convolution. The direct operations were recorded by making a dummy class to\n record the number of operations by overriding ``__mul__`` and ``__add__``.\n The FFT operations rely on the (well-known) computational complexity of the\n FFT (and the implementation of ``_freq_domain_conv``).\n\n \"\"\"\n if mode == \"full\":\n out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]\n elif mode == \"valid\":\n out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)]\n elif mode == \"same\":\n out_shape = x_shape\n else:\n raise ValueError(\"Acceptable mode flags are 'valid',\"\n \" 'same', or 'full', not mode={}\".format(mode))\n\n s1, s2 = x_shape, h_shape\n if len(x_shape) == 1:\n s1, s2 = s1[0], s2[0]\n if mode == \"full\":\n direct_ops = s1 * s2\n elif mode == \"valid\":\n direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2\n elif mode == \"same\":\n direct_ops = (s1 * s2 if s1 < s2 else\n s1 * s2 - (s2 // 2) * ((s2 + 1) // 2))\n else:\n if mode == \"full\":\n direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)\n elif mode == \"valid\":\n direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)\n elif mode == \"same\":\n direct_ops = _prod(s1) * _prod(s2)\n\n full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]\n N = _prod(full_out_shape)\n fft_ops = 3 * N * np.log(N) # 3 separate FFTs of size full_out_shape\n return fft_ops, direct_ops\n\n\ndef _fftconv_faster(x, h, mode):\n \"\"\"\n See if using fftconvolve or convolve is faster.\n\n Parameters\n ----------\n x : np.ndarray\n Signal\n h : np.ndarray\n Kernel\n mode : str\n Mode passed to convolve\n\n Returns\n -------\n fft_faster : bool\n\n Notes\n -----\n See docstring of `choose_conv_method` for details on tuning hardware.\n\n See pull request 11031 for more detail:\n https://github.com/scipy/scipy/pull/11031.\n\n \"\"\"\n fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode)\n offset = -1e-3 if x.ndim == 1 else -1e-4\n constants = {\n \"valid\": (1.89095737e-9, 2.1364985e-10, offset),\n \"full\": (1.7649070e-9, 2.1414831e-10, offset),\n \"same\": (3.2646654e-9, 2.8478277e-10, offset)\n if h.size <= 
x.size\n else (3.21635404e-9, 1.1773253e-8, -1e-5),\n } if x.ndim == 1 else {\n \"valid\": (1.85927e-9, 2.11242e-8, offset),\n \"full\": (1.99817e-9, 1.66174e-8, offset),\n \"same\": (2.04735e-9, 1.55367e-8, offset),\n }\n O_fft, O_direct, O_offset = constants[mode]\n return O_fft * fft_ops < O_direct * direct_ops + O_offset\n\n\ndef _reverse_and_conj(x):\n \"\"\"\n Reverse array `x` in all dimensions and perform the complex conjugate\n \"\"\"\n reverse = (slice(None, None, -1),) * x.ndim\n return x[reverse].conj()\n\n\ndef _np_conv_ok(volume, kernel, mode):\n \"\"\"\n See if numpy supports convolution of `volume` and `kernel` (i.e. both are\n 1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the\n size of the larger input, while SciPy's uses the size of the first input.\n\n Invalid mode strings will return False and be caught by the calling func.\n \"\"\"\n if volume.ndim == kernel.ndim == 1:\n if mode in ('full', 'valid'):\n return True\n elif mode == 'same':\n return volume.size >= kernel.size\n else:\n return False\n\n\ndef _timeit_fast(stmt=\"pass\", setup=\"pass\", repeat=3):\n \"\"\"\n Returns the time the statement/function took, in seconds.\n\n Faster, less precise version of IPython's timeit. `stmt` can be a statement\n written as a string or a callable.\n\n Will do only 1 loop (like IPython's timeit) with no repetitions\n (unlike IPython) for very slow functions. For fast functions, only does\n enough loops to take 5 ms, which seems to produce similar results (on\n Windows at least), and avoids doing an extraneous cycle that isn't\n measured.\n\n \"\"\"\n timer = timeit.Timer(stmt, setup)\n\n # determine number of calls per rep so total time for 1 rep >= 5 ms\n x = 0\n for p in range(0, 10):\n number = 10**p\n x = timer.timeit(number) # seconds\n if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one\n break\n if x > 1: # second\n # If it's macroscopic, don't bother with repetitions\n best = x\n else:\n number *= 10\n r = timer.repeat(repeat, number)\n best = min(r)\n\n sec = best / number\n return sec\n\n\ndef choose_conv_method(in1, in2, mode='full', measure=False):\n \"\"\"\n Find the fastest convolution/correlation method.\n\n This primarily exists to be called during the ``method='auto'`` option in\n `convolve` and `correlate`. It can also be used to determine the value of\n ``method`` for many different convolutions of the same dtype/shape.\n In addition, it supports timing the convolution to adapt the value of\n ``method`` to a particular set of inputs and/or hardware.\n\n Parameters\n ----------\n in1 : array_like\n The first argument passed into the convolution function.\n in2 : array_like\n The second argument passed into the convolution function.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear convolution\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n measure : bool, optional\n If True, run and time the convolution of `in1` and `in2` with both\n methods and return the fastest. 
If False (default), predict the fastest\n method using precomputed values.\n\n Returns\n -------\n method : str\n A string indicating which convolution method is fastest, either\n 'direct' or 'fft'\n times : dict, optional\n A dictionary containing the times (in seconds) needed for each method.\n This value is only returned if ``measure=True``.\n\n See Also\n --------\n convolve\n correlate\n\n Notes\n -----\n Generally, this method is 99% accurate for 2D signals and 85% accurate\n for 1D signals for randomly chosen input sizes. For precision, use\n ``measure=True`` to find the fastest method by timing the convolution.\n This can be used to avoid the minimal overhead of finding the fastest\n ``method`` later, or to adapt the value of ``method`` to a particular set\n of inputs.\n\n Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this\n function. These experiments measured the ratio between the time required\n when using ``method='auto'`` and the time required for the fastest method\n (i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these\n experiments, we found:\n\n * There is a 95% chance of this ratio being less than 1.5 for 1D signals\n and a 99% chance of being less than 2.5 for 2D signals.\n * The ratio was always less than 2.5/5 for 1D/2D signals respectively.\n * This function is most inaccurate for 1D convolutions that take between 1\n and 10 milliseconds with ``method='direct'``. A good proxy for this\n (at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``.\n\n The 2D results almost certainly generalize to 3D/4D/etc because the\n implementation is the same (the 1D implementation is different).\n\n All the numbers above are specific to the EC2 machine. However, we did find\n that this function generalizes fairly decently across hardware. The speed\n tests were of similar quality (and even slightly better) than the same\n tests performed on the machine to tune this function's numbers (a mid-2014\n 15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7 processor).\n\n There are cases when `fftconvolve` supports the inputs but this function\n returns `direct` (e.g., to protect against floating point integer\n precision).\n\n .. 
versionadded:: 0.19\n\n Examples\n --------\n Estimate the fastest method for a given input:\n\n >>> from scipy import signal\n >>> img = np.random.rand(32, 32)\n >>> filter = np.random.rand(8, 8)\n >>> method = signal.choose_conv_method(img, filter, mode='same')\n >>> method\n 'fft'\n\n This can then be applied to other arrays of the same dtype and shape:\n\n >>> img2 = np.random.rand(32, 32)\n >>> filter2 = np.random.rand(8, 8)\n >>> corr2 = signal.correlate(img2, filter2, mode='same', method=method)\n >>> conv2 = signal.convolve(img2, filter2, mode='same', method=method)\n\n The output of this function (``method``) works with `correlate` and\n `convolve`.\n\n \"\"\"\n volume = np.asarray(in1)\n kernel = np.asarray(in2)\n\n if measure:\n times = {}\n for method in ['fft', 'direct']:\n times[method] = _timeit_fast(lambda: convolve(volume, kernel,\n mode=mode, method=method))\n\n chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'\n return chosen_method, times\n\n # for integer input,\n # catch when more precision required than float provides (representing an\n # integer as float can lose precision in fftconvolve if larger than 2**52)\n if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):\n max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())\n max_value *= int(min(volume.size, kernel.size))\n if max_value > 2**np.finfo('float').nmant - 1:\n return 'direct'\n\n if _numeric_arrays([volume, kernel], kinds='b'):\n return 'direct'\n\n if _numeric_arrays([volume, kernel]):\n if _fftconv_faster(volume, kernel, mode):\n return 'fft'\n\n return 'direct'\n\n\ndef convolve(in1, in2, mode='full', method='auto'):\n \"\"\"\n Convolve two N-dimensional arrays.\n\n Convolve `in1` and `in2`, with the output size determined by the\n `mode` argument.\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear convolution\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n method : str {'auto', 'direct', 'fft'}, optional\n A string indicating which method to use to calculate the convolution.\n\n ``direct``\n The convolution is determined directly from sums, the definition of\n convolution.\n ``fft``\n The Fourier Transform is used to perform the convolution by calling\n `fftconvolve`.\n ``auto``\n Automatically chooses direct or Fourier method based on an estimate\n of which is faster (default). See Notes for more detail.\n\n .. 
versionadded:: 0.19.0\n\n Returns\n -------\n convolve : array\n An N-dimensional array containing a subset of the discrete linear\n convolution of `in1` with `in2`.\n\n See Also\n --------\n numpy.polymul : performs polynomial multiplication (same operation, but\n also accepts poly1d objects)\n choose_conv_method : chooses the fastest appropriate convolution method\n fftconvolve : Always uses the FFT method.\n oaconvolve : Uses the overlap-add method to do convolution, which is\n generally faster when the input arrays are large and\n significantly different in size.\n\n Notes\n -----\n By default, `convolve` and `correlate` use ``method='auto'``, which calls\n `choose_conv_method` to choose the fastest method using pre-computed\n values (`choose_conv_method` can also measure real-world timing with a\n keyword argument). Because `fftconvolve` relies on floating point numbers,\n there are certain constraints that may force `method=direct` (more detail\n in `choose_conv_method` docstring).\n\n Examples\n --------\n Smooth a square pulse using a Hann window:\n\n >>> from scipy import signal\n >>> sig = np.repeat([0., 1., 0.], 100)\n >>> win = signal.hann(50)\n >>> filtered = signal.convolve(sig, win, mode='same') / sum(win)\n\n >>> import matplotlib.pyplot as plt\n >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)\n >>> ax_orig.plot(sig)\n >>> ax_orig.set_title('Original pulse')\n >>> ax_orig.margins(0, 0.1)\n >>> ax_win.plot(win)\n >>> ax_win.set_title('Filter impulse response')\n >>> ax_win.margins(0, 0.1)\n >>> ax_filt.plot(filtered)\n >>> ax_filt.set_title('Filtered signal')\n >>> ax_filt.margins(0, 0.1)\n >>> fig.tight_layout()\n >>> fig.show()\n\n \"\"\"\n volume = np.asarray(in1)\n kernel = np.asarray(in2)\n\n if volume.ndim == kernel.ndim == 0:\n return volume * kernel\n elif volume.ndim != kernel.ndim:\n raise ValueError(\"volume and kernel should have the same \"\n \"dimensionality\")\n\n if _inputs_swap_needed(mode, volume.shape, kernel.shape):\n # Convolution is commutative; order doesn't have any effect on output\n volume, kernel = kernel, volume\n\n if method == 'auto':\n method = choose_conv_method(volume, kernel, mode=mode)\n\n if method == 'fft':\n out = fftconvolve(volume, kernel, mode=mode)\n result_type = np.result_type(volume, kernel)\n if result_type.kind in {'u', 'i'}:\n out = np.around(out)\n return out.astype(result_type)\n elif method == 'direct':\n # fastpath to faster numpy.convolve for 1d inputs when possible\n if _np_conv_ok(volume, kernel, mode):\n return np.convolve(volume, kernel, mode)\n\n return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')\n else:\n raise ValueError(\"Acceptable method flags are 'auto',\"\n \" 'direct', or 'fft'.\")\n\n\ndef order_filter(a, domain, rank):\n \"\"\"\n Perform an order filter on an N-D array.\n\n Perform an order filter on the array in. The domain argument acts as a\n mask centered over each pixel. The non-zero elements of domain are\n used to select elements surrounding each input pixel which are placed\n in a list. 
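As a preview of the sorted-list mechanics described next, the highest rank acts as a zero-padded maximum filter (a small sketch):

>>> from scipy import signal
>>> import numpy as np
>>> x = np.arange(9, dtype=float).reshape(3, 3)
>>> signal.order_filter(x, np.ones((3, 3)), 8)  # rank 8: largest of the 9 values
array([[4., 5., 5.],
       [7., 8., 8.],
       [7., 8., 8.]])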
The list is sorted, and the output for that pixel is the\n element corresponding to rank in the sorted list.\n\n Parameters\n ----------\n a : ndarray\n The N-dimensional input array.\n domain : array_like\n A mask array with the same number of dimensions as `a`.\n Each dimension should have an odd number of elements.\n rank : int\n A non-negative integer which selects the element from the\n sorted list (0 corresponds to the smallest element, 1 is the\n next smallest element, etc.).\n\n Returns\n -------\n out : ndarray\n The results of the order filter in an array with the same\n shape as `a`.\n\n Examples\n --------\n >>> from scipy import signal\n >>> x = np.arange(25).reshape(5, 5)\n >>> domain = np.identity(3)\n >>> x\n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n >>> signal.order_filter(x, domain, 0)\n array([[ 0., 0., 0., 0., 0.],\n [ 0., 0., 1., 2., 0.],\n [ 0., 5., 6., 7., 0.],\n [ 0., 10., 11., 12., 0.],\n [ 0., 0., 0., 0., 0.]])\n >>> signal.order_filter(x, domain, 2)\n array([[ 6., 7., 8., 9., 4.],\n [ 11., 12., 13., 14., 9.],\n [ 16., 17., 18., 19., 14.],\n [ 21., 22., 23., 24., 19.],\n [ 20., 21., 22., 23., 24.]])\n\n \"\"\"\n domain = np.asarray(domain)\n size = domain.shape\n for k in range(len(size)):\n if (size[k] % 2) != 1:\n raise ValueError(\"Each dimension of domain argument \"\n \" should have an odd number of elements.\")\n return sigtools._order_filterND(a, domain, rank)\n\n\ndef medfilt(volume, kernel_size=None):\n \"\"\"\n Perform a median filter on an N-dimensional array.\n\n Apply a median filter to the input array using a local window-size\n given by `kernel_size`. The array will automatically be zero-padded.\n\n Parameters\n ----------\n volume : array_like\n An N-dimensional input array.\n kernel_size : array_like, optional\n A scalar or an N-length list giving the size of the median filter\n window in each dimension. Elements of `kernel_size` should be odd.\n If `kernel_size` is a scalar, then this scalar is used as the size in\n each dimension. 
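For example, a scalar `kernel_size` of 3 applies a length-3 window along the single dimension of a 1-D input (a quick sketch; note the zero-padded edges):

>>> from scipy import signal
>>> signal.medfilt([2., 6., 5., 4., 0., 3., 5., 7., 9., 2.], kernel_size=3)
array([2., 5., 5., 4., 3., 3., 5., 7., 7., 2.])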
Default size is 3 for each dimension.\n\n Returns\n -------\n out : ndarray\n An array the same size as input containing the median filtered\n result.\n\n Warns\n -----\n UserWarning\n If array size is smaller than kernel size along any dimension\n\n See Also\n --------\n scipy.ndimage.median_filter\n\n Notes\n -------\n The more general function `scipy.ndimage.median_filter` has a more\n efficient implementation of a median filter and therefore runs much faster.\n \"\"\"\n volume = np.atleast_1d(volume)\n if kernel_size is None:\n kernel_size = [3] * volume.ndim\n kernel_size = np.asarray(kernel_size)\n if kernel_size.shape == ():\n kernel_size = np.repeat(kernel_size.item(), volume.ndim)\n\n for k in range(volume.ndim):\n if (kernel_size[k] % 2) != 1:\n raise ValueError(\"Each element of kernel_size should be odd.\")\n if any(k > s for k, s in zip(kernel_size, volume.shape)):\n warnings.warn('kernel_size exceeds volume extent: the volume will be '\n 'zero-padded.')\n\n domain = np.ones(kernel_size)\n\n numels = np.prod(kernel_size, axis=0)\n order = numels // 2\n return sigtools._order_filterND(volume, domain, order)\n\n\ndef wiener(im, mysize=None, noise=None):\n \"\"\"\n Perform a Wiener filter on an N-dimensional array.\n\n Apply a Wiener filter to the N-dimensional array `im`.\n\n Parameters\n ----------\n im : ndarray\n An N-dimensional array.\n mysize : int or array_like, optional\n A scalar or an N-length list giving the size of the Wiener filter\n window in each dimension. Elements of mysize should be odd.\n If mysize is a scalar, then this scalar is used as the size\n in each dimension.\n noise : float, optional\n The noise-power to use. If None, then noise is estimated as the\n average of the local variance of the input.\n\n Returns\n -------\n out : ndarray\n Wiener filtered result with the same shape as `im`.\n\n Examples\n --------\n\n >>> from scipy.misc import face\n >>> from scipy.signal.signaltools import wiener\n >>> import matplotlib.pyplot as plt\n >>> import numpy as np\n >>> img = np.random.random((40, 40)) #Create a random image\n >>> filtered_img = wiener(img, (5, 5)) #Filter the image\n >>> f, (plot1, plot2) = plt.subplots(1, 2)\n >>> plot1.imshow(img)\n >>> plot2.imshow(filtered_img)\n >>> plt.show()\n\n Notes\n -----\n This implementation is similar to wiener2 in Matlab/Octave.\n For more details see [1]_\n\n References\n ----------\n .. [1] Lim, Jae S., Two-Dimensional Signal and Image Processing,\n Englewood Cliffs, NJ, Prentice Hall, 1990, p. 548.\n\n\n \"\"\"\n im = np.asarray(im)\n if mysize is None:\n mysize = [3] * im.ndim\n mysize = np.asarray(mysize)\n if mysize.shape == ():\n mysize = np.repeat(mysize.item(), im.ndim)\n\n # Estimate the local mean\n lMean = correlate(im, np.ones(mysize), 'same') / np.prod(mysize, axis=0)\n\n # Estimate the local variance\n lVar = (correlate(im ** 2, np.ones(mysize), 'same') /\n np.prod(mysize, axis=0) - lMean ** 2)\n\n # Estimate the noise power if needed.\n if noise is None:\n noise = np.mean(np.ravel(lVar), axis=0)\n\n res = (im - lMean)\n res *= (1 - noise / lVar)\n res += lMean\n out = np.where(lVar < noise, lMean, res)\n\n return out\n\n\ndef convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):\n \"\"\"\n Convolve two 2-dimensional arrays.\n\n Convolve `in1` and `in2` with output size determined by `mode`, and\n boundary conditions determined by `boundary` and `fillvalue`.\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. 
Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear convolution\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n boundary : str {'fill', 'wrap', 'symm'}, optional\n A flag indicating how to handle boundaries:\n\n ``fill``\n pad input arrays with fillvalue. (default)\n ``wrap``\n circular boundary conditions.\n ``symm``\n symmetrical boundary conditions.\n\n fillvalue : scalar, optional\n Value to fill pad input arrays with. Default is 0.\n\n Returns\n -------\n out : ndarray\n A 2-dimensional array containing a subset of the discrete linear\n convolution of `in1` with `in2`.\n\n Examples\n --------\n Compute the gradient of an image by 2D convolution with a complex Scharr\n operator. (Horizontal operator is real, vertical is imaginary.) Use\n symmetric boundary condition to avoid creating edges at the image\n boundaries.\n\n >>> from scipy import signal\n >>> from scipy import misc\n >>> ascent = misc.ascent()\n >>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],\n ... [-10+0j, 0+ 0j, +10 +0j],\n ... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy\n >>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')\n\n >>> import matplotlib.pyplot as plt\n >>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))\n >>> ax_orig.imshow(ascent, cmap='gray')\n >>> ax_orig.set_title('Original')\n >>> ax_orig.set_axis_off()\n >>> ax_mag.imshow(np.absolute(grad), cmap='gray')\n >>> ax_mag.set_title('Gradient magnitude')\n >>> ax_mag.set_axis_off()\n >>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles\n >>> ax_ang.set_title('Gradient orientation')\n >>> ax_ang.set_axis_off()\n >>> fig.show()\n\n \"\"\"\n in1 = np.asarray(in1)\n in2 = np.asarray(in2)\n\n if not in1.ndim == in2.ndim == 2:\n raise ValueError('convolve2d inputs must both be 2-D arrays')\n\n if _inputs_swap_needed(mode, in1.shape, in2.shape):\n in1, in2 = in2, in1\n\n val = _valfrommode(mode)\n bval = _bvalfromboundary(boundary)\n out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)\n return out\n\n\ndef correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):\n \"\"\"\n Cross-correlate two 2-dimensional arrays.\n\n Cross correlate `in1` and `in2` with output size determined by `mode`, and\n boundary conditions determined by `boundary` and `fillvalue`.\n\n Parameters\n ----------\n in1 : array_like\n First input.\n in2 : array_like\n Second input. Should have the same number of dimensions as `in1`.\n mode : str {'full', 'valid', 'same'}, optional\n A string indicating the size of the output:\n\n ``full``\n The output is the full discrete linear cross-correlation\n of the inputs. (Default)\n ``valid``\n The output consists only of those elements that do not\n rely on the zero-padding. In 'valid' mode, either `in1` or `in2`\n must be at least as large as the other in every dimension.\n ``same``\n The output is the same size as `in1`, centered\n with respect to the 'full' output.\n boundary : str {'fill', 'wrap', 'symm'}, optional\n A flag indicating how to handle boundaries:\n\n ``fill``\n pad input arrays with fillvalue. 
(default)\n ``wrap``\n circular boundary conditions.\n ``symm``\n symmetrical boundary conditions.\n\n fillvalue : scalar, optional\n Value to fill pad input arrays with. Default is 0.\n\n Returns\n -------\n correlate2d : ndarray\n A 2-dimensional array containing a subset of the discrete linear\n cross-correlation of `in1` with `in2`.\n\n Notes\n -----\n When using \"same\" mode with even-length inputs, the outputs of `correlate`\n and `correlate2d` differ: There is a 1-index offset between them.\n\n Examples\n --------\n Use 2D cross-correlation to find the location of a template in a noisy\n image:\n\n >>> from scipy import signal\n >>> from scipy import misc\n >>> face = misc.face(gray=True) - misc.face(gray=True).mean()\n >>> template = np.copy(face[300:365, 670:750]) # right eye\n >>> template -= template.mean()\n >>> face = face + np.random.randn(*face.shape) * 50 # add noise\n >>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')\n >>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match\n\n >>> import matplotlib.pyplot as plt\n >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,\n ... figsize=(6, 15))\n >>> ax_orig.imshow(face, cmap='gray')\n >>> ax_orig.set_title('Original')\n >>> ax_orig.set_axis_off()\n >>> ax_template.imshow(template, cmap='gray')\n >>> ax_template.set_title('Template')\n >>> ax_template.set_axis_off()\n >>> ax_corr.imshow(corr, cmap='gray')\n >>> ax_corr.set_title('Cross-correlation')\n >>> ax_corr.set_axis_off()\n >>> ax_orig.plot(x, y, 'ro')\n >>> fig.show()\n\n \"\"\"\n in1 = np.asarray(in1)\n in2 = np.asarray(in2)\n\n if not in1.ndim == in2.ndim == 2:\n raise ValueError('correlate2d inputs must both be 2-D arrays')\n\n swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)\n if swapped_inputs:\n in1, in2 = in2, in1\n\n val = _valfrommode(mode)\n bval = _bvalfromboundary(boundary)\n out = sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue)\n\n if swapped_inputs:\n out = out[::-1, ::-1]\n\n return out\n\n\ndef medfilt2d(input, kernel_size=3):\n \"\"\"\n Median filter a 2-dimensional array.\n\n Apply a median filter to the `input` array using a local window-size\n given by `kernel_size` (must be odd). The array is zero-padded\n automatically.\n\n Parameters\n ----------\n input : array_like\n A 2-dimensional input array.\n kernel_size : array_like, optional\n A scalar or a list of length 2, giving the size of the\n median filter window in each dimension. Elements of\n `kernel_size` should be odd. If `kernel_size` is a scalar,\n then this scalar is used as the size in each dimension.\n Default is a kernel of size (3, 3).\n\n Returns\n -------\n out : ndarray\n An array the same size as input containing the median filtered\n result.\n\n See also\n --------\n scipy.ndimage.median_filter\n\n Notes\n -------\n The more general function `scipy.ndimage.median_filter` has a more\n efficient implementation of a median filter and therefore runs much faster.\n \"\"\"\n image = np.asarray(input)\n if kernel_size is None:\n kernel_size = [3] * 2\n kernel_size = np.asarray(kernel_size)\n if kernel_size.shape == ():\n kernel_size = np.repeat(kernel_size.item(), 2)\n\n for size in kernel_size:\n if (size % 2) != 1:\n raise ValueError(\"Each element of kernel_size should be odd.\")\n\n return sigtools._medfilt2d(image, kernel_size)\n\n\ndef lfilter(b, a, x, axis=-1, zi=None):\n \"\"\"\n Filter data along one-dimension with an IIR or FIR filter.\n\n Filter a data sequence, `x`, using a digital filter. 
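For example, a one-pole smoother ``y[n] = 0.5*x[n] + 0.5*y[n-1]`` (a minimal sketch):

>>> from scipy import signal
>>> signal.lfilter([0.5], [1, -0.5], [1., 1., 1., 1.]).tolist()
[0.5, 0.75, 0.875, 0.9375]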
This works for many\n fundamental data types (including Object type). The filter is a direct\n form II transposed implementation of the standard difference equation\n (see Notes).\n\n The function `sosfilt` (and filter design using ``output='sos'``) should be\n preferred over `lfilter` for most filtering tasks, as second-order sections\n have fewer numerical problems.\n\n Parameters\n ----------\n b : array_like\n The numerator coefficient vector in a 1-D sequence.\n a : array_like\n The denominator coefficient vector in a 1-D sequence. If ``a[0]``\n is not 1, then both `a` and `b` are normalized by ``a[0]``.\n x : array_like\n An N-dimensional input array.\n axis : int, optional\n The axis of the input data array along which to apply the\n linear filter. The filter is applied to each subarray along\n this axis. Default is -1.\n zi : array_like, optional\n Initial conditions for the filter delays. It is a vector\n (or array of vectors for an N-dimensional input) of length\n ``max(len(a), len(b)) - 1``. If `zi` is None or is not given then\n initial rest is assumed. See `lfiltic` for more information.\n\n Returns\n -------\n y : array\n The output of the digital filter.\n zf : array, optional\n If `zi` is None, this is not returned, otherwise, `zf` holds the\n final filter delay values.\n\n See Also\n --------\n lfiltic : Construct initial conditions for `lfilter`.\n lfilter_zi : Compute initial state (steady state of step response) for\n `lfilter`.\n filtfilt : A forward-backward filter, to obtain a filter with linear phase.\n savgol_filter : A Savitzky-Golay filter.\n sosfilt: Filter data using cascaded second-order sections.\n sosfiltfilt: A forward-backward filter using second-order sections.\n\n Notes\n -----\n The filter function is implemented as a direct II transposed structure.\n This means that the filter implements::\n\n a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]\n - a[1]*y[n-1] - ... - a[N]*y[n-N]\n\n where `M` is the degree of the numerator, `N` is the degree of the\n denominator, and `n` is the sample number. It is implemented using\n the following difference equations (assuming M = N)::\n\n a[0]*y[n] = b[0] * x[n] + d[0][n-1]\n d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]\n d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]\n ...\n d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]\n d[N-1][n] = b[N] * x[n] - a[N] * y[n]\n\n where `d` are the state variables.\n\n The rational transfer function describing this filter in the\n z-transform domain is::\n\n -1 -M\n b[0] + b[1]z + ... + b[M] z\n Y(z) = -------------------------------- X(z)\n -1 -N\n a[0] + a[1]z + ... + a[N] z\n\n Examples\n --------\n Generate a noisy signal to be filtered:\n\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n >>> t = np.linspace(-1, 1, 201)\n >>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +\n ... 0.1*np.sin(2*np.pi*1.25*t + 1) +\n ... 0.18*np.cos(2*np.pi*3.85*t))\n >>> xn = x + np.random.randn(len(t)) * 0.08\n\n Create an order 3 lowpass butterworth filter:\n\n >>> b, a = signal.butter(3, 0.05)\n\n Apply the filter to xn. 
Use lfilter_zi to choose the initial condition of\n the filter:\n\n >>> zi = signal.lfilter_zi(b, a)\n >>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])\n\n Apply the filter again, to have a result filtered at an order the same as\n filtfilt:\n\n >>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])\n\n Use filtfilt to apply the filter:\n\n >>> y = signal.filtfilt(b, a, xn)\n\n Plot the original signal and the various filtered versions:\n\n >>> plt.figure\n >>> plt.plot(t, xn, 'b', alpha=0.75)\n >>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')\n >>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',\n ... 'filtfilt'), loc='best')\n >>> plt.grid(True)\n >>> plt.show()\n\n \"\"\"\n a = np.atleast_1d(a)\n if len(a) == 1:\n # This path only supports types fdgFDGO to mirror _linear_filter below.\n # Any of b, a, x, or zi can set the dtype, but there is no default\n # casting of other types; instead a NotImplementedError is raised.\n b = np.asarray(b)\n a = np.asarray(a)\n if b.ndim != 1 and a.ndim != 1:\n raise ValueError('object of too small depth for desired array')\n x = _validate_x(x)\n inputs = [b, a, x]\n if zi is not None:\n # _linear_filter does not broadcast zi, but does do expansion of\n # singleton dims.\n zi = np.asarray(zi)\n if zi.ndim != x.ndim:\n raise ValueError('object of too small depth for desired array')\n expected_shape = list(x.shape)\n expected_shape[axis] = b.shape[0] - 1\n expected_shape = tuple(expected_shape)\n # check the trivial case where zi is the right shape first\n if zi.shape != expected_shape:\n strides = zi.ndim * [None]\n if axis < 0:\n axis += zi.ndim\n for k in range(zi.ndim):\n if k == axis and zi.shape[k] == expected_shape[k]:\n strides[k] = zi.strides[k]\n elif k != axis and zi.shape[k] == expected_shape[k]:\n strides[k] = zi.strides[k]\n elif k != axis and zi.shape[k] == 1:\n strides[k] = 0\n else:\n raise ValueError('Unexpected shape for zi: expected '\n '%s, found %s.' 
%\n (expected_shape, zi.shape))\n zi = np.lib.stride_tricks.as_strided(zi, expected_shape,\n strides)\n inputs.append(zi)\n dtype = np.result_type(*inputs)\n\n if dtype.char not in 'fdgFDGO':\n raise NotImplementedError(\"input type '%s' not supported\" % dtype)\n\n b = np.array(b, dtype=dtype)\n a = np.array(a, dtype=dtype, copy=False)\n b /= a[0]\n x = np.array(x, dtype=dtype, copy=False)\n\n out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)\n ind = out_full.ndim * [slice(None)]\n if zi is not None:\n ind[axis] = slice(zi.shape[axis])\n out_full[tuple(ind)] += zi\n\n ind[axis] = slice(out_full.shape[axis] - len(b) + 1)\n out = out_full[tuple(ind)]\n\n if zi is None:\n return out\n else:\n ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)\n zf = out_full[tuple(ind)]\n return out, zf\n else:\n if zi is None:\n return sigtools._linear_filter(b, a, x, axis)\n else:\n return sigtools._linear_filter(b, a, x, axis, zi)\n\n\ndef lfiltic(b, a, y, x=None):\n \"\"\"\n Construct initial conditions for lfilter given input and output vectors.\n\n Given a linear filter (b, a) and initial conditions on the output `y`\n and the input `x`, return the initial conditions on the state vector zi\n which is used by `lfilter` to generate the output given the input.\n\n Parameters\n ----------\n b : array_like\n Linear filter term.\n a : array_like\n Linear filter term.\n y : array_like\n Initial conditions.\n\n If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.\n\n If `y` is too short, it is padded with zeros.\n x : array_like, optional\n Initial conditions.\n\n If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.\n\n If `x` is not given, its initial conditions are assumed zero.\n\n If `x` is too short, it is padded with zeros.\n\n Returns\n -------\n zi : ndarray\n The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,\n where ``K = max(M, N)``.\n\n See Also\n --------\n lfilter, lfilter_zi\n\n \"\"\"\n N = np.size(a) - 1\n M = np.size(b) - 1\n K = max(M, N)\n y = np.asarray(y)\n if y.dtype.kind in 'bui':\n # ensure calculations are floating point\n y = y.astype(np.float64)\n zi = np.zeros(K, y.dtype)\n if x is None:\n x = np.zeros(M, y.dtype)\n else:\n x = np.asarray(x)\n L = np.size(x)\n if L < M:\n x = np.r_[x, np.zeros(M - L)]\n L = np.size(y)\n if L < N:\n y = np.r_[y, np.zeros(N - L)]\n\n for m in range(M):\n zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)\n\n for m in range(N):\n zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)\n\n return zi\n\n\ndef deconvolve(signal, divisor):\n \"\"\"Deconvolves ``divisor`` out of ``signal`` using inverse filtering.\n\n Returns the quotient and remainder such that\n ``signal = convolve(divisor, quotient) + remainder``\n\n Parameters\n ----------\n signal : array_like\n Signal data, typically a recorded signal\n divisor : array_like\n Divisor data, typically an impulse response or filter that was\n applied to the original signal\n\n Returns\n -------\n quotient : ndarray\n Quotient, typically the recovered original signal\n remainder : ndarray\n Remainder\n\n Examples\n --------\n Deconvolve a signal that's been filtered:\n\n >>> from scipy import signal\n >>> original = [0, 1, 0, 0, 1, 1, 0, 0]\n >>> impulse_response = [2, 1]\n >>> recorded = signal.convolve(impulse_response, original)\n >>> recorded\n array([0, 2, 1, 0, 2, 3, 1, 0, 0])\n >>> recovered, remainder = signal.deconvolve(recorded, impulse_response)\n >>> recovered\n array([ 0., 1., 0., 0., 1., 1., 0., 0.])\n\n See Also\n --------\n numpy.polydiv : 
performs polynomial division (same operation, but\n also accepts poly1d objects)\n\n \"\"\"\n num = np.atleast_1d(signal)\n den = np.atleast_1d(divisor)\n N = len(num)\n D = len(den)\n if D > N:\n quot = []\n rem = num\n else:\n input = np.zeros(N - D + 1, float)\n input[0] = 1\n quot = lfilter(num, den, input)\n rem = num - convolve(den, quot, mode='full')\n return quot, rem\n\n\ndef hilbert(x, N=None, axis=-1):\n \"\"\"\n Compute the analytic signal, using the Hilbert transform.\n\n The transformation is done along the last axis by default.\n\n Parameters\n ----------\n x : array_like\n Signal data. Must be real.\n N : int, optional\n Number of Fourier components. Default: ``x.shape[axis]``\n axis : int, optional\n Axis along which to do the transformation. Default: -1.\n\n Returns\n -------\n xa : ndarray\n Analytic signal of `x`, of each 1-D array along `axis`\n\n Notes\n -----\n The analytic signal ``x_a(t)`` of signal ``x(t)`` is:\n\n .. math:: x_a = F^{-1}(F(x) 2U) = x + i y\n\n where `F` is the Fourier transform, `U` the unit step function,\n and `y` the Hilbert transform of `x`. [1]_\n\n In other words, the negative half of the frequency spectrum is zeroed\n out, turning the real-valued signal into a complex signal. The Hilbert\n transformed signal can be obtained from ``np.imag(hilbert(x))``, and the\n original signal from ``np.real(hilbert(x))``.\n\n Examples\n ---------\n In this example we use the Hilbert transform to determine the amplitude\n envelope and instantaneous frequency of an amplitude-modulated signal.\n\n >>> import numpy as np\n >>> import matplotlib.pyplot as plt\n >>> from scipy.signal import hilbert, chirp\n\n >>> duration = 1.0\n >>> fs = 400.0\n >>> samples = int(fs*duration)\n >>> t = np.arange(samples) / fs\n\n We create a chirp of which the frequency increases from 20 Hz to 100 Hz and\n apply an amplitude modulation.\n\n >>> signal = chirp(t, 20.0, t[-1], 100.0)\n >>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )\n\n The amplitude envelope is given by magnitude of the analytic signal. The\n instantaneous frequency can be obtained by differentiating the\n instantaneous phase in respect to time. The instantaneous phase corresponds\n to the phase angle of the analytic signal.\n\n >>> analytic_signal = hilbert(signal)\n >>> amplitude_envelope = np.abs(analytic_signal)\n >>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))\n >>> instantaneous_frequency = (np.diff(instantaneous_phase) /\n ... (2.0*np.pi) * fs)\n\n >>> fig = plt.figure()\n >>> ax0 = fig.add_subplot(211)\n >>> ax0.plot(t, signal, label='signal')\n >>> ax0.plot(t, amplitude_envelope, label='envelope')\n >>> ax0.set_xlabel(\"time in seconds\")\n >>> ax0.legend()\n >>> ax1 = fig.add_subplot(212)\n >>> ax1.plot(t[1:], instantaneous_frequency)\n >>> ax1.set_xlabel(\"time in seconds\")\n >>> ax1.set_ylim(0.0, 120.0)\n\n References\n ----------\n .. [1] Wikipedia, \"Analytic signal\".\n https://en.wikipedia.org/wiki/Analytic_signal\n .. [2] Leon Cohen, \"Time-Frequency Analysis\", 1995. Chapter 2.\n .. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal\n Processing, Third Edition, 2009. 
Chapter 12.\n ISBN 13: 978-1292-02572-8\n\n \"\"\"\n x = np.asarray(x)\n if np.iscomplexobj(x):\n raise ValueError(\"x must be real.\")\n if N is None:\n N = x.shape[axis]\n if N <= 0:\n raise ValueError(\"N must be positive.\")\n\n Xf = sp_fft.fft(x, N, axis=axis)\n h = np.zeros(N)\n if N % 2 == 0:\n h[0] = h[N // 2] = 1\n h[1:N // 2] = 2\n else:\n h[0] = 1\n h[1:(N + 1) // 2] = 2\n\n if x.ndim > 1:\n ind = [np.newaxis] * x.ndim\n ind[axis] = slice(None)\n h = h[tuple(ind)]\n x = sp_fft.ifft(Xf * h, axis=axis)\n return x\n\n\ndef hilbert2(x, N=None):\n \"\"\"\n Compute the '2-D' analytic signal of `x`\n\n Parameters\n ----------\n x : array_like\n 2-D signal data.\n N : int or tuple of two ints, optional\n Number of Fourier components. Default is ``x.shape``\n\n Returns\n -------\n xa : ndarray\n Analytic signal of `x` taken along axes (0,1).\n\n References\n ----------\n .. [1] Wikipedia, \"Analytic signal\",\n https://en.wikipedia.org/wiki/Analytic_signal\n\n \"\"\"\n x = np.atleast_2d(x)\n if x.ndim > 2:\n raise ValueError(\"x must be 2-D.\")\n if np.iscomplexobj(x):\n raise ValueError(\"x must be real.\")\n if N is None:\n N = x.shape\n elif isinstance(N, int):\n if N <= 0:\n raise ValueError(\"N must be positive.\")\n N = (N, N)\n elif len(N) != 2 or np.any(np.asarray(N) <= 0):\n raise ValueError(\"When given as a tuple, N must hold exactly \"\n \"two positive integers\")\n\n Xf = sp_fft.fft2(x, N, axes=(0, 1))\n h1 = np.zeros(N[0], 'd')\n h2 = np.zeros(N[1], 'd')\n for p in range(2):\n h = eval(\"h%d\" % (p + 1))\n N1 = N[p]\n if N1 % 2 == 0:\n h[0] = h[N1 // 2] = 1\n h[1:N1 // 2] = 2\n else:\n h[0] = 1\n h[1:(N1 + 1) // 2] = 2\n exec(\"h%d = h\" % (p + 1), globals(), locals())\n\n h = h1[:, np.newaxis] * h2[np.newaxis, :]\n k = x.ndim\n while k > 2:\n h = h[:, np.newaxis]\n k -= 1\n x = sp_fft.ifft2(Xf * h, axes=(0, 1))\n return x\n\n\ndef cmplx_sort(p):\n \"\"\"Sort roots based on magnitude.\n\n Parameters\n ----------\n p : array_like\n The roots to sort, as a 1-D array.\n\n Returns\n -------\n p_sorted : ndarray\n Sorted roots.\n indx : ndarray\n Array of indices needed to sort the input `p`.\n\n Examples\n --------\n >>> from scipy import signal\n >>> vals = [1, 4, 1+1.j, 3]\n >>> p_sorted, indx = signal.cmplx_sort(vals)\n >>> p_sorted\n array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j])\n >>> indx\n array([0, 2, 3, 1])\n \"\"\"\n p = np.asarray(p)\n indx = np.argsort(abs(p))\n return np.take(p, indx, 0), indx\n\n\ndef unique_roots(p, tol=1e-3, rtype='min'):\n \"\"\"Determine unique roots and their multiplicities from a list of roots.\n\n Parameters\n ----------\n p : array_like\n The list of roots.\n tol : float, optional\n The tolerance for two roots to be considered equal in terms of\n the distance between them. Default is 1e-3. 
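For instance, widening `tol` merges nearby values (a brief sketch):

>>> from scipy import signal
>>> uniq, mult = signal.unique_roots([1.0, 1.001, 2.0], tol=0.01, rtype='avg')
>>> uniq.tolist(), mult.tolist()
([1.0005, 2.0], [2, 1])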
Refer to Notes about\n the details on roots grouping.\n rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional\n How to determine the returned root if multiple roots are within\n `tol` of each other.\n\n - 'max', 'maximum': pick the maximum of those roots\n - 'min', 'minimum': pick the minimum of those roots\n - 'avg', 'mean': take the average of those roots\n\n When finding minimum or maximum among complex roots they are compared\n first by the real part and then by the imaginary part.\n\n Returns\n -------\n unique : ndarray\n The list of unique roots.\n multiplicity : ndarray\n The multiplicity of each root.\n\n Notes\n -----\n If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to\n ``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it\n doesn't necessarily mean that ``a`` is close to ``c``. It means that roots\n grouping is not unique. In this function we use \"greedy\" grouping going\n through the roots in the order they are given in the input `p`.\n\n This utility function is not specific to roots but can be used for any\n sequence of values for which uniqueness and multiplicity has to be\n determined. For a more general routine, see `numpy.unique`.\n\n Examples\n --------\n >>> from scipy import signal\n >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]\n >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')\n\n Check which roots have multiplicity larger than 1:\n\n >>> uniq[mult > 1]\n array([ 1.305])\n \"\"\"\n if rtype in ['max', 'maximum']:\n reduce = np.max\n elif rtype in ['min', 'minimum']:\n reduce = np.min\n elif rtype in ['avg', 'mean']:\n reduce = np.mean\n else:\n raise ValueError(\"`rtype` must be one of \"\n \"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}\")\n\n p = np.asarray(p)\n\n points = np.empty((len(p), 2))\n points[:, 0] = np.real(p)\n points[:, 1] = np.imag(p)\n tree = cKDTree(points)\n\n p_unique = []\n p_multiplicity = []\n used = np.zeros(len(p), dtype=bool)\n for i in range(len(p)):\n if used[i]:\n continue\n\n group = tree.query_ball_point(points[i], tol)\n group = [x for x in group if not used[x]]\n\n p_unique.append(reduce(p[group]))\n p_multiplicity.append(len(group))\n\n used[group] = True\n\n return np.asarray(p_unique), np.asarray(p_multiplicity)\n\n\ndef invres(r, p, k, tol=1e-3, rtype='avg'):\n \"\"\"Compute b(s) and a(s) from partial fraction expansion.\n\n If `M` is the degree of numerator `b` and `N` the degree of denominator\n `a`::\n\n b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]\n H(s) = ------ = ------------------------------------------\n a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]\n\n then the partial-fraction expansion H(s) is defined as::\n\n r[0] r[1] r[-1]\n = -------- + -------- + ... + --------- + k(s)\n (s-p[0]) (s-p[1]) (s-p[-1])\n\n If there are any repeated roots (closer together than `tol`), then H(s)\n has terms like::\n\n r[i] r[i+1] r[i+n-1]\n -------- + ----------- + ... + -----------\n (s-p[i]) (s-p[i])**2 (s-p[i])**n\n\n This function is used for polynomials in positive powers of s or z,\n such as analog filters or digital filters in controls engineering. For\n negative powers of z (typical for digital filters in DSP), use `invresz`.\n\n Parameters\n ----------\n r : array_like\n Residues corresponding to the poles. For repeated poles, the residues\n must be ordered to correspond to ascending by power fractions.\n p : array_like\n Poles. 
Equal poles must be adjacent.\n k : array_like\n Coefficients of the direct polynomial term.\n tol : float, optional\n The tolerance for two roots to be considered equal in terms of\n the distance between them. Default is 1e-3. See `unique_roots`\n for further details.\n rtype : {'avg', 'min', 'max'}, optional\n Method for computing a root to represent a group of identical roots.\n Default is 'avg'. See `unique_roots` for further details.\n\n Returns\n -------\n b : ndarray\n Numerator polynomial coefficients.\n a : ndarray\n Denominator polynomial coefficients.\n\n See Also\n --------\n residue, invresz, unique_roots\n\n \"\"\"\n r = np.atleast_1d(r)\n p = np.atleast_1d(p)\n k = np.trim_zeros(np.atleast_1d(k), 'f')\n\n unique_poles, multiplicity = _group_poles(p, tol, rtype)\n factors, denominator = _compute_factors(unique_poles, multiplicity,\n include_powers=True)\n\n if len(k) == 0:\n numerator = 0\n else:\n numerator = np.polymul(k, denominator)\n\n for residue, factor in zip(r, factors):\n numerator = np.polyadd(numerator, residue * factor)\n\n return numerator, denominator\n\n\ndef _compute_factors(roots, multiplicity, include_powers=False):\n \"\"\"Compute the total polynomial divided by factors for each root.\"\"\"\n current = np.array([1])\n suffixes = [current]\n for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]):\n monomial = np.array([1, -pole])\n for _ in range(mult):\n current = np.polymul(current, monomial)\n suffixes.append(current)\n suffixes = suffixes[::-1]\n\n factors = []\n current = np.array([1])\n for pole, mult, suffix in zip(roots, multiplicity, suffixes):\n monomial = np.array([1, -pole])\n block = []\n for i in range(mult):\n if i == 0 or include_powers:\n block.append(np.polymul(current, suffix))\n current = np.polymul(current, monomial)\n factors.extend(reversed(block))\n\n return factors, current\n\n\ndef _compute_residues(poles, multiplicity, numerator):\n denominator_factors, _ = _compute_factors(poles, multiplicity)\n numerator = numerator.astype(poles.dtype)\n\n residues = []\n for pole, mult, factor in zip(poles, multiplicity,\n denominator_factors):\n if mult == 1:\n residues.append(np.polyval(numerator, pole) /\n np.polyval(factor, pole))\n else:\n numer = numerator.copy()\n monomial = np.array([1, -pole])\n factor, d = np.polydiv(factor, monomial)\n\n block = []\n for _ in range(mult):\n numer, n = np.polydiv(numer, monomial)\n r = n[0] / d[0]\n numer = np.polysub(numer, r * factor)\n block.append(r)\n\n residues.extend(reversed(block))\n\n return np.asarray(residues)\n\n\ndef residue(b, a, tol=1e-3, rtype='avg'):\n \"\"\"Compute partial-fraction expansion of b(s) / a(s).\n\n If `M` is the degree of numerator `b` and `N` the degree of denominator\n `a`::\n\n b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]\n H(s) = ------ = ------------------------------------------\n a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]\n\n then the partial-fraction expansion H(s) is defined as::\n\n r[0] r[1] r[-1]\n = -------- + -------- + ... + --------- + k(s)\n (s-p[0]) (s-p[1]) (s-p[-1])\n\n If there are any repeated roots (closer together than `tol`), then H(s)\n has terms like::\n\n r[i] r[i+1] r[i+n-1]\n -------- + ----------- + ... + -----------\n (s-p[i]) (s-p[i])**2 (s-p[i])**n\n\n This function is used for polynomials in positive powers of s or z,\n such as analog filters or digital filters in controls engineering. 
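For example, ``1/((s+1)(s+2))`` splits into ``1/(s+1) - 1/(s+2)`` (a brief sketch):

>>> from scipy import signal
>>> r, p, k = signal.residue([1], [1, 3, 2])
>>> r.tolist(), p.tolist()
([1.0, -1.0], [-1.0, -2.0])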
For\n negative powers of z (typical for digital filters in DSP), use `residuez`.\n\n See Notes for details about the algorithm.\n\n Parameters\n ----------\n b : array_like\n Numerator polynomial coefficients.\n a : array_like\n Denominator polynomial coefficients.\n tol : float, optional\n The tolerance for two roots to be considered equal in terms of\n the distance between them. Default is 1e-3. See `unique_roots`\n for further details.\n rtype : {'avg', 'min', 'max'}, optional\n Method for computing a root to represent a group of identical roots.\n Default is 'avg'. See `unique_roots` for further details.\n\n Returns\n -------\n r : ndarray\n Residues corresponding to the poles. For repeated poles, the residues\n are ordered to correspond to ascending by power fractions.\n p : ndarray\n Poles ordered by magnitude in ascending order.\n k : ndarray\n Coefficients of the direct polynomial term.\n\n See Also\n --------\n invres, residuez, numpy.poly, unique_roots\n\n Notes\n -----\n The \"deflation through subtraction\" algorithm is used for\n computations --- method 6 in [1]_.\n\n The form of partial fraction expansion depends on poles multiplicity in\n the exact mathematical sense. However there is no way to exactly\n determine multiplicity of roots of a polynomial in numerical computing.\n Thus you should think of the result of `residue` with given `tol` as\n partial fraction expansion computed for the denominator composed of the\n computed poles with empirically determined multiplicity. The choice of\n `tol` can drastically change the result if there are close poles.\n\n References\n ----------\n .. [1] J. F. Mahoney, B. D. Sivazlian, \"Partial fractions expansion: a\n review of computational methodology and efficiency\", Journal of\n Computational and Applied Mathematics, Vol. 9, 1983.\n \"\"\"\n b = np.asarray(b)\n a = np.asarray(a)\n if (np.issubdtype(b.dtype, np.complexfloating)\n or np.issubdtype(a.dtype, np.complexfloating)):\n b = b.astype(complex)\n a = a.astype(complex)\n else:\n b = b.astype(float)\n a = a.astype(float)\n\n b = np.trim_zeros(np.atleast_1d(b), 'f')\n a = np.trim_zeros(np.atleast_1d(a), 'f')\n\n if a.size == 0:\n raise ValueError(\"Denominator `a` is zero.\")\n\n poles = np.roots(a)\n if b.size == 0:\n return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])\n\n if len(b) < len(a):\n k = np.empty(0)\n else:\n k, b = np.polydiv(b, a)\n\n unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)\n unique_poles, order = cmplx_sort(unique_poles)\n multiplicity = multiplicity[order]\n\n residues = _compute_residues(unique_poles, multiplicity, b)\n\n index = 0\n for pole, mult in zip(unique_poles, multiplicity):\n poles[index:index + mult] = pole\n index += mult\n\n return residues / a[0], poles, k\n\n\ndef residuez(b, a, tol=1e-3, rtype='avg'):\n \"\"\"Compute partial-fraction expansion of b(z) / a(z).\n\n If `M` is the degree of numerator `b` and `N` the degree of denominator\n `a`::\n\n b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)\n H(z) = ------ = ------------------------------------------\n a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)\n\n then the partial-fraction expansion H(z) is defined as::\n\n r[0] r[-1]\n = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...\n (1-p[0]z**(-1)) (1-p[-1]z**(-1))\n\n If there are any repeated roots (closer than `tol`), then the partial\n fraction expansion has terms like::\n\n r[i] r[i+1] r[i+n-1]\n -------------- + ------------------ + ... 
+ ------------------\n (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n\n\n This function is used for polynomials in negative powers of z,\n such as digital filters in DSP. For positive powers, use `residue`.\n\n See Notes of `residue` for details about the algorithm.\n\n Parameters\n ----------\n b : array_like\n Numerator polynomial coefficients.\n a : array_like\n Denominator polynomial coefficients.\n tol : float, optional\n The tolerance for two roots to be considered equal in terms of\n the distance between them. Default is 1e-3. See `unique_roots`\n for further details.\n rtype : {'avg', 'min', 'max'}, optional\n Method for computing a root to represent a group of identical roots.\n Default is 'avg'. See `unique_roots` for further details.\n\n Returns\n -------\n r : ndarray\n Residues corresponding to the poles. For repeated poles, the residues\n are ordered to correspond to ascending by power fractions.\n p : ndarray\n Poles ordered by magnitude in ascending order.\n k : ndarray\n Coefficients of the direct polynomial term.\n\n See Also\n --------\n invresz, residue, unique_roots\n \"\"\"\n b = np.asarray(b)\n a = np.asarray(a)\n if (np.issubdtype(b.dtype, np.complexfloating)\n or np.issubdtype(a.dtype, np.complexfloating)):\n b = b.astype(complex)\n a = a.astype(complex)\n else:\n b = b.astype(float)\n a = a.astype(float)\n\n b = np.trim_zeros(np.atleast_1d(b), 'b')\n a = np.trim_zeros(np.atleast_1d(a), 'b')\n\n if a.size == 0:\n raise ValueError(\"Denominator `a` is zero.\")\n elif a[0] == 0:\n raise ValueError(\"First coefficient of denominator `a` must be \"\n \"non-zero.\")\n\n poles = np.roots(a)\n if b.size == 0:\n return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])\n\n b_rev = b[::-1]\n a_rev = a[::-1]\n\n if len(b_rev) < len(a_rev):\n k_rev = np.empty(0)\n else:\n k_rev, b_rev = np.polydiv(b_rev, a_rev)\n\n unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)\n unique_poles, order = cmplx_sort(unique_poles)\n multiplicity = multiplicity[order]\n\n residues = _compute_residues(1 / unique_poles, multiplicity, b_rev)\n\n index = 0\n powers = np.empty(len(residues), dtype=int)\n for pole, mult in zip(unique_poles, multiplicity):\n poles[index:index + mult] = pole\n powers[index:index + mult] = 1 + np.arange(mult)\n index += mult\n\n residues *= (-poles) ** powers / a_rev[0]\n\n return residues, poles, k_rev[::-1]\n\n\ndef _group_poles(poles, tol, rtype):\n if rtype in ['max', 'maximum']:\n reduce = np.max\n elif rtype in ['min', 'minimum']:\n reduce = np.min\n elif rtype in ['avg', 'mean']:\n reduce = np.mean\n else:\n raise ValueError(\"`rtype` must be one of \"\n \"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}\")\n\n unique = []\n multiplicity = []\n\n pole = poles[0]\n block = [pole]\n for i in range(1, len(poles)):\n if abs(poles[i] - pole) <= tol:\n block.append(pole)\n else:\n unique.append(reduce(block))\n multiplicity.append(len(block))\n pole = poles[i]\n block = [pole]\n\n unique.append(reduce(block))\n multiplicity.append(len(block))\n\n return np.asarray(unique), np.asarray(multiplicity)\n\n\ndef invresz(r, p, k, tol=1e-3, rtype='avg'):\n \"\"\"Compute b(z) and a(z) from partial fraction expansion.\n\n If `M` is the degree of numerator `b` and `N` the degree of denominator\n `a`::\n\n b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)\n H(z) = ------ = ------------------------------------------\n a(z) a[0] + a[1] z**(-1) + ...
+ a[N] z**(-N)\n\n then the partial-fraction expansion H(z) is defined as::\n\n r[0] r[-1]\n = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...\n (1-p[0]z**(-1)) (1-p[-1]z**(-1))\n\n If there are any repeated roots (closer than `tol`), then the partial\n fraction expansion has terms like::\n\n r[i] r[i+1] r[i+n-1]\n -------------- + ------------------ + ... + ------------------\n (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n\n\n This function is used for polynomials in negative powers of z,\n such as digital filters in DSP. For positive powers, use `invres`.\n\n Parameters\n ----------\n r : array_like\n Residues corresponding to the poles. For repeated poles, the residues\n must be ordered to correspond to ascending by power fractions.\n p : array_like\n Poles. Equal poles must be adjacent.\n k : array_like\n Coefficients of the direct polynomial term.\n tol : float, optional\n The tolerance for two roots to be considered equal in terms of\n the distance between them. Default is 1e-3. See `unique_roots`\n for further details.\n rtype : {'avg', 'min', 'max'}, optional\n Method for computing a root to represent a group of identical roots.\n Default is 'avg'. See `unique_roots` for further details.\n\n Returns\n -------\n b : ndarray\n Numerator polynomial coefficients.\n a : ndarray\n Denominator polynomial coefficients.\n\n See Also\n --------\n residuez, unique_roots, invres\n\n \"\"\"\n r = np.atleast_1d(r)\n p = np.atleast_1d(p)\n k = np.trim_zeros(np.atleast_1d(k), 'b')\n\n unique_poles, multiplicity = _group_poles(p, tol, rtype)\n factors, denominator = _compute_factors(unique_poles, multiplicity,\n include_powers=True)\n\n if len(k) == 0:\n numerator = 0\n else:\n numerator = np.polymul(k[::-1], denominator[::-1])\n\n for residue, factor in zip(r, factors):\n numerator = np.polyadd(numerator, residue * factor[::-1])\n\n return numerator[::-1], denominator\n\n\ndef resample(x, num, t=None, axis=0, window=None, domain='time'):\n \"\"\"\n Resample `x` to `num` samples using Fourier method along the given axis.\n\n The resampled signal starts at the same value as `x` but is sampled\n with a spacing of ``len(x) / num * (spacing of x)``. Because a\n Fourier method is used, the signal is assumed to be periodic.\n\n Parameters\n ----------\n x : array_like\n The data to be resampled.\n num : int\n The number of samples in the resampled signal.\n t : array_like, optional\n If `t` is given, it is assumed to be the equally spaced sample\n positions associated with the signal data in `x`.\n axis : int, optional\n The axis of `x` that is resampled. Default is 0.\n window : array_like, callable, string, float, or tuple, optional\n Specifies the window applied to the signal in the Fourier\n domain. 
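For instance, a window given by name is resolved through `scipy.signal.get_window` (a quick sketch):

>>> from scipy import signal
>>> import numpy as np
>>> y = np.cos(np.linspace(0, 10, 20))
>>> signal.resample(y, 40, window='hamming').shape
(40,)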
See below for details.\n domain : string, optional\n A string indicating the domain of the input `x`:\n ``time`` Consider the input `x` as time-domain (Default),\n ``freq`` Consider the input `x` as frequency-domain.\n\n Returns\n -------\n resampled_x or (resampled_x, resampled_t)\n Either the resampled array, or, if `t` was given, a tuple\n containing the resampled array and the corresponding resampled\n positions.\n\n See Also\n --------\n decimate : Downsample the signal after applying an FIR or IIR filter.\n resample_poly : Resample using polyphase filtering and an FIR filter.\n\n Notes\n -----\n The argument `window` controls a Fourier-domain window that tapers\n the Fourier spectrum before zero-padding to alleviate ringing in\n the resampled values for sampled signals you didn't intend to be\n interpreted as band-limited.\n\n If `window` is a function, then it is called with a vector of inputs\n indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).\n\n If `window` is an array of the same length as `x.shape[axis]` it is\n assumed to be the window to be applied directly in the Fourier\n domain (with dc and low-frequency first).\n\n For any other type of `window`, the function `scipy.signal.get_window`\n is called to generate the window.\n\n The first sample of the returned vector is the same as the first\n sample of the input vector. The spacing between samples is changed\n from ``dx`` to ``dx * len(x) / num``.\n\n If `t` is not None, then it is used solely to calculate the resampled\n positions `resampled_t`\n\n As noted, `resample` uses FFT transformations, which can be very\n slow if the number of input or output samples is large and prime;\n see `scipy.fft.fft`.\n\n Examples\n --------\n Note that the end of the resampled data rises to meet the first\n sample of the next cycle:\n\n >>> from scipy import signal\n\n >>> x = np.linspace(0, 10, 20, endpoint=False)\n >>> y = np.cos(-x**2/6.0)\n >>> f = signal.resample(y, 100)\n >>> xnew = np.linspace(0, 10, 100, endpoint=False)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')\n >>> plt.legend(['data', 'resampled'], loc='best')\n >>> plt.show()\n \"\"\"\n\n if domain not in ('time', 'freq'):\n raise ValueError(\"Acceptable domain flags are 'time' or\"\n \" 'freq', not domain={}\".format(domain))\n\n x = np.asarray(x)\n Nx = x.shape[axis]\n\n # Check if we can use faster real FFT\n real_input = np.isrealobj(x)\n\n if domain == 'time':\n # Forward transform\n if real_input:\n X = sp_fft.rfft(x, axis=axis)\n else: # Full complex FFT\n X = sp_fft.fft(x, axis=axis)\n else: # domain == 'freq'\n X = x\n\n # Apply window to spectrum\n if window is not None:\n if callable(window):\n W = window(sp_fft.fftfreq(Nx))\n elif isinstance(window, np.ndarray):\n if window.shape != (Nx,):\n raise ValueError('window must have the same length as data')\n W = window\n else:\n W = sp_fft.ifftshift(get_window(window, Nx))\n\n newshape_W = [1] * x.ndim\n newshape_W[axis] = X.shape[axis]\n if real_input:\n # Fold the window back on itself to mimic complex behavior\n W_real = W.copy()\n W_real[1:] += W_real[-1:0:-1]\n W_real[1:] *= 0.5\n X *= W_real[:newshape_W[axis]].reshape(newshape_W)\n else:\n X *= W.reshape(newshape_W)\n\n # Copy each half of the original spectrum to the output spectrum, either\n # truncating high frequences (downsampling) or zero-padding them\n # (upsampling)\n\n # Placeholder array for output spectrum\n newshape = list(x.shape)\n if real_input:\n newshape[axis] = num // 2 + 1\n 
else:\n newshape[axis] = num\n Y = np.zeros(newshape, X.dtype)\n\n # Copy positive frequency components (and Nyquist, if present)\n N = min(num, Nx)\n nyq = N // 2 + 1 # Slice index that includes Nyquist if present\n sl = [slice(None)] * x.ndim\n sl[axis] = slice(0, nyq)\n Y[tuple(sl)] = X[tuple(sl)]\n if not real_input:\n # Copy negative frequency components\n if N > 2: # (slice expression doesn't collapse to empty array)\n sl[axis] = slice(nyq - N, None)\n Y[tuple(sl)] = X[tuple(sl)]\n\n # Split/join Nyquist component(s) if present\n # So far we have set Y[+N/2]=X[+N/2]\n if N % 2 == 0:\n if num < Nx: # downsampling\n if real_input:\n sl[axis] = slice(N//2, N//2 + 1)\n Y[tuple(sl)] *= 2.\n else:\n # select the component of Y at frequency +N/2,\n # add the component of X at -N/2\n sl[axis] = slice(-N//2, -N//2 + 1)\n Y[tuple(sl)] += X[tuple(sl)]\n elif Nx < num: # upsampling\n # select the component at frequency +N/2 and halve it\n sl[axis] = slice(N//2, N//2 + 1)\n Y[tuple(sl)] *= 0.5\n if not real_input:\n temp = Y[tuple(sl)]\n # set the component at -N/2 equal to the component at +N/2\n sl[axis] = slice(num-N//2, num-N//2 + 1)\n Y[tuple(sl)] = temp\n\n # Inverse transform\n if real_input:\n y = sp_fft.irfft(Y, num, axis=axis)\n else:\n y = sp_fft.ifft(Y, axis=axis, overwrite_x=True)\n\n y *= (float(num) / float(Nx))\n\n if t is None:\n return y\n else:\n new_t = np.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]\n return y, new_t\n\n\ndef resample_poly(x, up, down, axis=0, window=('kaiser', 5.0),\n padtype='constant', cval=None):\n \"\"\"\n Resample `x` along the given axis using polyphase filtering.\n\n The signal `x` is upsampled by the factor `up`, a zero-phase low-pass\n FIR filter is applied, and then it is downsampled by the factor `down`.\n The resulting sample rate is ``up / down`` times the original sample\n rate. By default, values beyond the boundary of the signal are assumed\n to be zero during the filtering step.\n\n Parameters\n ----------\n x : array_like\n The data to be resampled.\n up : int\n The upsampling factor.\n down : int\n The downsampling factor.\n axis : int, optional\n The axis of `x` that is resampled. Default is 0.\n window : string, tuple, or array_like, optional\n Desired window to use to design the low-pass filter, or the FIR filter\n coefficients to employ. See below for details.\n padtype : string, optional\n `constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of\n the other signal extension modes supported by `scipy.signal.upfirdn`.\n Changes assumptions on values beyond the boundary. If `constant`,\n assumed to be `cval` (default zero). If `line` assumed to continue a\n linear trend defined by the first and last points. `mean`, `median`,\n `maximum` and `minimum` work as in `np.pad` and assume that the values\n beyond the boundary are the mean, median, maximum or minimum\n respectively of the array along the axis.\n\n .. versionadded:: 1.4.0\n cval : float, optional\n Value to use if `padtype='constant'`. Default is zero.\n\n .. versionadded:: 1.4.0\n\n Returns\n -------\n resampled_x : array\n The resampled array.\n\n See Also\n --------\n decimate : Downsample the signal after applying an FIR or IIR filter.\n resample : Resample up or down using the FFT method.\n\n Notes\n -----\n This polyphase method will likely be faster than the Fourier method\n in `scipy.signal.resample` when the number of samples is large and\n prime, or when the number of samples is large and `up` and `down`\n share a large greatest common denominator. 
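For example, converting 48000 Hz audio to 44100 Hz reduces internally to ``up=147, down=160`` (a quick arithmetic sketch):

>>> import math
>>> math.gcd(44100, 48000)
300
>>> 44100 // 300, 48000 // 300
(147, 160)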
The length of the FIR\n filter used will depend on ``max(up, down) // gcd(up, down)``, and\n the number of operations during polyphase filtering will depend on\n the filter length and `down` (see `scipy.signal.upfirdn` for details).\n\n The argument `window` specifies the FIR low-pass filter design.\n\n If `window` is an array_like it is assumed to be the FIR filter\n coefficients. Note that the FIR filter is applied after the upsampling\n step, so it should be designed to operate on a signal at a sampling\n frequency higher than the original by a factor of `up//gcd(up, down)`.\n This function's output will be centered with respect to this array, so it\n is best to pass a symmetric filter with an odd number of samples if, as\n is usually the case, a zero-phase filter is desired.\n\n For any other type of `window`, the functions `scipy.signal.get_window`\n and `scipy.signal.firwin` are called to generate the appropriate filter\n coefficients.\n\n The first sample of the returned vector is the same as the first\n sample of the input vector. The spacing between samples is changed\n from ``dx`` to ``dx * down / float(up)``.\n\n Examples\n --------\n By default, the end of the resampled data rises to meet the first\n sample of the next cycle for the FFT method, and gets closer to zero\n for the polyphase method:\n\n >>> from scipy import signal\n\n >>> x = np.linspace(0, 10, 20, endpoint=False)\n >>> y = np.cos(-x**2/6.0)\n >>> f_fft = signal.resample(y, 100)\n >>> f_poly = signal.resample_poly(y, 100, 20)\n >>> xnew = np.linspace(0, 10, 100, endpoint=False)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')\n >>> plt.plot(x, y, 'ko-')\n >>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries\n >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')\n >>> plt.show()\n\n This default behaviour can be changed by using the padtype option:\n\n >>> import numpy as np\n >>> from scipy import signal\n\n >>> N = 5\n >>> x = np.linspace(0, 1, N, endpoint=False)\n >>> y = 2 + x**2 - 1.7*np.sin(x) + .2*np.cos(11*x)\n >>> y2 = 1 + x**3 + 0.1*np.sin(x) + .1*np.cos(11*x)\n >>> Y = np.stack([y, y2], axis=-1)\n >>> up = 4\n >>> xr = np.linspace(0, 1, N*up, endpoint=False)\n\n >>> y2 = signal.resample_poly(Y, up, 1, padtype='constant')\n >>> y3 = signal.resample_poly(Y, up, 1, padtype='mean')\n >>> y4 = signal.resample_poly(Y, up, 1, padtype='line')\n\n >>> import matplotlib.pyplot as plt\n >>> for i in [0,1]:\n ... plt.figure()\n ... plt.plot(xr, y4[:,i], 'g.', label='line')\n ... plt.plot(xr, y3[:,i], 'y.', label='mean')\n ... plt.plot(xr, y2[:,i], 'r.', label='constant')\n ... plt.plot(x, Y[:,i], 'k-')\n ... 
plt.legend()\n    >>> plt.show()\n\n    \"\"\"\n    x = np.asarray(x)\n    if up != int(up):\n        raise ValueError(\"up must be an integer\")\n    if down != int(down):\n        raise ValueError(\"down must be an integer\")\n    up = int(up)\n    down = int(down)\n    if up < 1 or down < 1:\n        raise ValueError('up and down must be >= 1')\n    if cval is not None and padtype != 'constant':\n        raise ValueError('cval has no effect when padtype is %r' % padtype)\n\n    # Determine our up and down factors\n    # Use a rational approximation to save computation time on really long\n    # signals\n    g_ = math.gcd(up, down)\n    up //= g_\n    down //= g_\n    if up == down == 1:\n        return x.copy()\n    n_in = x.shape[axis]\n    n_out = n_in * up\n    n_out = n_out // down + bool(n_out % down)\n\n    if isinstance(window, (list, np.ndarray)):\n        window = np.array(window)  # use array to force a copy (we modify it)\n        if window.ndim > 1:\n            raise ValueError('window must be 1-D')\n        half_len = (window.size - 1) // 2\n        h = window\n    else:\n        # Design a linear-phase low-pass FIR filter\n        max_rate = max(up, down)\n        f_c = 1. / max_rate  # cutoff of FIR filter (rel. to Nyquist)\n        half_len = 10 * max_rate  # reasonable cutoff for our sinc-like function\n        h = firwin(2 * half_len + 1, f_c, window=window)\n    h *= up\n\n    # Zero-pad our filter to put the output samples at the center\n    n_pre_pad = (down - half_len % down)\n    n_post_pad = 0\n    n_pre_remove = (half_len + n_pre_pad) // down\n    # We should rarely need to do this given our filter lengths...\n    while _output_len(len(h) + n_pre_pad + n_post_pad, n_in,\n                      up, down) < n_out + n_pre_remove:\n        n_post_pad += 1\n    h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h,\n                        np.zeros(n_post_pad, dtype=h.dtype)))\n    n_pre_remove_end = n_pre_remove + n_out\n\n    # Remove background depending on the padtype option\n    funcs = {'mean': np.mean, 'median': np.median,\n             'minimum': np.amin, 'maximum': np.amax}\n    upfirdn_kwargs = {'mode': 'constant', 'cval': 0}\n    if padtype in funcs:\n        background_values = funcs[padtype](x, axis=axis, keepdims=True)\n    elif padtype in _upfirdn_modes:\n        upfirdn_kwargs = {'mode': padtype}\n        if padtype == 'constant':\n            if cval is None:\n                cval = 0\n            upfirdn_kwargs['cval'] = cval\n    else:\n        raise ValueError(\n            'padtype must be one of: maximum, mean, median, minimum, ' +\n            ', '.join(_upfirdn_modes))\n\n    if padtype in funcs:\n        x = x - background_values\n\n    # filter then remove excess\n    y = upfirdn(h, x, up, down, axis=axis, **upfirdn_kwargs)\n    keep = [slice(None), ]*x.ndim\n    keep[axis] = slice(n_pre_remove, n_pre_remove_end)\n    y_keep = y[tuple(keep)]\n\n    # Add background back\n    if padtype in funcs:\n        y_keep += background_values\n\n    return y_keep\n\n\ndef vectorstrength(events, period):\n    '''\n    Determine the vector strength of the events corresponding to the given\n    period.\n\n    The vector strength is a measure of phase synchrony, how well the\n    timing of the events is synchronized to a single period of a periodic\n    signal.\n\n    If multiple periods are used, calculate the vector strength of each.\n    This is called the \"resonating vector strength\".\n\n    Parameters\n    ----------\n    events : 1D array_like\n        An array of time points containing the timing of the events.\n    period : float or array_like\n        The period of the signal that the events should synchronize to.\n        The period is in the same units as `events`. It can also be an array\n        of periods, in which case the outputs are arrays of the same length.\n\n    Returns\n    -------\n    strength : float or 1D array\n        The strength of the synchronization.
1.0 is perfect synchronization\n and 0.0 is no synchronization. If `period` is an array, this is also\n an array with each element containing the vector strength at the\n corresponding period.\n phase : float or array\n The phase that the events are most strongly synchronized to in radians.\n If `period` is an array, this is also an array with each element\n containing the phase for the corresponding period.\n\n References\n ----------\n van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector\n strength: Auditory system, electric fish, and noise.\n Chaos 21, 047508 (2011);\n :doi:`10.1063/1.3670512`.\n van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:\n biological and mathematical perspectives. Biol Cybern.\n 2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.\n van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens\n when we vary the \"probing\" frequency while keeping the spike times\n fixed. Biol Cybern. 2013 Aug;107(4):491-94.\n :doi:`10.1007/s00422-013-0560-8`.\n '''\n events = np.asarray(events)\n period = np.asarray(period)\n if events.ndim > 1:\n raise ValueError('events cannot have dimensions more than 1')\n if period.ndim > 1:\n raise ValueError('period cannot have dimensions more than 1')\n\n # we need to know later if period was originally a scalar\n scalarperiod = not period.ndim\n\n events = np.atleast_2d(events)\n period = np.atleast_2d(period)\n if (period <= 0).any():\n raise ValueError('periods must be positive')\n\n # this converts the times to vectors\n vectors = np.exp(np.dot(2j*np.pi/period.T, events))\n\n # the vector strength is just the magnitude of the mean of the vectors\n # the vector phase is the angle of the mean of the vectors\n vectormean = np.mean(vectors, axis=1)\n strength = abs(vectormean)\n phase = np.angle(vectormean)\n\n # if the original period was a scalar, return scalars\n if scalarperiod:\n strength = strength[0]\n phase = phase[0]\n return strength, phase\n\n\ndef detrend(data, axis=-1, type='linear', bp=0, overwrite_data=False):\n \"\"\"\n Remove linear trend along axis from data.\n\n Parameters\n ----------\n data : array_like\n The input data.\n axis : int, optional\n The axis along which to detrend the data. By default this is the\n last axis (-1).\n type : {'linear', 'constant'}, optional\n The type of detrending. If ``type == 'linear'`` (default),\n the result of a linear least-squares fit to `data` is subtracted\n from `data`.\n If ``type == 'constant'``, only the mean of `data` is subtracted.\n bp : array_like of ints, optional\n A sequence of break points. If given, an individual linear fit is\n performed for each part of `data` between two break points.\n Break points are specified as indices into `data`. This parameter\n only has an effect when ``type == 'linear'``.\n overwrite_data : bool, optional\n If True, perform in place detrending and avoid a copy. 
Default is False\n\n Returns\n -------\n ret : ndarray\n The detrended input data.\n\n Examples\n --------\n >>> from scipy import signal\n >>> randgen = np.random.RandomState(9)\n >>> npoints = 1000\n >>> noise = randgen.randn(npoints)\n >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise\n >>> (signal.detrend(x) - noise).max() < 0.01\n True\n\n \"\"\"\n if type not in ['linear', 'l', 'constant', 'c']:\n raise ValueError(\"Trend type must be 'linear' or 'constant'.\")\n data = np.asarray(data)\n dtype = data.dtype.char\n if dtype not in 'dfDF':\n dtype = 'd'\n if type in ['constant', 'c']:\n ret = data - np.expand_dims(np.mean(data, axis), axis)\n return ret\n else:\n dshape = data.shape\n N = dshape[axis]\n bp = np.sort(np.unique(np.r_[0, bp, N]))\n if np.any(bp > N):\n raise ValueError(\"Breakpoints must be less than length \"\n \"of data along given axis.\")\n Nreg = len(bp) - 1\n # Restructure data so that axis is along first dimension and\n # all other dimensions are collapsed into second dimension\n rnk = len(dshape)\n if axis < 0:\n axis = axis + rnk\n newdims = np.r_[axis, 0:axis, axis + 1:rnk]\n newdata = np.reshape(np.transpose(data, tuple(newdims)),\n (N, _prod(dshape) // N))\n if not overwrite_data:\n newdata = newdata.copy() # make sure we have a copy\n if newdata.dtype.char not in 'dfDF':\n newdata = newdata.astype(dtype)\n # Find leastsq fit and remove it for each piece\n for m in range(Nreg):\n Npts = bp[m + 1] - bp[m]\n A = np.ones((Npts, 2), dtype)\n A[:, 0] = np.cast[dtype](np.arange(1, Npts + 1) * 1.0 / Npts)\n sl = slice(bp[m], bp[m + 1])\n coef, resids, rank, s = linalg.lstsq(A, newdata[sl])\n newdata[sl] = newdata[sl] - np.dot(A, coef)\n # Put data back in original shape.\n tdshape = np.take(dshape, newdims, 0)\n ret = np.reshape(newdata, tuple(tdshape))\n vals = list(range(1, rnk))\n olddims = vals[:axis] + [0] + vals[axis:]\n ret = np.transpose(ret, tuple(olddims))\n return ret\n\n\ndef lfilter_zi(b, a):\n \"\"\"\n Construct initial conditions for lfilter for step response steady-state.\n\n Compute an initial state `zi` for the `lfilter` function that corresponds\n to the steady state of the step response.\n\n A typical use of this function is to set the initial state so that the\n output of the filter starts at the same value as the first element of\n the signal to be filtered.\n\n Parameters\n ----------\n b, a : array_like (1-D)\n The IIR filter coefficients. See `lfilter` for more\n information.\n\n Returns\n -------\n zi : 1-D ndarray\n The initial state for the filter.\n\n See Also\n --------\n lfilter, lfiltic, filtfilt\n\n Notes\n -----\n A linear filter with order m has a state space representation (A, B, C, D),\n for which the output y of the filter can be expressed as::\n\n z(n+1) = A*z(n) + B*x(n)\n y(n) = C*z(n) + D*x(n)\n\n where z(n) is a vector of length m, A has shape (m, m), B has shape\n (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is\n a scalar). 
lfilter_zi solves::\n\n zi = A*zi + B\n\n In other words, it finds the initial condition for which the response\n to an input of all ones is a constant.\n\n Given the filter coefficients `a` and `b`, the state space matrices\n for the transposed direct form II implementation of the linear filter,\n which is the implementation used by scipy.signal.lfilter, are::\n\n A = scipy.linalg.companion(a).T\n B = b[1:] - a[1:]*b[0]\n\n assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first\n divided by a[0].\n\n Examples\n --------\n The following code creates a lowpass Butterworth filter. Then it\n applies that filter to an array whose values are all 1.0; the\n output is also all 1.0, as expected for a lowpass filter. If the\n `zi` argument of `lfilter` had not been given, the output would have\n shown the transient signal.\n\n >>> from numpy import array, ones\n >>> from scipy.signal import lfilter, lfilter_zi, butter\n >>> b, a = butter(5, 0.25)\n >>> zi = lfilter_zi(b, a)\n >>> y, zo = lfilter(b, a, ones(10), zi=zi)\n >>> y\n array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])\n\n Another example:\n\n >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])\n >>> y, zf = lfilter(b, a, x, zi=zi*x[0])\n >>> y\n array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,\n 0.44399389, 0.35505241])\n\n Note that the `zi` argument to `lfilter` was computed using\n `lfilter_zi` and scaled by `x[0]`. Then the output `y` has no\n transient until the input drops from 0.5 to 0.0.\n\n \"\"\"\n\n # FIXME: Can this function be replaced with an appropriate\n # use of lfiltic? For example, when b,a = butter(N,Wn),\n # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).\n #\n\n # We could use scipy.signal.normalize, but it uses warnings in\n # cases where a ValueError is more appropriate, and it allows\n # b to be 2D.\n b = np.atleast_1d(b)\n if b.ndim != 1:\n raise ValueError(\"Numerator b must be 1-D.\")\n a = np.atleast_1d(a)\n if a.ndim != 1:\n raise ValueError(\"Denominator a must be 1-D.\")\n\n while len(a) > 1 and a[0] == 0.0:\n a = a[1:]\n if a.size < 1:\n raise ValueError(\"There must be at least one nonzero `a` coefficient.\")\n\n if a[0] != 1.0:\n # Normalize the coefficients so a[0] == 1.\n b = b / a[0]\n a = a / a[0]\n\n n = max(len(a), len(b))\n\n # Pad a or b with zeros so they are the same length.\n if len(a) < n:\n a = np.r_[a, np.zeros(n - len(a))]\n elif len(b) < n:\n b = np.r_[b, np.zeros(n - len(b))]\n\n IminusA = np.eye(n - 1) - linalg.companion(a).T\n B = b[1:] - a[1:] * b[0]\n # Solve zi = A*zi + B\n zi = np.linalg.solve(IminusA, B)\n\n # For future reference: we could also use the following\n # explicit formulas to solve the linear system:\n #\n # zi = np.zeros(n - 1)\n # zi[0] = B.sum() / IminusA[:,0].sum()\n # asum = 1.0\n # csum = 0.0\n # for k in range(1,n-1):\n # asum += a[k]\n # csum += b[k] - a[k]*b[0]\n # zi[k] = asum*zi[0] - csum\n\n return zi\n\n\ndef sosfilt_zi(sos):\n \"\"\"\n Construct initial conditions for sosfilt for step response steady-state.\n\n Compute an initial state `zi` for the `sosfilt` function that corresponds\n to the steady state of the step response.\n\n A typical use of this function is to set the initial state so that the\n output of the filter starts at the same value as the first element of\n the signal to be filtered.\n\n Parameters\n ----------\n sos : array_like\n Array of second-order filter coefficients, must have shape\n ``(n_sections, 6)``. 
See `sosfilt` for the SOS filter format\n specification.\n\n Returns\n -------\n zi : ndarray\n Initial conditions suitable for use with ``sosfilt``, shape\n ``(n_sections, 2)``.\n\n See Also\n --------\n sosfilt, zpk2sos\n\n Notes\n -----\n .. versionadded:: 0.16.0\n\n Examples\n --------\n Filter a rectangular pulse that begins at time 0, with and without\n the use of the `zi` argument of `scipy.signal.sosfilt`.\n\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n\n >>> sos = signal.butter(9, 0.125, output='sos')\n >>> zi = signal.sosfilt_zi(sos)\n >>> x = (np.arange(250) < 100).astype(int)\n >>> f1 = signal.sosfilt(sos, x)\n >>> f2, zo = signal.sosfilt(sos, x, zi=zi)\n\n >>> plt.plot(x, 'k--', label='x')\n >>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')\n >>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')\n >>> plt.legend(loc='best')\n >>> plt.show()\n\n \"\"\"\n sos = np.asarray(sos)\n if sos.ndim != 2 or sos.shape[1] != 6:\n raise ValueError('sos must be shape (n_sections, 6)')\n\n n_sections = sos.shape[0]\n zi = np.empty((n_sections, 2))\n scale = 1.0\n for section in range(n_sections):\n b = sos[section, :3]\n a = sos[section, 3:]\n zi[section] = scale * lfilter_zi(b, a)\n # If H(z) = B(z)/A(z) is this section's transfer function, then\n # b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady\n # state value of this section's step response.\n scale *= b.sum() / a.sum()\n\n return zi\n\n\ndef _filtfilt_gust(b, a, x, axis=-1, irlen=None):\n \"\"\"Forward-backward IIR filter that uses Gustafsson's method.\n\n Apply the IIR filter defined by `(b,a)` to `x` twice, first forward\n then backward, using Gustafsson's initial conditions [1]_.\n\n Let ``y_fb`` be the result of filtering first forward and then backward,\n and let ``y_bf`` be the result of filtering first backward then forward.\n Gustafsson's method is to compute initial conditions for the forward\n pass and the backward pass such that ``y_fb == y_bf``.\n\n Parameters\n ----------\n b : scalar or 1-D ndarray\n Numerator coefficients of the filter.\n a : scalar or 1-D ndarray\n Denominator coefficients of the filter.\n x : ndarray\n Data to be filtered.\n axis : int, optional\n Axis of `x` to be filtered. Default is -1.\n irlen : int or None, optional\n The length of the nonnegligible part of the impulse response.\n If `irlen` is None, or if the length of the signal is less than\n ``2 * irlen``, then no part of the impulse response is ignored.\n\n Returns\n -------\n y : ndarray\n The filtered data.\n x0 : ndarray\n Initial condition for the forward filter.\n x1 : ndarray\n Initial condition for the backward filter.\n\n Notes\n -----\n Typically the return values `x0` and `x1` are not needed by the\n caller. The intended use of these return values is in unit tests.\n\n References\n ----------\n .. [1] F. Gustaffson. Determining the initial states in forward-backward\n filtering. 
Transactions on Signal Processing, 46(4):988-992, 1996.\n\n \"\"\"\n # In the comments, \"Gustafsson's paper\" and [1] refer to the\n # paper referenced in the docstring.\n\n b = np.atleast_1d(b)\n a = np.atleast_1d(a)\n\n order = max(len(b), len(a)) - 1\n if order == 0:\n # The filter is just scalar multiplication, with no state.\n scale = (b[0] / a[0])**2\n y = scale * x\n return y, np.array([]), np.array([])\n\n if axis != -1 or axis != x.ndim - 1:\n # Move the axis containing the data to the end.\n x = np.swapaxes(x, axis, x.ndim - 1)\n\n # n is the number of samples in the data to be filtered.\n n = x.shape[-1]\n\n if irlen is None or n <= 2*irlen:\n m = n\n else:\n m = irlen\n\n # Create Obs, the observability matrix (called O in the paper).\n # This matrix can be interpreted as the operator that propagates\n # an arbitrary initial state to the output, assuming the input is\n # zero.\n # In Gustafsson's paper, the forward and backward filters are not\n # necessarily the same, so he has both O_f and O_b. We use the same\n # filter in both directions, so we only need O. The same comment\n # applies to S below.\n Obs = np.zeros((m, order))\n zi = np.zeros(order)\n zi[0] = 1\n Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]\n for k in range(1, order):\n Obs[k:, k] = Obs[:-k, 0]\n\n # Obsr is O^R (Gustafsson's notation for row-reversed O)\n Obsr = Obs[::-1]\n\n # Create S. S is the matrix that applies the filter to the reversed\n # propagated initial conditions. That is,\n # out = S.dot(zi)\n # is the same as\n # tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.\n # out = lfilter(b, a, tmp[::-1]) # Reverse and filter.\n\n # Equations (5) & (6) of [1]\n S = lfilter(b, a, Obs[::-1], axis=0)\n\n # Sr is S^R (row-reversed S)\n Sr = S[::-1]\n\n # M is [(S^R - O), (O^R - S)]\n if m == n:\n M = np.hstack((Sr - Obs, Obsr - S))\n else:\n # Matrix described in section IV of [1].\n M = np.zeros((2*m, 2*order))\n M[:m, :order] = Sr - Obs\n M[m:, order:] = Obsr - S\n\n # Naive forward-backward and backward-forward filters.\n # These have large transients because the filters use zero initial\n # conditions.\n y_f = lfilter(b, a, x)\n y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]\n\n y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]\n y_bf = lfilter(b, a, y_b)\n\n delta_y_bf_fb = y_bf - y_fb\n if m == n:\n delta = delta_y_bf_fb\n else:\n start_m = delta_y_bf_fb[..., :m]\n end_m = delta_y_bf_fb[..., -m:]\n delta = np.concatenate((start_m, end_m), axis=-1)\n\n # ic_opt holds the \"optimal\" initial conditions.\n # The following code computes the result shown in the formula\n # of the paper between equations (6) and (7).\n if delta.ndim == 1:\n ic_opt = linalg.lstsq(M, delta)[0]\n else:\n # Reshape delta so it can be used as an array of multiple\n # right-hand-sides in linalg.lstsq.\n delta2d = delta.reshape(-1, delta.shape[-1]).T\n ic_opt0 = linalg.lstsq(M, delta2d)[0].T\n ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))\n\n # Now compute the filtered signal using equation (7) of [1].\n # First, form [S^R, O^R] and call it W.\n if m == n:\n W = np.hstack((Sr, Obsr))\n else:\n W = np.zeros((2*m, 2*order))\n W[:m, :order] = Sr\n W[m:, order:] = Obsr\n\n # Equation (7) of [1] says\n # Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]\n # `wic` is (almost) the product on the right.\n # W has shape (m, 2*order), and ic_opt has shape (..., 2*order),\n # so we can't use W.dot(ic_opt). 
Instead, we dot ic_opt with W.T,\n # so wic has shape (..., m).\n wic = ic_opt.dot(W.T)\n\n # `wic` is \"almost\" the product of W and the optimal ICs in equation\n # (7)--if we're using a truncated impulse response (m < n), `wic`\n # contains only the adjustments required for the ends of the signal.\n # Here we form y_opt, taking this into account if necessary.\n y_opt = y_fb\n if m == n:\n y_opt += wic\n else:\n y_opt[..., :m] += wic[..., :m]\n y_opt[..., -m:] += wic[..., -m:]\n\n x0 = ic_opt[..., :order]\n x1 = ic_opt[..., -order:]\n if axis != -1 or axis != x.ndim - 1:\n # Restore the data axis to its original position.\n x0 = np.swapaxes(x0, axis, x.ndim - 1)\n x1 = np.swapaxes(x1, axis, x.ndim - 1)\n y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)\n\n return y_opt, x0, x1\n\n\ndef filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',\n irlen=None):\n \"\"\"\n Apply a digital filter forward and backward to a signal.\n\n This function applies a linear digital filter twice, once forward and\n once backwards. The combined filter has zero phase and a filter order\n twice that of the original.\n\n The function provides options for handling the edges of the signal.\n\n The function `sosfiltfilt` (and filter design using ``output='sos'``)\n should be preferred over `filtfilt` for most filtering tasks, as\n second-order sections have fewer numerical problems.\n\n Parameters\n ----------\n b : (N,) array_like\n The numerator coefficient vector of the filter.\n a : (N,) array_like\n The denominator coefficient vector of the filter. If ``a[0]``\n is not 1, then both `a` and `b` are normalized by ``a[0]``.\n x : array_like\n The array of data to be filtered.\n axis : int, optional\n The axis of `x` to which the filter is applied.\n Default is -1.\n padtype : str or None, optional\n Must be 'odd', 'even', 'constant', or None. This determines the\n type of extension to use for the padded signal to which the filter\n is applied. If `padtype` is None, no padding is used. The default\n is 'odd'.\n padlen : int or None, optional\n The number of elements by which to extend `x` at both ends of\n `axis` before applying the filter. This value must be less than\n ``x.shape[axis] - 1``. ``padlen=0`` implies no padding.\n The default value is ``3 * max(len(a), len(b))``.\n method : str, optional\n Determines the method for handling the edges of the signal, either\n \"pad\" or \"gust\". When `method` is \"pad\", the signal is padded; the\n type of padding is determined by `padtype` and `padlen`, and `irlen`\n is ignored. When `method` is \"gust\", Gustafsson's method is used,\n and `padtype` and `padlen` are ignored.\n irlen : int or None, optional\n When `method` is \"gust\", `irlen` specifies the length of the\n impulse response of the filter. If `irlen` is None, no part\n of the impulse response is ignored. For a long signal, specifying\n `irlen` can significantly improve the performance of the filter.\n\n Returns\n -------\n y : ndarray\n The filtered output with the same shape as `x`.\n\n See Also\n --------\n sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt\n\n Notes\n -----\n When `method` is \"pad\", the function pads the data along the given axis\n in one of three ways: odd, even or constant. The odd and even extensions\n have the corresponding symmetry about the end point of the data. The\n constant extension extends the data with the values at the end points. 
On\n    both the forward and backward passes, the initial condition of the\n    filter is found by using `lfilter_zi` and scaling it by the end point of\n    the extended data.\n\n    When `method` is \"gust\", Gustafsson's method [1]_ is used. Initial\n    conditions are chosen for the forward and backward passes so that the\n    forward-backward filter gives the same result as the backward-forward\n    filter.\n\n    The option to use Gustafsson's method was added in scipy version 0.16.0.\n\n    References\n    ----------\n    .. [1] F. Gustafsson, \"Determining the initial states in forward-backward\n           filtering\", Transactions on Signal Processing, Vol. 46, pp. 988-992,\n           1996.\n\n    Examples\n    --------\n    The examples will use several functions from `scipy.signal`.\n\n    >>> from scipy import signal\n    >>> import matplotlib.pyplot as plt\n\n    First we create a one second signal that is the sum of two pure sine\n    waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.\n\n    >>> t = np.linspace(0, 1.0, 2001)\n    >>> xlow = np.sin(2 * np.pi * 5 * t)\n    >>> xhigh = np.sin(2 * np.pi * 250 * t)\n    >>> x = xlow + xhigh\n\n    Now create a lowpass Butterworth filter with a cutoff of 0.125 times\n    the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`.\n    The result should be approximately ``xlow``, with no phase shift.\n\n    >>> b, a = signal.butter(8, 0.125)\n    >>> y = signal.filtfilt(b, a, x, padlen=150)\n    >>> np.abs(y - xlow).max()\n    9.1086182074789912e-06\n\n    We get a fairly clean result for this artificial example because\n    the odd extension is exact, and with the moderately long padding,\n    the filter's transients have dissipated by the time the actual data\n    is reached. In general, transient effects at the edges are\n    unavoidable.\n\n    The following example demonstrates the option ``method=\"gust\"``.\n\n    First, create a filter.\n\n    >>> b, a = signal.ellip(4, 0.01, 120, 0.125)  # Filter to be applied.\n    >>> np.random.seed(123456)\n\n    `sig` is a random input signal to be filtered.\n\n    >>> n = 60\n    >>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()\n\n    Apply `filtfilt` to `sig`, once using the Gustafsson method, and\n    once using padding, and plot the results for comparison.\n\n    >>> fgust = signal.filtfilt(b, a, sig, method=\"gust\")\n    >>> fpad = signal.filtfilt(b, a, sig, padlen=50)\n    >>> plt.plot(sig, 'k-', label='input')\n    >>> plt.plot(fgust, 'b-', linewidth=4, label='gust')\n    >>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')\n    >>> plt.legend(loc='best')\n    >>> plt.show()\n\n    The `irlen` argument can be used to improve the performance\n    of Gustafsson's method.\n\n    Estimate the impulse response length of the filter.\n\n    >>> z, p, k = signal.tf2zpk(b, a)\n    >>> eps = 1e-9\n    >>> r = np.max(np.abs(p))\n    >>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))\n    >>> approx_impulse_len\n    137\n\n    Apply the filter to a longer signal, with and without the `irlen`\n    argument. The difference between `y1` and `y2` is small.
For long\n signals, using `irlen` gives a significant performance improvement.\n\n >>> x = np.random.randn(5000)\n >>> y1 = signal.filtfilt(b, a, x, method='gust')\n >>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)\n >>> print(np.max(np.abs(y1 - y2)))\n 1.80056858312e-10\n\n \"\"\"\n b = np.atleast_1d(b)\n a = np.atleast_1d(a)\n x = np.asarray(x)\n\n if method not in [\"pad\", \"gust\"]:\n raise ValueError(\"method must be 'pad' or 'gust'.\")\n\n if method == \"gust\":\n y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)\n return y\n\n # method == \"pad\"\n edge, ext = _validate_pad(padtype, padlen, x, axis,\n ntaps=max(len(a), len(b)))\n\n # Get the steady state of the filter's step response.\n zi = lfilter_zi(b, a)\n\n # Reshape zi and create x0 so that zi*x0 broadcasts\n # to the correct value for the 'zi' keyword argument\n # to lfilter.\n zi_shape = [1] * x.ndim\n zi_shape[axis] = zi.size\n zi = np.reshape(zi, zi_shape)\n x0 = axis_slice(ext, stop=1, axis=axis)\n\n # Forward filter.\n (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)\n\n # Backward filter.\n # Create y0 so zi*y0 broadcasts appropriately.\n y0 = axis_slice(y, start=-1, axis=axis)\n (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)\n\n # Reverse y.\n y = axis_reverse(y, axis=axis)\n\n if edge > 0:\n # Slice the actual signal from the extended signal.\n y = axis_slice(y, start=edge, stop=-edge, axis=axis)\n\n return y\n\n\ndef _validate_pad(padtype, padlen, x, axis, ntaps):\n \"\"\"Helper to validate padding for filtfilt\"\"\"\n if padtype not in ['even', 'odd', 'constant', None]:\n raise ValueError((\"Unknown value '%s' given to padtype. padtype \"\n \"must be 'even', 'odd', 'constant', or None.\") %\n padtype)\n\n if padtype is None:\n padlen = 0\n\n if padlen is None:\n # Original padding; preserved for backwards compatibility.\n edge = ntaps * 3\n else:\n edge = padlen\n\n # x's 'axis' dimension must be bigger than edge.\n if x.shape[axis] <= edge:\n raise ValueError(\"The length of the input vector x must be greater \"\n \"than padlen, which is %d.\" % edge)\n\n if padtype is not None and edge > 0:\n # Make an extension of length `edge` at each\n # end of the input array.\n if padtype == 'even':\n ext = even_ext(x, edge, axis=axis)\n elif padtype == 'odd':\n ext = odd_ext(x, edge, axis=axis)\n else:\n ext = const_ext(x, edge, axis=axis)\n else:\n ext = x\n return edge, ext\n\n\ndef _validate_x(x):\n x = np.asarray(x)\n if x.ndim == 0:\n raise ValueError('x must be at least 1-D')\n return x\n\n\ndef sosfilt(sos, x, axis=-1, zi=None):\n \"\"\"\n Filter data along one dimension using cascaded second-order sections.\n\n Filter a data sequence, `x`, using a digital IIR filter defined by\n `sos`.\n\n Parameters\n ----------\n sos : array_like\n Array of second-order filter coefficients, must have shape\n ``(n_sections, 6)``. Each row corresponds to a second-order\n section, with the first three columns providing the numerator\n coefficients and the last three providing the denominator\n coefficients.\n x : array_like\n An N-dimensional input array.\n axis : int, optional\n The axis of the input data array along which to apply the\n linear filter. The filter is applied to each subarray along\n this axis. Default is -1.\n zi : array_like, optional\n Initial conditions for the cascaded filter delays. It is a (at\n least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where\n ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``\n replaced by 2. 
If `zi` is None or is not given then initial rest\n (i.e. all zeros) is assumed.\n Note that these initial conditions are *not* the same as the initial\n conditions given by `lfiltic` or `lfilter_zi`.\n\n Returns\n -------\n y : ndarray\n The output of the digital filter.\n zf : ndarray, optional\n If `zi` is None, this is not returned, otherwise, `zf` holds the\n final filter delay values.\n\n See Also\n --------\n zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz\n\n Notes\n -----\n The filter function is implemented as a series of second-order filters\n with direct-form II transposed structure. It is designed to minimize\n numerical precision errors for high-order filters.\n\n .. versionadded:: 0.16.0\n\n Examples\n --------\n Plot a 13th-order filter's impulse response using both `lfilter` and\n `sosfilt`, showing the instability that results from trying to do a\n 13th-order filter in a single stage (the numerical error pushes some poles\n outside of the unit circle):\n\n >>> import matplotlib.pyplot as plt\n >>> from scipy import signal\n >>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')\n >>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')\n >>> x = signal.unit_impulse(700)\n >>> y_tf = signal.lfilter(b, a, x)\n >>> y_sos = signal.sosfilt(sos, x)\n >>> plt.plot(y_tf, 'r', label='TF')\n >>> plt.plot(y_sos, 'k', label='SOS')\n >>> plt.legend(loc='best')\n >>> plt.show()\n\n \"\"\"\n x = _validate_x(x)\n sos, n_sections = _validate_sos(sos)\n x_zi_shape = list(x.shape)\n x_zi_shape[axis] = 2\n x_zi_shape = tuple([n_sections] + x_zi_shape)\n inputs = [sos, x]\n if zi is not None:\n inputs.append(np.asarray(zi))\n dtype = np.result_type(*inputs)\n if dtype.char not in 'fdgFDGO':\n raise NotImplementedError(\"input type '%s' not supported\" % dtype)\n if zi is not None:\n zi = np.array(zi, dtype) # make a copy so that we can operate in place\n if zi.shape != x_zi_shape:\n raise ValueError('Invalid zi shape. With axis=%r, an input with '\n 'shape %r, and an sos array with %d sections, zi '\n 'must have shape %r, got %r.' %\n (axis, x.shape, n_sections, x_zi_shape, zi.shape))\n return_zi = True\n else:\n zi = np.zeros(x_zi_shape, dtype=dtype)\n return_zi = False\n axis = axis % x.ndim # make positive\n x = np.moveaxis(x, axis, -1)\n zi = np.moveaxis(zi, [0, axis + 1], [-2, -1])\n x_shape, zi_shape = x.shape, zi.shape\n x = np.reshape(x, (-1, x.shape[-1]))\n x = np.array(x, dtype, order='C') # make a copy, can modify in place\n zi = np.ascontiguousarray(np.reshape(zi, (-1, n_sections, 2)))\n sos = sos.astype(dtype, copy=False)\n _sosfilt(sos, x, zi)\n x.shape = x_shape\n x = np.moveaxis(x, -1, axis)\n if return_zi:\n zi.shape = zi_shape\n zi = np.moveaxis(zi, [-2, -1], [0, axis + 1])\n out = (x, zi)\n else:\n out = x\n return out\n\n\ndef sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):\n \"\"\"\n A forward-backward digital filter using cascaded second-order sections.\n\n See `filtfilt` for more complete information about this method.\n\n Parameters\n ----------\n sos : array_like\n Array of second-order filter coefficients, must have shape\n ``(n_sections, 6)``. Each row corresponds to a second-order\n section, with the first three columns providing the numerator\n coefficients and the last three providing the denominator\n coefficients.\n x : array_like\n The array of data to be filtered.\n axis : int, optional\n The axis of `x` to which the filter is applied.\n Default is -1.\n padtype : str or None, optional\n Must be 'odd', 'even', 'constant', or None. 
This determines the\n type of extension to use for the padded signal to which the filter\n is applied. If `padtype` is None, no padding is used. The default\n is 'odd'.\n padlen : int or None, optional\n The number of elements by which to extend `x` at both ends of\n `axis` before applying the filter. This value must be less than\n ``x.shape[axis] - 1``. ``padlen=0`` implies no padding.\n The default value is::\n\n 3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),\n (sos[:, 5] == 0).sum()))\n\n The extra subtraction at the end attempts to compensate for poles\n and zeros at the origin (e.g. for odd-order filters) to yield\n equivalent estimates of `padlen` to those of `filtfilt` for\n second-order section filters built with `scipy.signal` functions.\n\n Returns\n -------\n y : ndarray\n The filtered output with the same shape as `x`.\n\n See Also\n --------\n filtfilt, sosfilt, sosfilt_zi, sosfreqz\n\n Notes\n -----\n .. versionadded:: 0.18.0\n\n Examples\n --------\n >>> from scipy.signal import sosfiltfilt, butter\n >>> import matplotlib.pyplot as plt\n\n Create an interesting signal to filter.\n\n >>> n = 201\n >>> t = np.linspace(0, 1, n)\n >>> np.random.seed(123)\n >>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*np.random.randn(n)\n\n Create a lowpass Butterworth filter, and use it to filter `x`.\n\n >>> sos = butter(4, 0.125, output='sos')\n >>> y = sosfiltfilt(sos, x)\n\n For comparison, apply an 8th order filter using `sosfilt`. The filter\n is initialized using the mean of the first four values of `x`.\n\n >>> from scipy.signal import sosfilt, sosfilt_zi\n >>> sos8 = butter(8, 0.125, output='sos')\n >>> zi = x[:4].mean() * sosfilt_zi(sos8)\n >>> y2, zo = sosfilt(sos8, x, zi=zi)\n\n Plot the results. Note that the phase of `y` matches the input, while\n `y2` has a significant phase delay.\n\n >>> plt.plot(t, x, alpha=0.5, label='x(t)')\n >>> plt.plot(t, y, label='y(t)')\n >>> plt.plot(t, y2, label='y2(t)')\n >>> plt.legend(framealpha=1, shadow=True)\n >>> plt.grid(alpha=0.25)\n >>> plt.xlabel('t')\n >>> plt.show()\n\n \"\"\"\n sos, n_sections = _validate_sos(sos)\n x = _validate_x(x)\n\n # `method` is \"pad\"...\n ntaps = 2 * n_sections + 1\n ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())\n edge, ext = _validate_pad(padtype, padlen, x, axis,\n ntaps=ntaps)\n\n # These steps follow the same form as filtfilt with modifications\n zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)\n zi_shape = [1] * x.ndim\n zi_shape[axis] = 2\n zi.shape = [n_sections] + zi_shape\n x_0 = axis_slice(ext, stop=1, axis=axis)\n (y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)\n y_0 = axis_slice(y, start=-1, axis=axis)\n (y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)\n y = axis_reverse(y, axis=axis)\n if edge > 0:\n y = axis_slice(y, start=edge, stop=-edge, axis=axis)\n return y\n\n\ndef decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):\n \"\"\"\n Downsample the signal after applying an anti-aliasing filter.\n\n By default, an order 8 Chebyshev type I filter is used. A 30 point FIR\n filter with Hamming window is used if `ftype` is 'fir'.\n\n Parameters\n ----------\n x : array_like\n The signal to be downsampled, as an N-dimensional array.\n q : int\n The downsampling factor. When using IIR downsampling, it is recommended\n to call `decimate` multiple times for downsampling factors higher than\n 13.\n n : int, optional\n The order of the filter (1 less than the length for 'fir'). 
Defaults to\n 8 for 'iir' and 20 times the downsampling factor for 'fir'.\n ftype : str {'iir', 'fir'} or ``dlti`` instance, optional\n If 'iir' or 'fir', specifies the type of lowpass filter. If an instance\n of an `dlti` object, uses that object to filter before downsampling.\n axis : int, optional\n The axis along which to decimate.\n zero_phase : bool, optional\n Prevent phase shift by filtering with `filtfilt` instead of `lfilter`\n when using an IIR filter, and shifting the outputs back by the filter's\n group delay when using an FIR filter. The default value of ``True`` is\n recommended, since a phase shift is generally not desired.\n\n .. versionadded:: 0.18.0\n\n Returns\n -------\n y : ndarray\n The down-sampled signal.\n\n See Also\n --------\n resample : Resample up or down using the FFT method.\n resample_poly : Resample using polyphase filtering and an FIR filter.\n\n Notes\n -----\n The ``zero_phase`` keyword was added in 0.18.0.\n The possibility to use instances of ``dlti`` as ``ftype`` was added in\n 0.18.0.\n \"\"\"\n\n x = np.asarray(x)\n q = operator.index(q)\n\n if n is not None:\n n = operator.index(n)\n\n if ftype == 'fir':\n if n is None:\n half_len = 10 * q # reasonable cutoff for our sinc-like function\n n = 2 * half_len\n b, a = firwin(n+1, 1. / q, window='hamming'), 1.\n elif ftype == 'iir':\n if n is None:\n n = 8\n system = dlti(*cheby1(n, 0.05, 0.8 / q))\n b, a = system.num, system.den\n elif isinstance(ftype, dlti):\n system = ftype._as_tf() # Avoids copying if already in TF form\n b, a = system.num, system.den\n else:\n raise ValueError('invalid ftype')\n\n sl = [slice(None)] * x.ndim\n a = np.asarray(a)\n\n if a.size == 1: # FIR case\n b = b / a\n if zero_phase:\n y = resample_poly(x, 1, q, axis=axis, window=b)\n else:\n # upfirdn is generally faster than lfilter by a factor equal to the\n # downsampling factor, since it only calculates the needed outputs\n n_out = x.shape[axis] // q + bool(x.shape[axis] % q)\n y = upfirdn(b, x, up=1, down=q, axis=axis)\n sl[axis] = slice(None, n_out, None)\n\n else: # IIR case\n if zero_phase:\n y = filtfilt(b, a, x, axis=axis)\n else:\n y = lfilter(b, a, x, axis=axis)\n sl[axis] = slice(None, None, q)\n\n return y[tuple(sl)]\n",
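The signal-processing file above documents `resample_poly`, `filtfilt`/`sosfiltfilt`, and `decimate`, but the dump never shows them used together. The following is a minimal usage sketch, assuming scipy >= 1.4 (for `resample_poly`'s `padtype` argument) and NumPy; the test signal, cutoff, and rates are illustrative values, not taken from the dumped source.

# Usage sketch for the resampling/filtering APIs documented above
# (assumed example values; not part of the dumped scipy source).
import numpy as np
from scipy import signal

rng = np.random.RandomState(0)
t = np.linspace(0, 1.0, 2000, endpoint=False)
x = np.sin(2 * np.pi * 5 * t) + 0.1 * rng.randn(t.size)  # 5 Hz tone + noise

# Zero-phase IIR filtering via second-order sections, which the docstrings
# above recommend over the (b, a) form for numerical robustness.
sos = signal.butter(4, 0.125, output='sos')
y = signal.sosfiltfilt(sos, x)

# Rational-rate resampling with polyphase filtering: 2000 -> 1500 samples,
# extending the edges along a linear trend (padtype requires scipy >= 1.4).
x_34 = signal.resample_poly(x, up=3, down=4, padtype='line')

# Downsample by 4 behind the default order-8 Chebyshev type I filter,
# applied with filtfilt because zero_phase=True.
x_dec = signal.decimate(x, 4, ftype='iir', zero_phase=True)

assert y.shape == x.shape
assert x_34.shape == (1500,)
assert x_dec.shape == (500,)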
"\"\"\"test sparse matrix construction functions\"\"\"\n\nimport numpy as np\nfrom numpy import array\nfrom numpy.testing import (assert_equal, assert_,\n assert_array_equal, assert_array_almost_equal_nulp)\nimport pytest\nfrom pytest import raises as assert_raises\nfrom scipy._lib._testutils import check_free_memory\nfrom scipy._lib._util import check_random_state\n\nfrom scipy.sparse import csr_matrix, coo_matrix, construct\nfrom scipy.sparse.construct import rand as sprand\nfrom scipy.sparse.sputils import matrix\n\nsparse_formats = ['csr','csc','coo','bsr','dia','lil','dok']\n\n#TODO check whether format=XXX is respected\n\n\ndef _sprandn(m, n, density=0.01, format=\"coo\", dtype=None, random_state=None):\n # Helper function for testing.\n random_state = check_random_state(random_state)\n data_rvs = random_state.standard_normal\n return construct.random(m, n, density, format, dtype,\n random_state, data_rvs)\n\n\nclass TestConstructUtils(object):\n def test_spdiags(self):\n diags1 = array([[1, 2, 3, 4, 5]])\n diags2 = array([[1, 2, 3, 4, 5],\n [6, 7, 8, 9,10]])\n diags3 = array([[1, 2, 3, 4, 5],\n [6, 7, 8, 9,10],\n [11,12,13,14,15]])\n\n cases = []\n cases.append((diags1, 0, 1, 1, [[1]]))\n cases.append((diags1, [0], 1, 1, [[1]]))\n cases.append((diags1, [0], 2, 1, [[1],[0]]))\n cases.append((diags1, [0], 1, 2, [[1,0]]))\n cases.append((diags1, [1], 1, 2, [[0,2]]))\n cases.append((diags1,[-1], 1, 2, [[0,0]]))\n cases.append((diags1, [0], 2, 2, [[1,0],[0,2]]))\n cases.append((diags1,[-1], 2, 2, [[0,0],[1,0]]))\n cases.append((diags1, [3], 2, 2, [[0,0],[0,0]]))\n cases.append((diags1, [0], 3, 4, [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))\n cases.append((diags1, [1], 3, 4, [[0,2,0,0],[0,0,3,0],[0,0,0,4]]))\n cases.append((diags1, [2], 3, 5, [[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]]))\n\n cases.append((diags2, [0,2], 3, 3, [[1,0,8],[0,2,0],[0,0,3]]))\n cases.append((diags2, [-1,0], 3, 4, [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))\n cases.append((diags2, [2,-3], 6, 6, [[0,0,3,0,0,0],\n [0,0,0,4,0,0],\n [0,0,0,0,5,0],\n [6,0,0,0,0,0],\n [0,7,0,0,0,0],\n [0,0,8,0,0,0]]))\n\n cases.append((diags3, [-1,0,1], 6, 6, [[6,12, 0, 0, 0, 0],\n [1, 7,13, 0, 0, 0],\n [0, 2, 8,14, 0, 0],\n [0, 0, 3, 9,15, 0],\n [0, 0, 0, 4,10, 0],\n [0, 0, 0, 0, 5, 0]]))\n cases.append((diags3, [-4,2,-1], 6, 5, [[0, 0, 8, 0, 0],\n [11, 0, 0, 9, 0],\n [0,12, 0, 0,10],\n [0, 0,13, 0, 0],\n [1, 0, 0,14, 0],\n [0, 2, 0, 0,15]]))\n\n for d,o,m,n,result in cases:\n assert_equal(construct.spdiags(d,o,m,n).todense(), result)\n\n def test_diags(self):\n a = array([1, 2, 3, 4, 5])\n b = array([6, 7, 8, 9, 10])\n c = array([11, 12, 13, 14, 15])\n\n cases = []\n cases.append((a[:1], 0, (1, 1), [[1]]))\n cases.append(([a[:1]], [0], (1, 1), [[1]]))\n cases.append(([a[:1]], [0], (2, 1), [[1],[0]]))\n cases.append(([a[:1]], [0], (1, 2), [[1,0]]))\n cases.append(([a[:1]], [1], (1, 2), [[0,1]]))\n cases.append(([a[:2]], [0], (2, 2), [[1,0],[0,2]]))\n cases.append(([a[:1]],[-1], (2, 2), [[0,0],[1,0]]))\n cases.append(([a[:3]], [0], (3, 4), [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))\n cases.append(([a[:3]], [1], (3, 4), [[0,1,0,0],[0,0,2,0],[0,0,0,3]]))\n cases.append(([a[:1]], [-2], (3, 5), [[0,0,0,0,0],[0,0,0,0,0],[1,0,0,0,0]]))\n cases.append(([a[:2]], [-1], (3, 5), [[0,0,0,0,0],[1,0,0,0,0],[0,2,0,0,0]]))\n cases.append(([a[:3]], [0], (3, 5), [[1,0,0,0,0],[0,2,0,0,0],[0,0,3,0,0]]))\n cases.append(([a[:3]], [1], (3, 5), [[0,1,0,0,0],[0,0,2,0,0],[0,0,0,3,0]]))\n cases.append(([a[:3]], [2], (3, 5), [[0,0,1,0,0],[0,0,0,2,0],[0,0,0,0,3]]))\n cases.append(([a[:2]], [3], 
(3, 5), [[0,0,0,1,0],[0,0,0,0,2],[0,0,0,0,0]]))\n cases.append(([a[:1]], [4], (3, 5), [[0,0,0,0,1],[0,0,0,0,0],[0,0,0,0,0]]))\n cases.append(([a[:1]], [-4], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[1,0,0]]))\n cases.append(([a[:2]], [-3], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[1,0,0],[0,2,0]]))\n cases.append(([a[:3]], [-2], (5, 3), [[0,0,0],[0,0,0],[1,0,0],[0,2,0],[0,0,3]]))\n cases.append(([a[:3]], [-1], (5, 3), [[0,0,0],[1,0,0],[0,2,0],[0,0,3],[0,0,0]]))\n cases.append(([a[:3]], [0], (5, 3), [[1,0,0],[0,2,0],[0,0,3],[0,0,0],[0,0,0]]))\n cases.append(([a[:2]], [1], (5, 3), [[0,1,0],[0,0,2],[0,0,0],[0,0,0],[0,0,0]]))\n cases.append(([a[:1]], [2], (5, 3), [[0,0,1],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]))\n\n cases.append(([a[:3],b[:1]], [0,2], (3, 3), [[1,0,6],[0,2,0],[0,0,3]]))\n cases.append(([a[:2],b[:3]], [-1,0], (3, 4), [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))\n cases.append(([a[:4],b[:3]], [2,-3], (6, 6), [[0,0,1,0,0,0],\n [0,0,0,2,0,0],\n [0,0,0,0,3,0],\n [6,0,0,0,0,4],\n [0,7,0,0,0,0],\n [0,0,8,0,0,0]]))\n\n cases.append(([a[:4],b,c[:4]], [-1,0,1], (5, 5), [[6,11, 0, 0, 0],\n [1, 7,12, 0, 0],\n [0, 2, 8,13, 0],\n [0, 0, 3, 9,14],\n [0, 0, 0, 4,10]]))\n cases.append(([a[:2],b[:3],c], [-4,2,-1], (6, 5), [[0, 0, 6, 0, 0],\n [11, 0, 0, 7, 0],\n [0,12, 0, 0, 8],\n [0, 0,13, 0, 0],\n [1, 0, 0,14, 0],\n [0, 2, 0, 0,15]]))\n\n # too long arrays are OK\n cases.append(([a], [0], (1, 1), [[1]]))\n cases.append(([a[:3],b], [0,2], (3, 3), [[1, 0, 6], [0, 2, 0], [0, 0, 3]]))\n cases.append((np.array([[1, 2, 3], [4, 5, 6]]), [0,-1], (3, 3), [[1, 0, 0], [4, 2, 0], [0, 5, 3]]))\n\n # scalar case: broadcasting\n cases.append(([1,-2,1], [1,0,-1], (3, 3), [[-2, 1, 0],\n [1, -2, 1],\n [0, 1, -2]]))\n\n for d, o, shape, result in cases:\n err_msg = \"%r %r %r %r\" % (d, o, shape, result)\n assert_equal(construct.diags(d, o, shape=shape).todense(),\n result, err_msg=err_msg)\n\n if shape[0] == shape[1] and hasattr(d[0], '__len__') and len(d[0]) <= max(shape):\n # should be able to find the shape automatically\n assert_equal(construct.diags(d, o).todense(), result,\n err_msg=err_msg)\n\n def test_diags_default(self):\n a = array([1, 2, 3, 4, 5])\n assert_equal(construct.diags(a).todense(), np.diag(a))\n\n def test_diags_default_bad(self):\n a = array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]])\n assert_raises(ValueError, construct.diags, a)\n\n def test_diags_bad(self):\n a = array([1, 2, 3, 4, 5])\n b = array([6, 7, 8, 9, 10])\n c = array([11, 12, 13, 14, 15])\n\n cases = []\n cases.append(([a[:0]], 0, (1, 1)))\n cases.append(([a[:4],b,c[:3]], [-1,0,1], (5, 5)))\n cases.append(([a[:2],c,b[:3]], [-4,2,-1], (6, 5)))\n cases.append(([a[:2],c,b[:3]], [-4,2,-1], None))\n cases.append(([], [-4,2,-1], None))\n cases.append(([1], [-5], (4, 4)))\n cases.append(([a], 0, None))\n\n for d, o, shape in cases:\n assert_raises(ValueError, construct.diags, d, o, shape)\n\n assert_raises(TypeError, construct.diags, [[None]], [0])\n\n def test_diags_vs_diag(self):\n # Check that\n #\n # diags([a, b, ...], [i, j, ...]) == diag(a, i) + diag(b, j) + ...\n #\n\n np.random.seed(1234)\n\n for n_diags in [1, 2, 3, 4, 5, 10]:\n n = 1 + n_diags//2 + np.random.randint(0, 10)\n\n offsets = np.arange(-n+1, n-1)\n np.random.shuffle(offsets)\n offsets = offsets[:n_diags]\n\n diagonals = [np.random.rand(n - abs(q)) for q in offsets]\n\n mat = construct.diags(diagonals, offsets)\n dense_mat = sum([np.diag(x, j) for x, j in zip(diagonals, offsets)])\n\n assert_array_almost_equal_nulp(mat.todense(), dense_mat)\n\n if len(offsets) == 1:\n mat = 
construct.diags(diagonals[0], offsets[0])\n dense_mat = np.diag(diagonals[0], offsets[0])\n assert_array_almost_equal_nulp(mat.todense(), dense_mat)\n\n def test_diags_dtype(self):\n x = construct.diags([2.2], [0], shape=(2, 2), dtype=int)\n assert_equal(x.dtype, int)\n assert_equal(x.todense(), [[2, 0], [0, 2]])\n\n def test_diags_one_diagonal(self):\n d = list(range(5))\n for k in range(-5, 6):\n assert_equal(construct.diags(d, k).toarray(),\n construct.diags([d], [k]).toarray())\n\n def test_diags_empty(self):\n x = construct.diags([])\n assert_equal(x.shape, (0, 0))\n\n def test_identity(self):\n assert_equal(construct.identity(1).toarray(), [[1]])\n assert_equal(construct.identity(2).toarray(), [[1,0],[0,1]])\n\n I = construct.identity(3, dtype='int8', format='dia')\n assert_equal(I.dtype, np.dtype('int8'))\n assert_equal(I.format, 'dia')\n\n for fmt in sparse_formats:\n I = construct.identity(3, format=fmt)\n assert_equal(I.format, fmt)\n assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])\n\n def test_eye(self):\n assert_equal(construct.eye(1,1).toarray(), [[1]])\n assert_equal(construct.eye(2,3).toarray(), [[1,0,0],[0,1,0]])\n assert_equal(construct.eye(3,2).toarray(), [[1,0],[0,1],[0,0]])\n assert_equal(construct.eye(3,3).toarray(), [[1,0,0],[0,1,0],[0,0,1]])\n\n assert_equal(construct.eye(3,3,dtype='int16').dtype, np.dtype('int16'))\n\n for m in [3, 5]:\n for n in [3, 5]:\n for k in range(-5,6):\n assert_equal(construct.eye(m, n, k=k).toarray(), np.eye(m, n, k=k))\n if m == n:\n assert_equal(construct.eye(m, k=k).toarray(), np.eye(m, n, k=k))\n\n def test_eye_one(self):\n assert_equal(construct.eye(1).toarray(), [[1]])\n assert_equal(construct.eye(2).toarray(), [[1,0],[0,1]])\n\n I = construct.eye(3, dtype='int8', format='dia')\n assert_equal(I.dtype, np.dtype('int8'))\n assert_equal(I.format, 'dia')\n\n for fmt in sparse_formats:\n I = construct.eye(3, format=fmt)\n assert_equal(I.format, fmt)\n assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])\n\n def test_kron(self):\n cases = []\n\n cases.append(array([[0]]))\n cases.append(array([[-1]]))\n cases.append(array([[4]]))\n cases.append(array([[10]]))\n cases.append(array([[0],[0]]))\n cases.append(array([[0,0]]))\n cases.append(array([[1,2],[3,4]]))\n cases.append(array([[0,2],[5,0]]))\n cases.append(array([[0,2,-6],[8,0,14]]))\n cases.append(array([[5,4],[0,0],[6,0]]))\n cases.append(array([[5,4,4],[1,0,0],[6,0,8]]))\n cases.append(array([[0,1,0,2,0,5,8]]))\n cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]]))\n\n for a in cases:\n for b in cases:\n result = construct.kron(csr_matrix(a),csr_matrix(b)).todense()\n expected = np.kron(a,b)\n assert_array_equal(result,expected)\n\n def test_kron_large(self):\n n = 2**16\n a = construct.eye(1, n, n-1)\n b = construct.eye(n, 1, 1-n)\n\n construct.kron(a, a)\n construct.kron(b, b)\n\n def test_kronsum(self):\n cases = []\n\n cases.append(array([[0]]))\n cases.append(array([[-1]]))\n cases.append(array([[4]]))\n cases.append(array([[10]]))\n cases.append(array([[1,2],[3,4]]))\n cases.append(array([[0,2],[5,0]]))\n cases.append(array([[0,2,-6],[8,0,14],[0,3,0]]))\n cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]]))\n\n for a in cases:\n for b in cases:\n result = construct.kronsum(csr_matrix(a),csr_matrix(b)).todense()\n expected = np.kron(np.eye(len(b)), a) + \\\n np.kron(b, np.eye(len(a)))\n assert_array_equal(result,expected)\n\n def test_vstack(self):\n\n A = coo_matrix([[1,2],[3,4]])\n B = coo_matrix([[5,6]])\n\n expected = matrix([[1, 2],\n [3, 4],\n [5, 6]])\n 
assert_equal(construct.vstack([A,B]).todense(), expected)\n assert_equal(construct.vstack([A,B], dtype=np.float32).dtype, np.float32)\n assert_equal(construct.vstack([A.tocsr(),B.tocsr()]).todense(),\n expected)\n assert_equal(construct.vstack([A.tocsr(),B.tocsr()], dtype=np.float32).dtype,\n np.float32)\n assert_equal(construct.vstack([A.tocsr(),B.tocsr()],\n dtype=np.float32).indices.dtype, np.int32)\n assert_equal(construct.vstack([A.tocsr(),B.tocsr()],\n dtype=np.float32).indptr.dtype, np.int32)\n\n def test_hstack(self):\n\n A = coo_matrix([[1,2],[3,4]])\n B = coo_matrix([[5],[6]])\n\n expected = matrix([[1, 2, 5],\n [3, 4, 6]])\n assert_equal(construct.hstack([A,B]).todense(), expected)\n assert_equal(construct.hstack([A,B], dtype=np.float32).dtype, np.float32)\n assert_equal(construct.hstack([A.tocsc(),B.tocsc()]).todense(),\n expected)\n assert_equal(construct.hstack([A.tocsc(),B.tocsc()], dtype=np.float32).dtype,\n np.float32)\n\n def test_bmat(self):\n\n A = coo_matrix([[1,2],[3,4]])\n B = coo_matrix([[5],[6]])\n C = coo_matrix([[7]])\n D = coo_matrix((0,0))\n\n expected = matrix([[1, 2, 5],\n [3, 4, 6],\n [0, 0, 7]])\n assert_equal(construct.bmat([[A,B],[None,C]]).todense(), expected)\n\n expected = matrix([[1, 2, 0],\n [3, 4, 0],\n [0, 0, 7]])\n assert_equal(construct.bmat([[A,None],[None,C]]).todense(), expected)\n\n expected = matrix([[0, 5],\n [0, 6],\n [7, 0]])\n assert_equal(construct.bmat([[None,B],[C,None]]).todense(), expected)\n\n expected = matrix(np.empty((0,0)))\n assert_equal(construct.bmat([[None,None]]).todense(), expected)\n assert_equal(construct.bmat([[None,D],[D,None]]).todense(), expected)\n\n # test bug reported in gh-5976\n expected = matrix([[7]])\n assert_equal(construct.bmat([[None,D],[C,None]]).todense(), expected)\n\n # test failure cases\n with assert_raises(ValueError) as excinfo:\n construct.bmat([[A], [B]])\n excinfo.match(r'Got blocks\\[1,0\\]\\.shape\\[1\\] == 1, expected 2')\n\n with assert_raises(ValueError) as excinfo:\n construct.bmat([[A, C]])\n excinfo.match(r'Got blocks\\[0,1\\]\\.shape\\[0\\] == 1, expected 2')\n\n @pytest.mark.slow\n def test_concatenate_int32_overflow(self):\n \"\"\" test for indptr overflow when concatenating matrices \"\"\"\n check_free_memory(30000)\n\n n = 33000\n A = csr_matrix(np.ones((n, n), dtype=bool))\n B = A.copy()\n C = construct._compressed_sparse_stack((A,B), 0)\n\n assert_(np.all(np.equal(np.diff(C.indptr), n)))\n assert_equal(C.indices.dtype, np.int64)\n assert_equal(C.indptr.dtype, np.int64)\n\n def test_block_diag_basic(self):\n \"\"\" basic test for block_diag \"\"\"\n A = coo_matrix([[1,2],[3,4]])\n B = coo_matrix([[5],[6]])\n C = coo_matrix([[7]])\n\n expected = matrix([[1, 2, 0, 0],\n [3, 4, 0, 0],\n [0, 0, 5, 0],\n [0, 0, 6, 0],\n [0, 0, 0, 7]])\n\n assert_equal(construct.block_diag((A, B, C)).todense(), expected)\n\n def test_block_diag_scalar_1d_args(self):\n \"\"\" block_diag with scalar and 1d arguments \"\"\"\n # one 1d matrix and a scalar\n assert_array_equal(construct.block_diag([[2,3], 4]).toarray(),\n [[2, 3, 0], [0, 0, 4]])\n\n def test_block_diag_1(self):\n \"\"\" block_diag with one matrix \"\"\"\n assert_equal(construct.block_diag([[1, 0]]).todense(),\n matrix([[1, 0]]))\n assert_equal(construct.block_diag([[[1, 0]]]).todense(),\n matrix([[1, 0]]))\n assert_equal(construct.block_diag([[[1], [0]]]).todense(),\n matrix([[1], [0]]))\n # just on scalar\n assert_equal(construct.block_diag([1]).todense(),\n matrix([[1]]))\n\n def test_random_sampling(self):\n # Simple sanity checks for 
sparse random sampling.\n for f in sprand, _sprandn:\n for t in [np.float32, np.float64, np.longdouble,\n np.int32, np.int64, np.complex64, np.complex128]:\n x = f(5, 10, density=0.1, dtype=t)\n assert_equal(x.dtype, t)\n assert_equal(x.shape, (5, 10))\n assert_equal(x.nnz, 5)\n\n x1 = f(5, 10, density=0.1, random_state=4321)\n assert_equal(x1.dtype, np.double)\n\n x2 = f(5, 10, density=0.1,\n random_state=np.random.RandomState(4321))\n\n assert_array_equal(x1.data, x2.data)\n assert_array_equal(x1.row, x2.row)\n assert_array_equal(x1.col, x2.col)\n\n for density in [0.0, 0.1, 0.5, 1.0]:\n x = f(5, 10, density=density)\n assert_equal(x.nnz, int(density * np.prod(x.shape)))\n\n for fmt in ['coo', 'csc', 'csr', 'lil']:\n x = f(5, 10, format=fmt)\n assert_equal(x.format, fmt)\n\n assert_raises(ValueError, lambda: f(5, 10, 1.1))\n assert_raises(ValueError, lambda: f(5, 10, -0.1))\n\n def test_rand(self):\n # Simple distributional checks for sparse.rand.\n random_states = [None, 4321, np.random.RandomState()]\n try:\n gen = np.random.default_rng()\n random_states.append(gen)\n except AttributeError:\n pass\n\n for random_state in random_states:\n x = sprand(10, 20, density=0.5, dtype=np.float64,\n random_state=random_state)\n assert_(np.all(np.less_equal(0, x.data)))\n assert_(np.all(np.less_equal(x.data, 1)))\n\n def test_randn(self):\n # Simple distributional checks for sparse.randn.\n # Statistically, some of these should be negative\n # and some should be greater than 1.\n random_states = [None, 4321, np.random.RandomState()]\n try:\n gen = np.random.default_rng()\n random_states.append(gen)\n except AttributeError:\n pass\n\n for random_state in random_states:\n x = _sprandn(10, 20, density=0.5, dtype=np.float64,\n random_state=random_state)\n assert_(np.any(np.less(x.data, 0)))\n assert_(np.any(np.less(1, x.data)))\n\n def test_random_accept_str_dtype(self):\n # anything that np.dtype can convert to a dtype should be accepted\n # for the dtype\n construct.random(10, 10, dtype='d')\n\n",
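The tests above exercise `diags`, `block_diag`, `kron`, and `random` through the private `scipy.sparse.construct` module; the same constructors are exposed from the public `scipy.sparse` namespace. A small companion sketch under that assumption, with illustrative matrices and density values rather than ones taken from the test file:

# Companion sketch for the constructors tested above (illustrative values).
import numpy as np
from scipy import sparse

# diags: a 3x3 tridiagonal matrix; the main diagonal's length fixes the shape.
T = sparse.diags([[1, 1], [-2, -2, -2], [1, 1]], offsets=[-1, 0, 1])
# T.toarray() ->
# [[-2  1  0]
#  [ 1 -2  1]
#  [ 0  1 -2]]

# block_diag accepts sparse matrices, dense arrays, and scalars alike.
B = sparse.block_diag((np.eye(2), [[7]]))  # 3x3 block-diagonal result

# kron, as in test_kron above, matches np.kron on the dense equivalents.
K = sparse.kron(sparse.identity(2), sparse.csr_matrix([[0, 1], [1, 0]]))

# random: nnz is exactly int(density * m * n), as test_random_sampling checks.
R = sparse.random(4, 5, density=0.2, format='csr',
                  random_state=np.random.RandomState(1234))
assert R.nnz == int(0.2 * 4 * 5)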
"# Authors: Pearu Peterson, Pauli Virtanen, John Travers\n\"\"\"\nFirst-order ODE integrators.\n\nUser-friendly interface to various numerical integrators for solving a\nsystem of first order ODEs with prescribed initial conditions::\n\n d y(t)[i]\n --------- = f(t,y(t))[i],\n d t\n\n y(t=0)[i] = y0[i],\n\nwhere::\n\n i = 0, ..., len(y0) - 1\n\nclass ode\n---------\n\nA generic interface class to numeric integrators. It has the following\nmethods::\n\n integrator = ode(f, jac=None)\n integrator = integrator.set_integrator(name, **params)\n integrator = integrator.set_initial_value(y0, t0=0.0)\n integrator = integrator.set_f_params(*args)\n integrator = integrator.set_jac_params(*args)\n y1 = integrator.integrate(t1, step=False, relax=False)\n flag = integrator.successful()\n\nclass complex_ode\n-----------------\n\nThis class has the same generic interface as ode, except it can handle complex\nf, y and Jacobians by transparently translating them into the equivalent\nreal-valued system. It supports the real-valued solvers (i.e., not zvode) and is\nan alternative to ode with the zvode solver, sometimes performing better.\n\"\"\"\n# XXX: Integrators must have:\n# ===========================\n# cvode - C version of vode and vodpk with many improvements.\n# Get it from http://www.netlib.org/ode/cvode.tar.gz.\n# To wrap cvode to Python, one must write the extension module by\n# hand. Its interface is too much 'advanced C' that using f2py\n# would be too complicated (or impossible).\n#\n# How to define a new integrator:\n# ===============================\n#\n# class myodeint(IntegratorBase):\n#\n# runner = <odeint function> or None\n#\n# def __init__(self,...): # required\n# <initialize>\n#\n# def reset(self,n,has_jac): # optional\n# # n - the size of the problem (number of equations)\n# # has_jac - whether user has supplied its own routine for Jacobian\n# <allocate memory,initialize further>\n#\n# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required\n# # this method is called to integrate from t=t0 to t=t1\n# # with initial condition y0. f and jac are user-supplied functions\n# # that define the problem. f_params,jac_params are additional\n# # arguments\n# # to these functions.\n# <calculate y1>\n# if <calculation was unsuccessful>:\n# self.success = 0\n# return t1,y1\n#\n# # In addition, one can define step() and run_relax() methods (they\n# # take the same arguments as run()) if the integrator can support\n# # these features (see IntegratorBase doc strings).\n#\n# if myodeint.runner:\n# IntegratorBase.integrator_classes.append(myodeint)\n\n__all__ = ['ode', 'complex_ode']\n__version__ = \"$Id$\"\n__docformat__ = \"restructuredtext en\"\n\nimport re\nimport warnings\n\nfrom numpy import asarray, array, zeros, int32, isscalar, real, imag, vstack\n\nfrom . import vode as _vode\nfrom . import _dop\nfrom . import lsoda as _lsoda\n\n\n# ------------------------------------------------------------------------------\n# User interface\n# ------------------------------------------------------------------------------\n\n\nclass ode(object):\n \"\"\"\n A generic interface class to numeric integrators.\n\n Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.\n\n *Note*: The first two arguments of ``f(t, y, ...)`` are in the\n opposite order of the arguments in the system definition function used\n by `scipy.integrate.odeint`.\n\n Parameters\n ----------\n f : callable ``f(t, y, *f_args)``\n Right-hand side of the differential equation. 
t is a scalar,\n ``y.shape == (n,)``.\n ``f_args`` is set by calling ``set_f_params(*args)``.\n `f` should return a scalar, array or list (not a tuple).\n jac : callable ``jac(t, y, *jac_args)``, optional\n Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.\n ``jac_args`` is set by calling ``set_jac_params(*args)``.\n\n Attributes\n ----------\n t : float\n Current time.\n y : ndarray\n Current variable values.\n\n See also\n --------\n odeint : an integrator with a simpler interface based on lsoda from ODEPACK\n quad : for finding the area under a curve\n\n Notes\n -----\n Available integrators are listed below. They can be selected using\n the `set_integrator` method.\n\n \"vode\"\n\n Real-valued Variable-coefficient Ordinary Differential Equation\n solver, with fixed-leading-coefficient implementation. It provides\n implicit Adams method (for non-stiff problems) and a method based on\n backward differentiation formulas (BDF) (for stiff problems).\n\n Source: http://www.netlib.org/ode/vode.f\n\n .. warning::\n\n This integrator is not re-entrant. You cannot have two `ode`\n instances using the \"vode\" integrator at the same time.\n\n This integrator accepts the following parameters in `set_integrator`\n method of the `ode` class:\n\n - atol : float or sequence\n absolute tolerance for solution\n - rtol : float or sequence\n relative tolerance for solution\n - lband : None or int\n - uband : None or int\n Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.\n Setting these requires your jac routine to return the jacobian\n in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The\n dimension of the matrix must be (lband+uband+1, len(y)).\n - method: 'adams' or 'bdf'\n Which solver to use, Adams (non-stiff) or BDF (stiff)\n - with_jacobian : bool\n This option is only considered when the user has not supplied a\n Jacobian function and has not indicated (by setting either band)\n that the Jacobian is banded. In this case, `with_jacobian` specifies\n whether the iteration method of the ODE solver's correction step is\n chord iteration with an internally generated full Jacobian or\n functional iteration with no Jacobian.\n - nsteps : int\n Maximum number of (internally defined) steps allowed during one\n call to the solver.\n - first_step : float\n - min_step : float\n - max_step : float\n Limits for the step sizes used by the integrator.\n - order : int\n Maximum order used by the integrator,\n order <= 12 for Adams, <= 5 for BDF.\n\n \"zvode\"\n\n Complex-valued Variable-coefficient Ordinary Differential Equation\n solver, with fixed-leading-coefficient implementation. It provides\n implicit Adams method (for non-stiff problems) and a method based on\n backward differentiation formulas (BDF) (for stiff problems).\n\n Source: http://www.netlib.org/ode/zvode.f\n\n .. warning::\n\n This integrator is not re-entrant. You cannot have two `ode`\n instances using the \"zvode\" integrator at the same time.\n\n This integrator accepts the same parameters in `set_integrator`\n as the \"vode\" solver.\n\n .. note::\n\n When using ZVODE for a stiff system, it should only be used for\n the case in which the function f is analytic, that is, when each f(i)\n is an analytic function of each y(j). Analyticity means that the\n partial derivative df(i)/dy(j) is a unique complex number, and this\n fact is critical in the way ZVODE solves the dense or banded linear\n systems that arise in the stiff case. 
For a complex stiff ODE system\n in which f is not analytic, ZVODE is likely to have convergence\n failures, and for this problem one should instead use DVODE on the\n equivalent real system (in the real and imaginary parts of y).\n\n \"lsoda\"\n\n Real-valued Variable-coefficient Ordinary Differential Equation\n solver, with fixed-leading-coefficient implementation. It provides\n automatic method switching between implicit Adams method (for non-stiff\n problems) and a method based on backward differentiation formulas (BDF)\n (for stiff problems).\n\n Source: http://www.netlib.org/odepack\n\n .. warning::\n\n This integrator is not re-entrant. You cannot have two `ode`\n instances using the \"lsoda\" integrator at the same time.\n\n This integrator accepts the following parameters in `set_integrator`\n method of the `ode` class:\n\n - atol : float or sequence\n absolute tolerance for solution\n - rtol : float or sequence\n relative tolerance for solution\n - lband : None or int\n - uband : None or int\n Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.\n Setting these requires your jac routine to return the jacobian\n in packed format, jac_packed[i-j+uband, j] = jac[i,j].\n - with_jacobian : bool\n *Not used.*\n - nsteps : int\n Maximum number of (internally defined) steps allowed during one\n call to the solver.\n - first_step : float\n - min_step : float\n - max_step : float\n Limits for the step sizes used by the integrator.\n - max_order_ns : int\n Maximum order used in the nonstiff case (default 12).\n - max_order_s : int\n Maximum order used in the stiff case (default 5).\n - max_hnil : int\n Maximum number of messages reporting too small step size (t + h = t)\n (default 0)\n - ixpr : int\n Whether to generate extra printing at method switches (default False).\n\n \"dopri5\"\n\n This is an explicit runge-kutta method of order (4)5 due to Dormand &\n Prince (with stepsize control and dense output).\n\n Authors:\n\n E. Hairer and G. Wanner\n Universite de Geneve, Dept. de Mathematiques\n CH-1211 Geneve 24, Switzerland\n e-mail: [email protected], [email protected]\n\n This code is described in [HNW93]_.\n\n This integrator accepts the following parameters in set_integrator()\n method of the ode class:\n\n - atol : float or sequence\n absolute tolerance for solution\n - rtol : float or sequence\n relative tolerance for solution\n - nsteps : int\n Maximum number of (internally defined) steps allowed during one\n call to the solver.\n - first_step : float\n - max_step : float\n - safety : float\n Safety factor on new step selection (default 0.9)\n - ifactor : float\n - dfactor : float\n Maximum factor to increase/decrease step size by in one step\n - beta : float\n Beta parameter for stabilised step size control.\n - verbosity : int\n Switch for printing messages (< 0 for no messages).\n\n \"dop853\"\n\n This is an explicit runge-kutta method of order 8(5,3) due to Dormand\n & Prince (with stepsize control and dense output).\n\n Options and references the same as \"dopri5\".\n\n Examples\n --------\n\n A problem to integrate and the corresponding jacobian:\n\n >>> from scipy.integrate import ode\n >>>\n >>> y0, t0 = [1.0j, 2.0], 0\n >>>\n >>> def f(t, y, arg1):\n ... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]\n >>> def jac(t, y, arg1):\n ... 
return [[1j*arg1, 1], [0, -arg1*2*y[1]]]\n\n The integration:\n\n >>> r = ode(f, jac).set_integrator('zvode', method='bdf')\n >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)\n >>> t1 = 10\n >>> dt = 1\n >>> while r.successful() and r.t < t1:\n ... print(r.t+dt, r.integrate(r.t+dt))\n 1 [-0.71038232+0.23749653j 0.40000271+0.j ]\n 2.0 [0.19098503-0.52359246j 0.22222356+0.j ]\n 3.0 [0.47153208+0.52701229j 0.15384681+0.j ]\n 4.0 [-0.61905937+0.30726255j 0.11764744+0.j ]\n 5.0 [0.02340997-0.61418799j 0.09523835+0.j ]\n 6.0 [0.58643071+0.339819j 0.08000018+0.j ]\n 7.0 [-0.52070105+0.44525141j 0.06896565+0.j ]\n 8.0 [-0.15986733-0.61234476j 0.06060616+0.j ]\n 9.0 [0.64850462+0.15048982j 0.05405414+0.j ]\n 10.0 [-0.38404699+0.56382299j 0.04878055+0.j ]\n\n References\n ----------\n .. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary\n Differential Equations i. Nonstiff Problems. 2nd edition.\n Springer Series in Computational Mathematics,\n Springer-Verlag (1993)\n\n \"\"\"\n\n def __init__(self, f, jac=None):\n self.stiff = 0\n self.f = f\n self.jac = jac\n self.f_params = ()\n self.jac_params = ()\n self._y = []\n\n @property\n def y(self):\n return self._y\n\n def set_initial_value(self, y, t=0.0):\n \"\"\"Set initial conditions y(t) = y.\"\"\"\n if isscalar(y):\n y = [y]\n n_prev = len(self._y)\n if not n_prev:\n self.set_integrator('') # find first available integrator\n self._y = asarray(y, self._integrator.scalar)\n self.t = t\n self._integrator.reset(len(self._y), self.jac is not None)\n return self\n\n def set_integrator(self, name, **integrator_params):\n \"\"\"\n Set integrator by name.\n\n Parameters\n ----------\n name : str\n Name of the integrator.\n integrator_params\n Additional parameters for the integrator.\n \"\"\"\n integrator = find_integrator(name)\n if integrator is None:\n # FIXME: this really should be raise an exception. Will that break\n # any code?\n warnings.warn('No integrator name match with %r or is not '\n 'available.' % name)\n else:\n self._integrator = integrator(**integrator_params)\n if not len(self._y):\n self.t = 0.0\n self._y = array([0.0], self._integrator.scalar)\n self._integrator.reset(len(self._y), self.jac is not None)\n return self\n\n def integrate(self, t, step=False, relax=False):\n \"\"\"Find y=y(t), set y as an initial condition, and return y.\n\n Parameters\n ----------\n t : float\n The endpoint of the integration step.\n step : bool\n If True, and if the integrator supports the step method,\n then perform a single integration step and return.\n This parameter is provided in order to expose internals of\n the implementation, and should not be changed from its default\n value in most cases.\n relax : bool\n If True and if the integrator supports the run_relax method,\n then integrate until t_1 >= t and return. 
``relax`` is not\n referenced if ``step=True``.\n This parameter is provided in order to expose internals of\n the implementation, and should not be changed from its default\n value in most cases.\n\n Returns\n -------\n y : float\n The integrated value at t\n \"\"\"\n if step and self._integrator.supports_step:\n mth = self._integrator.step\n elif relax and self._integrator.supports_run_relax:\n mth = self._integrator.run_relax\n else:\n mth = self._integrator.run\n\n try:\n self._y, self.t = mth(self.f, self.jac or (lambda: None),\n self._y, self.t, t,\n self.f_params, self.jac_params)\n except SystemError:\n # f2py issue with tuple returns, see ticket 1187.\n raise ValueError('Function to integrate must not return a tuple.')\n\n return self._y\n\n def successful(self):\n \"\"\"Check if integration was successful.\"\"\"\n try:\n self._integrator\n except AttributeError:\n self.set_integrator('')\n return self._integrator.success == 1\n\n def get_return_code(self):\n \"\"\"Extracts the return code for the integration to enable better control\n if the integration fails.\n\n In general, a return code > 0 implies success, while a return code < 0\n implies failure.\n\n Notes\n -----\n This section describes possible return codes and their meaning, for available\n integrators that can be selected by `set_integrator` method.\n\n \"vode\"\n\n =========== =======\n Return Code Message\n =========== =======\n 2 Integration successful.\n -1 Excess work done on this call. (Perhaps wrong MF.)\n -2 Excess accuracy requested. (Tolerances too small.)\n -3 Illegal input detected. (See printed message.)\n -4 Repeated error test failures. (Check all input.)\n -5 Repeated convergence failures. (Perhaps bad Jacobian\n supplied or wrong choice of MF or tolerances.)\n -6 Error weight became zero during problem. (Solution\n component i vanished, and ATOL or ATOL(i) = 0.)\n =========== =======\n\n \"zvode\"\n\n =========== =======\n Return Code Message\n =========== =======\n 2 Integration successful.\n -1 Excess work done on this call. (Perhaps wrong MF.)\n -2 Excess accuracy requested. (Tolerances too small.)\n -3 Illegal input detected. (See printed message.)\n -4 Repeated error test failures. (Check all input.)\n -5 Repeated convergence failures. (Perhaps bad Jacobian\n supplied or wrong choice of MF or tolerances.)\n -6 Error weight became zero during problem. 
(Solution\n component i vanished, and ATOL or ATOL(i) = 0.)\n =========== =======\n\n \"dopri5\"\n\n =========== =======\n Return Code Message\n =========== =======\n 1 Integration successful.\n 2 Integration successful (interrupted by solout).\n -1 Input is not consistent.\n -2 Larger nsteps is needed.\n -3 Step size becomes too small.\n -4 Problem is probably stiff (interrupted).\n =========== =======\n\n \"dop853\"\n\n =========== =======\n Return Code Message\n =========== =======\n 1 Integration successful.\n 2 Integration successful (interrupted by solout).\n -1 Input is not consistent.\n -2 Larger nsteps is needed.\n -3 Step size becomes too small.\n -4 Problem is probably stiff (interrupted).\n =========== =======\n\n \"lsoda\"\n\n =========== =======\n Return Code Message\n =========== =======\n 2 Integration successful.\n -1 Excess work done on this call (perhaps wrong Dfun type).\n -2 Excess accuracy requested (tolerances too small).\n -3 Illegal input detected (internal error).\n -4 Repeated error test failures (internal error).\n -5 Repeated convergence failures (perhaps bad Jacobian or tolerances).\n -6 Error weight became zero during problem.\n -7 Internal workspace insufficient to finish (internal error).\n =========== =======\n \"\"\"\n try:\n self._integrator\n except AttributeError:\n self.set_integrator('')\n return self._integrator.istate\n\n def set_f_params(self, *args):\n \"\"\"Set extra parameters for user-supplied function f.\"\"\"\n self.f_params = args\n return self\n\n def set_jac_params(self, *args):\n \"\"\"Set extra parameters for user-supplied function jac.\"\"\"\n self.jac_params = args\n return self\n\n def set_solout(self, solout):\n \"\"\"\n Set callable to be called at every successful integration step.\n\n Parameters\n ----------\n solout : callable\n ``solout(t, y)`` is called at each internal integrator step,\n t is a scalar providing the current independent position\n y is the current soloution ``y.shape == (n,)``\n solout should return -1 to stop integration\n otherwise it should return None or 0\n\n \"\"\"\n if self._integrator.supports_solout:\n self._integrator.set_solout(solout)\n if self._y is not None:\n self._integrator.reset(len(self._y), self.jac is not None)\n else:\n raise ValueError(\"selected integrator does not support solout,\"\n \" choose another one\")\n\n\ndef _transform_banded_jac(bjac):\n \"\"\"\n Convert a real matrix of the form (for example)\n\n [0 0 A B] [0 0 0 B]\n [0 0 C D] [0 0 A D]\n [E F G H] to [0 F C H]\n [I J K L] [E J G L]\n [I 0 K 0]\n\n That is, every other column is shifted up one.\n \"\"\"\n # Shift every other column.\n newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))\n newjac[1:, ::2] = bjac[:, ::2]\n newjac[:-1, 1::2] = bjac[:, 1::2]\n return newjac\n\n\nclass complex_ode(ode):\n \"\"\"\n A wrapper of ode for complex systems.\n\n This functions similarly as `ode`, but re-maps a complex-valued\n equation system to a real-valued one before using the integrators.\n\n Parameters\n ----------\n f : callable ``f(t, y, *f_args)``\n Rhs of the equation. 
t is a scalar, ``y.shape == (n,)``.\n ``f_args`` is set by calling ``set_f_params(*args)``.\n jac : callable ``jac(t, y, *jac_args)``\n Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.\n ``jac_args`` is set by calling ``set_f_params(*args)``.\n\n Attributes\n ----------\n t : float\n Current time.\n y : ndarray\n Current variable values.\n\n Examples\n --------\n For usage examples, see `ode`.\n\n \"\"\"\n\n def __init__(self, f, jac=None):\n self.cf = f\n self.cjac = jac\n if jac is None:\n ode.__init__(self, self._wrap, None)\n else:\n ode.__init__(self, self._wrap, self._wrap_jac)\n\n def _wrap(self, t, y, *f_args):\n f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))\n # self.tmp is a real-valued array containing the interleaved\n # real and imaginary parts of f.\n self.tmp[::2] = real(f)\n self.tmp[1::2] = imag(f)\n return self.tmp\n\n def _wrap_jac(self, t, y, *jac_args):\n # jac is the complex Jacobian computed by the user-defined function.\n jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))\n\n # jac_tmp is the real version of the complex Jacobian. Each complex\n # entry in jac, say 2+3j, becomes a 2x2 block of the form\n # [2 -3]\n # [3 2]\n jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))\n jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)\n jac_tmp[1::2, ::2] = imag(jac)\n jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]\n\n ml = getattr(self._integrator, 'ml', None)\n mu = getattr(self._integrator, 'mu', None)\n if ml is not None or mu is not None:\n # Jacobian is banded. The user's Jacobian function has computed\n # the complex Jacobian in packed format. The corresponding\n # real-valued version has every other column shifted up.\n jac_tmp = _transform_banded_jac(jac_tmp)\n\n return jac_tmp\n\n @property\n def y(self):\n return self._y[::2] + 1j * self._y[1::2]\n\n def set_integrator(self, name, **integrator_params):\n \"\"\"\n Set integrator by name.\n\n Parameters\n ----------\n name : str\n Name of the integrator\n integrator_params\n Additional parameters for the integrator.\n \"\"\"\n if name == 'zvode':\n raise ValueError(\"zvode must be used with ode, not complex_ode\")\n\n lband = integrator_params.get('lband')\n uband = integrator_params.get('uband')\n if lband is not None or uband is not None:\n # The Jacobian is banded. Override the user-supplied bandwidths\n # (which are for the complex Jacobian) with the bandwidths of\n # the corresponding real-valued Jacobian wrapper of the complex\n # Jacobian.\n integrator_params['lband'] = 2 * (lband or 0) + 1\n integrator_params['uband'] = 2 * (uband or 0) + 1\n\n return ode.set_integrator(self, name, **integrator_params)\n\n def set_initial_value(self, y, t=0.0):\n \"\"\"Set initial conditions y(t) = y.\"\"\"\n y = asarray(y)\n self.tmp = zeros(y.size * 2, 'float')\n self.tmp[::2] = real(y)\n self.tmp[1::2] = imag(y)\n return ode.set_initial_value(self, self.tmp, t)\n\n def integrate(self, t, step=False, relax=False):\n \"\"\"Find y=y(t), set y as an initial condition, and return y.\n\n Parameters\n ----------\n t : float\n The endpoint of the integration step.\n step : bool\n If True, and if the integrator supports the step method,\n then perform a single integration step and return.\n This parameter is provided in order to expose internals of\n the implementation, and should not be changed from its default\n value in most cases.\n relax : bool\n If True and if the integrator supports the run_relax method,\n then integrate until t_1 >= t and return. 
``relax`` is not\n referenced if ``step=True``.\n This parameter is provided in order to expose internals of\n the implementation, and should not be changed from its default\n value in most cases.\n\n Returns\n -------\n y : float\n The integrated value at t\n \"\"\"\n y = ode.integrate(self, t, step, relax)\n return y[::2] + 1j * y[1::2]\n\n def set_solout(self, solout):\n \"\"\"\n Set callable to be called at every successful integration step.\n\n Parameters\n ----------\n solout : callable\n ``solout(t, y)`` is called at each internal integrator step,\n t is a scalar providing the current independent position\n y is the current soloution ``y.shape == (n,)``\n solout should return -1 to stop integration\n otherwise it should return None or 0\n\n \"\"\"\n if self._integrator.supports_solout:\n self._integrator.set_solout(solout, complex=True)\n else:\n raise TypeError(\"selected integrator does not support solouta,\"\n + \"choose another one\")\n\n\n# ------------------------------------------------------------------------------\n# ODE integrators\n# ------------------------------------------------------------------------------\n\ndef find_integrator(name):\n for cl in IntegratorBase.integrator_classes:\n if re.match(name, cl.__name__, re.I):\n return cl\n return None\n\n\nclass IntegratorConcurrencyError(RuntimeError):\n \"\"\"\n Failure due to concurrent usage of an integrator that can be used\n only for a single problem at a time.\n\n \"\"\"\n\n def __init__(self, name):\n msg = (\"Integrator `%s` can be used to solve only a single problem \"\n \"at a time. If you want to integrate multiple problems, \"\n \"consider using a different integrator \"\n \"(see `ode.set_integrator`)\") % name\n RuntimeError.__init__(self, msg)\n\n\nclass IntegratorBase(object):\n runner = None # runner is None => integrator is not available\n success = None # success==1 if integrator was called successfully\n istate = None # istate > 0 means success, istate < 0 means failure\n supports_run_relax = None\n supports_step = None\n supports_solout = False\n integrator_classes = []\n scalar = float\n\n def acquire_new_handle(self):\n # Some of the integrators have internal state (ancient\n # Fortran...), and so only one instance can use them at a time.\n # We keep track of this, and fail when concurrent usage is tried.\n self.__class__.active_global_handle += 1\n self.handle = self.__class__.active_global_handle\n\n def check_handle(self):\n if self.handle is not self.__class__.active_global_handle:\n raise IntegratorConcurrencyError(self.__class__.__name__)\n\n def reset(self, n, has_jac):\n \"\"\"Prepare integrator for call: allocate memory, set flags, etc.\n n - number of equations.\n has_jac - if user has supplied function for evaluating Jacobian.\n \"\"\"\n\n def run(self, f, jac, y0, t0, t1, f_params, jac_params):\n \"\"\"Integrate from t=t0 to t=t1 using y0 as an initial condition.\n Return 2-tuple (y1,t1) where y1 is the result and t=t1\n defines the stoppage coordinate of the result.\n \"\"\"\n raise NotImplementedError('all integrators must define '\n 'run(f, jac, t0, t1, y0, f_params, jac_params)')\n\n def step(self, f, jac, y0, t0, t1, f_params, jac_params):\n \"\"\"Make one integration step and return (y1,t1).\"\"\"\n raise NotImplementedError('%s does not support step() method' %\n self.__class__.__name__)\n\n def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):\n \"\"\"Integrate from t=t0 to t>=t1 and return (y1,t).\"\"\"\n raise NotImplementedError('%s does not support run_relax() 
method' %\n self.__class__.__name__)\n\n # XXX: __str__ method for getting visual state of the integrator\n\n\ndef _vode_banded_jac_wrapper(jacfunc, ml, jac_params):\n \"\"\"\n Wrap a banded Jacobian function with a function that pads\n the Jacobian with `ml` rows of zeros.\n \"\"\"\n\n def jac_wrapper(t, y):\n jac = asarray(jacfunc(t, y, *jac_params))\n padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))\n return padded_jac\n\n return jac_wrapper\n\n\nclass vode(IntegratorBase):\n runner = getattr(_vode, 'dvode', None)\n\n messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',\n -2: 'Excess accuracy requested. (Tolerances too small.)',\n -3: 'Illegal input detected. (See printed message.)',\n -4: 'Repeated error test failures. (Check all input.)',\n -5: 'Repeated convergence failures. (Perhaps bad'\n ' Jacobian supplied or wrong choice of MF or tolerances.)',\n -6: 'Error weight became zero during problem. (Solution'\n ' component i vanished, and ATOL or ATOL(i) = 0.)'\n }\n supports_run_relax = 1\n supports_step = 1\n active_global_handle = 0\n\n def __init__(self,\n method='adams',\n with_jacobian=False,\n rtol=1e-6, atol=1e-12,\n lband=None, uband=None,\n order=12,\n nsteps=500,\n max_step=0.0, # corresponds to infinite\n min_step=0.0,\n first_step=0.0, # determined by solver\n ):\n\n if re.match(method, r'adams', re.I):\n self.meth = 1\n elif re.match(method, r'bdf', re.I):\n self.meth = 2\n else:\n raise ValueError('Unknown integration method %s' % method)\n self.with_jacobian = with_jacobian\n self.rtol = rtol\n self.atol = atol\n self.mu = uband\n self.ml = lband\n\n self.order = order\n self.nsteps = nsteps\n self.max_step = max_step\n self.min_step = min_step\n self.first_step = first_step\n self.success = 1\n\n self.initialized = False\n\n def _determine_mf_and_set_bands(self, has_jac):\n \"\"\"\n Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.\n\n In the Fortran code, the legal values of `MF` are:\n 10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,\n -11, -12, -14, -15, -21, -22, -24, -25\n but this Python wrapper does not use negative values.\n\n Returns\n\n mf = 10*self.meth + miter\n\n self.meth is the linear multistep method:\n self.meth == 1: method=\"adams\"\n self.meth == 2: method=\"bdf\"\n\n miter is the correction iteration method:\n miter == 0: Functional iteraton; no Jacobian involved.\n miter == 1: Chord iteration with user-supplied full Jacobian.\n miter == 2: Chord iteration with internally computed full Jacobian.\n miter == 3: Chord iteration with internally computed diagonal Jacobian.\n miter == 4: Chord iteration with user-supplied banded Jacobian.\n miter == 5: Chord iteration with internally computed banded Jacobian.\n\n Side effects: If either self.mu or self.ml is not None and the other is None,\n then the one that is None is set to 0.\n \"\"\"\n\n jac_is_banded = self.mu is not None or self.ml is not None\n if jac_is_banded:\n if self.mu is None:\n self.mu = 0\n if self.ml is None:\n self.ml = 0\n\n # has_jac is True if the user provided a Jacobian function.\n if has_jac:\n if jac_is_banded:\n miter = 4\n else:\n miter = 1\n else:\n if jac_is_banded:\n if self.ml == self.mu == 0:\n miter = 3 # Chord iteration with internal diagonal Jacobian.\n else:\n miter = 5 # Chord iteration with internal banded Jacobian.\n else:\n # self.with_jacobian is set by the user in the call to ode.set_integrator.\n if self.with_jacobian:\n miter = 2 # Chord iteration with internal full Jacobian.\n else:\n miter = 0 # Functional 
iteraton; no Jacobian involved.\n\n mf = 10 * self.meth + miter\n return mf\n\n def reset(self, n, has_jac):\n mf = self._determine_mf_and_set_bands(has_jac)\n\n if mf == 10:\n lrw = 20 + 16 * n\n elif mf in [11, 12]:\n lrw = 22 + 16 * n + 2 * n * n\n elif mf == 13:\n lrw = 22 + 17 * n\n elif mf in [14, 15]:\n lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n\n elif mf == 20:\n lrw = 20 + 9 * n\n elif mf in [21, 22]:\n lrw = 22 + 9 * n + 2 * n * n\n elif mf == 23:\n lrw = 22 + 10 * n\n elif mf in [24, 25]:\n lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n\n else:\n raise ValueError('Unexpected mf=%s' % mf)\n\n if mf % 10 in [0, 3]:\n liw = 30\n else:\n liw = 30 + n\n\n rwork = zeros((lrw,), float)\n rwork[4] = self.first_step\n rwork[5] = self.max_step\n rwork[6] = self.min_step\n self.rwork = rwork\n\n iwork = zeros((liw,), int32)\n if self.ml is not None:\n iwork[0] = self.ml\n if self.mu is not None:\n iwork[1] = self.mu\n iwork[4] = self.order\n iwork[5] = self.nsteps\n iwork[6] = 2 # mxhnil\n self.iwork = iwork\n\n self.call_args = [self.rtol, self.atol, 1, 1,\n self.rwork, self.iwork, mf]\n self.success = 1\n self.initialized = False\n\n def run(self, f, jac, y0, t0, t1, f_params, jac_params):\n if self.initialized:\n self.check_handle()\n else:\n self.initialized = True\n self.acquire_new_handle()\n\n if self.ml is not None and self.ml > 0:\n # Banded Jacobian. Wrap the user-provided function with one\n # that pads the Jacobian array with the extra `self.ml` rows\n # required by the f2py-generated wrapper.\n jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)\n\n args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +\n (f_params, jac_params))\n y1, t, istate = self.runner(*args)\n self.istate = istate\n if istate < 0:\n unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)\n warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,\n self.messages.get(istate, unexpected_istate_msg)))\n self.success = 0\n else:\n self.call_args[3] = 2 # upgrade istate from 1 to 2\n self.istate = 2\n return y1, t\n\n def step(self, *args):\n itask = self.call_args[2]\n self.call_args[2] = 2\n r = self.run(*args)\n self.call_args[2] = itask\n return r\n\n def run_relax(self, *args):\n itask = self.call_args[2]\n self.call_args[2] = 3\n r = self.run(*args)\n self.call_args[2] = itask\n return r\n\n\nif vode.runner is not None:\n IntegratorBase.integrator_classes.append(vode)\n\n\nclass zvode(vode):\n runner = getattr(_vode, 'zvode', None)\n\n supports_run_relax = 1\n supports_step = 1\n scalar = complex\n active_global_handle = 0\n\n def reset(self, n, has_jac):\n mf = self._determine_mf_and_set_bands(has_jac)\n\n if mf in (10,):\n lzw = 15 * n\n elif mf in (11, 12):\n lzw = 15 * n + 2 * n ** 2\n elif mf in (-11, -12):\n lzw = 15 * n + n ** 2\n elif mf in (13,):\n lzw = 16 * n\n elif mf in (14, 15):\n lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n\n elif mf in (-14, -15):\n lzw = 16 * n + (2 * self.ml + self.mu) * n\n elif mf in (20,):\n lzw = 8 * n\n elif mf in (21, 22):\n lzw = 8 * n + 2 * n ** 2\n elif mf in (-21, -22):\n lzw = 8 * n + n ** 2\n elif mf in (23,):\n lzw = 9 * n\n elif mf in (24, 25):\n lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n\n elif mf in (-24, -25):\n lzw = 9 * n + (2 * self.ml + self.mu) * n\n\n lrw = 20 + n\n\n if mf % 10 in (0, 3):\n liw = 30\n else:\n liw = 30 + n\n\n zwork = zeros((lzw,), complex)\n self.zwork = zwork\n\n rwork = zeros((lrw,), float)\n rwork[4] = self.first_step\n rwork[5] = self.max_step\n rwork[6] = self.min_step\n self.rwork = 
rwork\n\n iwork = zeros((liw,), int32)\n if self.ml is not None:\n iwork[0] = self.ml\n if self.mu is not None:\n iwork[1] = self.mu\n iwork[4] = self.order\n iwork[5] = self.nsteps\n iwork[6] = 2 # mxhnil\n self.iwork = iwork\n\n self.call_args = [self.rtol, self.atol, 1, 1,\n self.zwork, self.rwork, self.iwork, mf]\n self.success = 1\n self.initialized = False\n\n\nif zvode.runner is not None:\n IntegratorBase.integrator_classes.append(zvode)\n\n\nclass dopri5(IntegratorBase):\n runner = getattr(_dop, 'dopri5', None)\n name = 'dopri5'\n supports_solout = True\n\n messages = {1: 'computation successful',\n 2: 'computation successful (interrupted by solout)',\n -1: 'input is not consistent',\n -2: 'larger nsteps is needed',\n -3: 'step size becomes too small',\n -4: 'problem is probably stiff (interrupted)',\n }\n\n def __init__(self,\n rtol=1e-6, atol=1e-12,\n nsteps=500,\n max_step=0.0,\n first_step=0.0, # determined by solver\n safety=0.9,\n ifactor=10.0,\n dfactor=0.2,\n beta=0.0,\n method=None,\n verbosity=-1, # no messages if negative\n ):\n self.rtol = rtol\n self.atol = atol\n self.nsteps = nsteps\n self.max_step = max_step\n self.first_step = first_step\n self.safety = safety\n self.ifactor = ifactor\n self.dfactor = dfactor\n self.beta = beta\n self.verbosity = verbosity\n self.success = 1\n self.set_solout(None)\n\n def set_solout(self, solout, complex=False):\n self.solout = solout\n self.solout_cmplx = complex\n if solout is None:\n self.iout = 0\n else:\n self.iout = 1\n\n def reset(self, n, has_jac):\n work = zeros((8 * n + 21,), float)\n work[1] = self.safety\n work[2] = self.dfactor\n work[3] = self.ifactor\n work[4] = self.beta\n work[5] = self.max_step\n work[6] = self.first_step\n self.work = work\n iwork = zeros((21,), int32)\n iwork[0] = self.nsteps\n iwork[2] = self.verbosity\n self.iwork = iwork\n self.call_args = [self.rtol, self.atol, self._solout,\n self.iout, self.work, self.iwork]\n self.success = 1\n\n def run(self, f, jac, y0, t0, t1, f_params, jac_params):\n x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +\n tuple(self.call_args) + (f_params,)))\n self.istate = istate\n if istate < 0:\n unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)\n warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,\n self.messages.get(istate, unexpected_istate_msg)))\n self.success = 0\n return y, x\n\n def _solout(self, nr, xold, x, y, nd, icomp, con):\n if self.solout is not None:\n if self.solout_cmplx:\n y = y[::2] + 1j * y[1::2]\n return self.solout(x, y)\n else:\n return 1\n\n\nif dopri5.runner is not None:\n IntegratorBase.integrator_classes.append(dopri5)\n\n\nclass dop853(dopri5):\n runner = getattr(_dop, 'dop853', None)\n name = 'dop853'\n\n def __init__(self,\n rtol=1e-6, atol=1e-12,\n nsteps=500,\n max_step=0.0,\n first_step=0.0, # determined by solver\n safety=0.9,\n ifactor=6.0,\n dfactor=0.3,\n beta=0.0,\n method=None,\n verbosity=-1, # no messages if negative\n ):\n super(self.__class__, self).__init__(rtol, atol, nsteps, max_step,\n first_step, safety, ifactor,\n dfactor, beta, method,\n verbosity)\n\n def reset(self, n, has_jac):\n work = zeros((11 * n + 21,), float)\n work[1] = self.safety\n work[2] = self.dfactor\n work[3] = self.ifactor\n work[4] = self.beta\n work[5] = self.max_step\n work[6] = self.first_step\n self.work = work\n iwork = zeros((21,), int32)\n iwork[0] = self.nsteps\n iwork[2] = self.verbosity\n self.iwork = iwork\n self.call_args = [self.rtol, self.atol, self._solout,\n self.iout, self.work, self.iwork]\n self.success = 
1\n\n\nif dop853.runner is not None:\n IntegratorBase.integrator_classes.append(dop853)\n\n\nclass lsoda(IntegratorBase):\n runner = getattr(_lsoda, 'lsoda', None)\n active_global_handle = 0\n\n messages = {\n 2: \"Integration successful.\",\n -1: \"Excess work done on this call (perhaps wrong Dfun type).\",\n -2: \"Excess accuracy requested (tolerances too small).\",\n -3: \"Illegal input detected (internal error).\",\n -4: \"Repeated error test failures (internal error).\",\n -5: \"Repeated convergence failures (perhaps bad Jacobian or tolerances).\",\n -6: \"Error weight became zero during problem.\",\n -7: \"Internal workspace insufficient to finish (internal error).\"\n }\n\n def __init__(self,\n with_jacobian=False,\n rtol=1e-6, atol=1e-12,\n lband=None, uband=None,\n nsteps=500,\n max_step=0.0, # corresponds to infinite\n min_step=0.0,\n first_step=0.0, # determined by solver\n ixpr=0,\n max_hnil=0,\n max_order_ns=12,\n max_order_s=5,\n method=None\n ):\n\n self.with_jacobian = with_jacobian\n self.rtol = rtol\n self.atol = atol\n self.mu = uband\n self.ml = lband\n\n self.max_order_ns = max_order_ns\n self.max_order_s = max_order_s\n self.nsteps = nsteps\n self.max_step = max_step\n self.min_step = min_step\n self.first_step = first_step\n self.ixpr = ixpr\n self.max_hnil = max_hnil\n self.success = 1\n\n self.initialized = False\n\n def reset(self, n, has_jac):\n # Calculate parameters for Fortran subroutine dvode.\n if has_jac:\n if self.mu is None and self.ml is None:\n jt = 1\n else:\n if self.mu is None:\n self.mu = 0\n if self.ml is None:\n self.ml = 0\n jt = 4\n else:\n if self.mu is None and self.ml is None:\n jt = 2\n else:\n if self.mu is None:\n self.mu = 0\n if self.ml is None:\n self.ml = 0\n jt = 5\n lrn = 20 + (self.max_order_ns + 4) * n\n if jt in [1, 2]:\n lrs = 22 + (self.max_order_s + 4) * n + n * n\n elif jt in [4, 5]:\n lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n\n else:\n raise ValueError('Unexpected jt=%s' % jt)\n lrw = max(lrn, lrs)\n liw = 20 + n\n rwork = zeros((lrw,), float)\n rwork[4] = self.first_step\n rwork[5] = self.max_step\n rwork[6] = self.min_step\n self.rwork = rwork\n iwork = zeros((liw,), int32)\n if self.ml is not None:\n iwork[0] = self.ml\n if self.mu is not None:\n iwork[1] = self.mu\n iwork[4] = self.ixpr\n iwork[5] = self.nsteps\n iwork[6] = self.max_hnil\n iwork[7] = self.max_order_ns\n iwork[8] = self.max_order_s\n self.iwork = iwork\n self.call_args = [self.rtol, self.atol, 1, 1,\n self.rwork, self.iwork, jt]\n self.success = 1\n self.initialized = False\n\n def run(self, f, jac, y0, t0, t1, f_params, jac_params):\n if self.initialized:\n self.check_handle()\n else:\n self.initialized = True\n self.acquire_new_handle()\n args = [f, y0, t0, t1] + self.call_args[:-1] + \\\n [jac, self.call_args[-1], f_params, 0, jac_params]\n y1, t, istate = self.runner(*args)\n self.istate = istate\n if istate < 0:\n unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)\n warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,\n self.messages.get(istate, unexpected_istate_msg)))\n self.success = 0\n else:\n self.call_args[3] = 2 # upgrade istate from 1 to 2\n self.istate = 2\n return y1, t\n\n def step(self, *args):\n itask = self.call_args[2]\n self.call_args[2] = 2\n r = self.run(*args)\n self.call_args[2] = itask\n return r\n\n def run_relax(self, *args):\n itask = self.call_args[2]\n self.call_args[2] = 3\n r = self.run(*args)\n self.call_args[2] = itask\n return r\n\n\nif lsoda.runner:\n 
IntegratorBase.integrator_classes.append(lsoda)\n",
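The `complex_ode` docstring above defers to `ode` for usage; a minimal sketch of the complex case, assuming a current scipy (the problem and values are illustrative):

from scipy.integrate import complex_ode

def f(t, y):
    return 1j * y  # y' = i*y, so y(t) = y0 * exp(i*t)

r = complex_ode(f).set_integrator('dopri5')  # zvode is explicitly rejected by complex_ode
r.set_initial_value([1.0 + 0.0j], 0.0)
print(r.integrate(1.0))  # ~[0.5403+0.8415j], i.e. exp(1j)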
"import numpy as np\nimport itertools\nfrom numpy.testing import (assert_equal,\n assert_almost_equal,\n assert_array_equal,\n assert_array_almost_equal,\n suppress_warnings)\nimport pytest\nfrom pytest import raises as assert_raises\nfrom pytest import warns as assert_warns\nfrom scipy.spatial import SphericalVoronoi, distance\nfrom scipy.spatial import _spherical_voronoi as spherical_voronoi\nfrom scipy.spatial.transform import Rotation\nfrom scipy.optimize import linear_sum_assignment\nfrom scipy.constants import golden as phi\nfrom scipy.spatial import geometric_slerp\n\n\nTOL = 1E-10\n\n\ndef _generate_tetrahedron():\n return np.array([[1, 1, 1], [1, -1, -1], [-1, 1, -1], [-1, -1, 1]])\n\n\ndef _generate_cube():\n return np.array(list(itertools.product([-1, 1.], repeat=3)))\n\n\ndef _generate_octahedron():\n return np.array([[-1, 0, 0], [+1, 0, 0], [0, -1, 0],\n [0, +1, 0], [0, 0, -1], [0, 0, +1]])\n\n\ndef _generate_dodecahedron():\n\n x1 = _generate_cube()\n x2 = np.array([[0, -phi, -1 / phi],\n [0, -phi, +1 / phi],\n [0, +phi, -1 / phi],\n [0, +phi, +1 / phi]])\n x3 = np.array([[-1 / phi, 0, -phi],\n [+1 / phi, 0, -phi],\n [-1 / phi, 0, +phi],\n [+1 / phi, 0, +phi]])\n x4 = np.array([[-phi, -1 / phi, 0],\n [-phi, +1 / phi, 0],\n [+phi, -1 / phi, 0],\n [+phi, +1 / phi, 0]])\n return np.concatenate((x1, x2, x3, x4))\n\n\ndef _generate_icosahedron():\n x = np.array([[0, -1, -phi],\n [0, -1, +phi],\n [0, +1, -phi],\n [0, +1, +phi]])\n return np.concatenate([np.roll(x, i, axis=1) for i in range(3)])\n\n\ndef _generate_polyhedron(name):\n\n if name == \"tetrahedron\":\n p = _generate_tetrahedron()\n elif name == \"cube\":\n p = _generate_cube()\n elif name == \"octahedron\":\n p = _generate_octahedron()\n elif name == \"dodecahedron\":\n p = _generate_dodecahedron()\n elif name == \"icosahedron\":\n p = _generate_icosahedron()\n else:\n raise ValueError(\"unrecognized polyhedron\")\n\n return p / np.linalg.norm(p, axis=1)[:, np.newaxis]\n\n\nclass TestSphericalVoronoi(object):\n\n def setup_method(self):\n self.points = np.array([\n [-0.78928481, -0.16341094, 0.59188373],\n [-0.66839141, 0.73309634, 0.12578818],\n [0.32535778, -0.92476944, -0.19734181],\n [-0.90177102, -0.03785291, -0.43055335],\n [0.71781344, 0.68428936, 0.12842096],\n [-0.96064876, 0.23492353, -0.14820556],\n [0.73181537, -0.22025898, -0.6449281],\n [0.79979205, 0.54555747, 0.25039913]]\n )\n\n # Issue #9386\n self.hemisphere_points = np.array([\n [0.88610999, -0.42383021, 0.18755541],\n [0.51980039, -0.72622668, 0.4498915],\n [0.56540011, -0.81629197, -0.11827989],\n [0.69659682, -0.69972598, 0.15854467]])\n\n # Issue #8859\n phi = np.linspace(0, 2 * np.pi, 10, endpoint=False) # azimuth angle\n theta = np.linspace(0.001, np.pi * 0.4, 5) # polar angle\n theta = theta[np.newaxis, :].T\n\n phiv, thetav = np.meshgrid(phi, theta)\n phiv = np.reshape(phiv, (50, 1))\n thetav = np.reshape(thetav, (50, 1))\n\n x = np.cos(phiv) * np.sin(thetav)\n y = np.sin(phiv) * np.sin(thetav)\n z = np.cos(thetav)\n self.hemisphere_points2 = np.concatenate([x, y, z], axis=1)\n\n def test_constructor(self):\n center = np.array([1, 2, 3])\n radius = 2\n s1 = SphericalVoronoi(self.points)\n # user input checks in SphericalVoronoi now require\n # the radius / center to match the generators so adjust\n # accordingly here\n s2 = SphericalVoronoi(self.points * radius, radius)\n s3 = SphericalVoronoi(self.points + center, center=center)\n s4 = SphericalVoronoi(self.points * radius + center, radius, center)\n assert_array_equal(s1.center, 
np.array([0, 0, 0]))\n assert_equal(s1.radius, 1)\n assert_array_equal(s2.center, np.array([0, 0, 0]))\n assert_equal(s2.radius, 2)\n assert_array_equal(s3.center, center)\n assert_equal(s3.radius, 1)\n assert_array_equal(s4.center, center)\n assert_equal(s4.radius, radius)\n\n def test_vertices_regions_translation_invariance(self):\n sv_origin = SphericalVoronoi(self.points)\n center = np.array([1, 1, 1])\n sv_translated = SphericalVoronoi(self.points + center, center=center)\n assert_equal(sv_origin.regions, sv_translated.regions)\n assert_array_almost_equal(sv_origin.vertices + center,\n sv_translated.vertices)\n\n def test_vertices_regions_scaling_invariance(self):\n sv_unit = SphericalVoronoi(self.points)\n sv_scaled = SphericalVoronoi(self.points * 2, 2)\n assert_equal(sv_unit.regions, sv_scaled.regions)\n assert_array_almost_equal(sv_unit.vertices * 2,\n sv_scaled.vertices)\n\n def test_old_radius_api(self):\n sv_unit = SphericalVoronoi(self.points, radius=1)\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \"`radius` is `None`\")\n sv = SphericalVoronoi(self.points, None)\n assert_array_almost_equal(sv_unit.vertices, sv.vertices)\n\n def test_old_radius_api_warning(self):\n with assert_warns(DeprecationWarning):\n SphericalVoronoi(self.points, None)\n\n def test_sort_vertices_of_regions(self):\n sv = SphericalVoronoi(self.points)\n unsorted_regions = sv.regions\n sv.sort_vertices_of_regions()\n assert_equal(sorted(sv.regions), sorted(unsorted_regions))\n\n def test_sort_vertices_of_regions_flattened(self):\n expected = sorted([[0, 6, 5, 2, 3], [2, 3, 10, 11, 8, 7], [0, 6, 4, 1],\n [4, 8, 7, 5, 6], [9, 11, 10], [2, 7, 5],\n [1, 4, 8, 11, 9], [0, 3, 10, 9, 1]])\n expected = list(itertools.chain(*sorted(expected))) # type: ignore\n sv = SphericalVoronoi(self.points)\n sv.sort_vertices_of_regions()\n actual = list(itertools.chain(*sorted(sv.regions)))\n assert_array_equal(actual, expected)\n\n def test_sort_vertices_of_regions_dimensionality(self):\n points = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [0.5, 0.5, 0.5, 0.5]])\n with pytest.raises(TypeError, match=\"three-dimensional\"):\n sv = spherical_voronoi.SphericalVoronoi(points)\n sv.sort_vertices_of_regions()\n\n def test_num_vertices(self):\n # for any n >= 3, a spherical Voronoi diagram has 2n - 4\n # vertices; this is a direct consequence of Euler's formula\n # as explained by Dinis and Mamede (2010) Proceedings of the\n # 2010 International Symposium on Voronoi Diagrams in Science\n # and Engineering\n sv = SphericalVoronoi(self.points)\n expected = self.points.shape[0] * 2 - 4\n actual = sv.vertices.shape[0]\n assert_equal(actual, expected)\n\n def test_voronoi_circles(self):\n sv = spherical_voronoi.SphericalVoronoi(self.points)\n for vertex in sv.vertices:\n distances = distance.cdist(sv.points, np.array([vertex]))\n closest = np.array(sorted(distances)[0:3])\n assert_almost_equal(closest[0], closest[1], 7, str(vertex))\n assert_almost_equal(closest[0], closest[2], 7, str(vertex))\n\n def test_duplicate_point_handling(self):\n # an exception should be raised for degenerate generators\n # related to Issue# 7046\n self.degenerate = np.concatenate((self.points, self.points))\n with assert_raises(ValueError):\n spherical_voronoi.SphericalVoronoi(self.degenerate)\n\n def test_incorrect_radius_handling(self):\n # an exception should be raised if the radius provided\n # cannot possibly match the input generators\n with assert_raises(ValueError):\n 
spherical_voronoi.SphericalVoronoi(self.points,\n radius=0.98)\n\n def test_incorrect_center_handling(self):\n # an exception should be raised if the center provided\n # cannot possibly match the input generators\n with assert_raises(ValueError):\n spherical_voronoi.SphericalVoronoi(self.points,\n center=[0.1, 0, 0])\n\n def test_single_hemisphere_handling(self):\n # Test solution of Issues #9386, #8859\n\n for points in [self.hemisphere_points, self.hemisphere_points2]:\n sv = SphericalVoronoi(points)\n triangles = sv.points[sv._simplices]\n dots = np.einsum('ij,ij->i', sv.vertices, triangles[:, 0])\n circumradii = np.arccos(np.clip(dots, -1, 1))\n assert np.max(circumradii) > np.pi / 2\n\n def test_rank_deficient(self):\n # rank-1 input cannot be triangulated\n points = np.array([[-1, 0, 0], [1, 0, 0]])\n with pytest.raises(ValueError, match=\"Rank of input points\"):\n spherical_voronoi.SphericalVoronoi(points)\n\n @pytest.mark.parametrize(\"n\", [8, 15, 21])\n @pytest.mark.parametrize(\"radius\", [0.5, 1, 2])\n @pytest.mark.parametrize(\"fraction\", [0, 0.6])\n @pytest.mark.parametrize(\"center\", [(0, 0, 0), (1, 2, 3)])\n def test_circular_input(self, n, radius, fraction, center):\n # generate a random circle\n rng = np.random.RandomState(0)\n U = Rotation.random(random_state=rng).as_matrix()\n h = radius * fraction\n circle_radius = np.sqrt(radius**2 - h**2)\n thetas = np.linspace(0, 2 * np.pi, n, endpoint=False)\n points = np.vstack([circle_radius * np.sin(thetas),\n circle_radius * np.cos(thetas),\n h * np.ones(n)]).T\n points = points @ U\n\n # calculate Spherical Voronoi diagram\n sv = SphericalVoronoi(points + center, radius=radius, center=center)\n\n # each region must have 4 vertices\n region_sizes = np.array([len(region) for region in sv.regions])\n assert (region_sizes == 4).all()\n regions = np.array(sv.regions)\n\n # vertices are those between each pair of input points + north and\n # south poles\n vertices = sv.vertices - center\n assert len(vertices) == n + 2\n\n # verify that north and south poles are orthogonal to circle on which\n # input points lie\n poles = vertices[n:]\n midpoint = np.array([0, 0, h]) @ U\n assert np.abs(np.dot(points - midpoint, poles.T)).max() < 1E-10\n\n # test arc lengths to forward and backward neighbors\n for point, region in zip(points, sv.regions):\n cosine = np.dot(vertices[region] - midpoint, point - midpoint)\n sine = np.linalg.norm(np.cross(vertices[region] - midpoint,\n point - midpoint), axis=1)\n arclengths = circle_radius * np.arctan2(sine, cosine)\n assert_almost_equal(arclengths[[0, 2]], circle_radius * np.pi / n)\n\n # test arc lengths to poles\n for point, region in zip(points, sv.regions):\n cosine = np.dot(vertices[region], point)\n sine = np.linalg.norm(np.cross(vertices[region], point), axis=1)\n arclengths = np.arctan2(sine, cosine)\n angle = np.arcsin(h / radius)\n assert_almost_equal(arclengths[1], np.pi / 2 - angle)\n assert_almost_equal(arclengths[3], np.pi / 2 + angle)\n\n regions = sv.regions.copy()\n sv.sort_vertices_of_regions()\n assert regions == sv.regions\n\n @pytest.mark.parametrize(\"dim\", range(2, 7))\n def test_higher_dimensions(self, dim):\n n = 100\n rng = np.random.RandomState(seed=0)\n points = rng.randn(n, dim)\n points /= np.linalg.norm(points, axis=1)[:, np.newaxis]\n sv = SphericalVoronoi(points)\n assert sv.vertices.shape[1] == dim\n assert len(sv.regions) == n\n\n # verify Euler characteristic\n cell_counts = []\n simplices = np.sort(sv._simplices)\n for i in range(1, dim + 1):\n cells = []\n for 
indices in itertools.combinations(range(dim), i):\n cells.append(simplices[:, list(indices)])\n cells = np.unique(np.concatenate(cells), axis=0)\n cell_counts.append(len(cells))\n expected_euler = 1 + (-1)**(dim-1)\n actual_euler = sum([(-1)**i * e for i, e in enumerate(cell_counts)])\n assert expected_euler == actual_euler\n\n @pytest.mark.parametrize(\"dim\", range(2, 7))\n def test_cross_polytope_regions(self, dim):\n # The hypercube is the dual of the cross-polytope, so the voronoi\n # vertices of the cross-polytope lie on the points of the hypercube.\n\n # generate points of the cross-polytope\n points = np.concatenate((-np.eye(dim), np.eye(dim)))\n sv = SphericalVoronoi(points)\n assert all([len(e) == 2**(dim - 1) for e in sv.regions])\n\n # generate points of the hypercube\n expected = np.vstack(list(itertools.product([-1, 1], repeat=dim)))\n expected = expected.astype(np.float) / np.sqrt(dim)\n\n # test that Voronoi vertices are correctly placed\n dist = distance.cdist(sv.vertices, expected)\n res = linear_sum_assignment(dist)\n assert dist[res].sum() < TOL\n\n @pytest.mark.parametrize(\"dim\", range(2, 4))\n def test_hypercube_regions(self, dim):\n # The cross-polytope is the dual of the hypercube, so the voronoi\n # vertices of the hypercube lie on the points of the cross-polytope.\n\n # generate points of the hypercube\n points = np.vstack(list(itertools.product([-1, 1], repeat=dim)))\n points = points.astype(np.float) / np.sqrt(dim)\n sv = SphericalVoronoi(points)\n\n # generate points of the cross-polytope\n expected = np.concatenate((-np.eye(dim), np.eye(dim)))\n\n # test that Voronoi vertices are correctly placed\n dist = distance.cdist(sv.vertices, expected)\n res = linear_sum_assignment(dist)\n assert dist[res].sum() < TOL\n\n @pytest.mark.parametrize(\"radius\", [0.5, 1, 2])\n @pytest.mark.parametrize(\"center\", [(0, 0, 0), (1, 2, 3)])\n def test_area_reconstitution(self, radius, center):\n for points in [self.points, self.hemisphere_points,\n self.hemisphere_points2]:\n sv = SphericalVoronoi(radius * points + center,\n radius=radius,\n center=center)\n areas = sv.calculate_areas()\n assert_almost_equal(areas.sum(), 4 * np.pi * radius**2)\n\n @pytest.mark.parametrize(\"radius\", [0.5, 1, 2])\n def test_area_reconstitution_large_input(self, radius):\n np.random.seed(0)\n n = 1000\n points = np.random.uniform(-1, 1, (n, 3))\n points /= np.linalg.norm(points, axis=1).reshape((n, 1))\n\n sv = SphericalVoronoi(radius * points, radius=radius)\n areas = sv.calculate_areas()\n assert_almost_equal(areas.sum(), 4 * np.pi * radius**2)\n\n @pytest.mark.parametrize(\"poly\", [\"tetrahedron\", \"cube\", \"octahedron\",\n \"dodecahedron\", \"icosahedron\"])\n def test_equal_area_regions(self, poly):\n points = _generate_polyhedron(poly)\n sv = SphericalVoronoi(points)\n areas = sv.calculate_areas()\n assert_almost_equal(areas, 4 * np.pi / len(points))\n\n def test_ultra_close_gens(self):\n # use geometric_slerp to produce generators that\n # are close together, to push the limits\n # of the area (angle) calculations\n # also, limit generators to a single hemisphere\n path = geometric_slerp([0, 0, 1],\n [1, 0, 0],\n t=np.linspace(0, 1, 1000))\n sv = SphericalVoronoi(path)\n areas = sv.calculate_areas()\n assert_almost_equal(areas.sum(), 4 * np.pi)\n"
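A minimal sketch of the area-reconstitution property the last few tests check, assuming scipy >= 1.5 for `calculate_areas` (the random points are illustrative):

import numpy as np
from scipy.spatial import SphericalVoronoi

rng = np.random.RandomState(0)
points = rng.randn(30, 3)
points /= np.linalg.norm(points, axis=1)[:, np.newaxis]  # project onto the unit sphere

sv = SphericalVoronoi(points)
areas = sv.calculate_areas()
assert np.isclose(areas.sum(), 4 * np.pi)  # the regions tile the whole sphere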
] | [
[
"numpy.dot",
"numpy.sum",
"scipy.special.roots_legendre",
"numpy.arange",
"numpy.asarray",
"numpy.linalg.inv",
"numpy.cumsum",
"numpy.full",
"numpy.real",
"numpy.diff",
"numpy.isscalar",
"scipy.special.gammaln",
"numpy.array",
"numpy.isinf",
"numpy.empty"
],
[
"numpy.dot",
"numpy.split",
"numpy.imag",
"numpy.take",
"numpy.asarray",
"numpy.around",
"numpy.issubdtype",
"numpy.concatenate",
"numpy.lib.stride_tricks.as_strided",
"numpy.mean",
"numpy.any",
"scipy.fft.next_fast_len",
"numpy.iscomplexobj",
"numpy.moveaxis",
"scipy.fft.fftfreq",
"numpy.where",
"scipy.spatial.cKDTree",
"numpy.polyval",
"numpy.hstack",
"numpy.swapaxes",
"scipy.fft.ifft",
"scipy.fft.fft2",
"numpy.pad",
"numpy.unique",
"numpy.reshape",
"numpy.arange",
"numpy.eye",
"scipy.linalg.lstsq",
"numpy.finfo",
"numpy.atleast_1d",
"numpy.roots",
"numpy.size",
"numpy.real",
"numpy.isrealobj",
"numpy.ravel",
"numpy.zeros",
"scipy.fft.ifft2",
"numpy.log",
"scipy.special.lambertw",
"scipy.linalg.companion",
"numpy.polymul",
"scipy.fft.irfft",
"numpy.atleast_2d",
"numpy.polydiv",
"scipy.fft._helper._init_nd_shape_and_axes",
"numpy.array",
"numpy.sum",
"numpy.polyadd",
"numpy.correlate",
"numpy.convolve",
"numpy.linalg.solve",
"numpy.abs",
"scipy.fft.rfft",
"numpy.empty",
"numpy.ones",
"numpy.result_type",
"numpy.polysub",
"numpy.prod",
"numpy.angle",
"scipy.fft.fft"
],
[
"numpy.diag",
"scipy.sparse.construct.random",
"numpy.kron",
"numpy.dtype",
"scipy.sparse.construct._compressed_sparse_stack",
"numpy.random.default_rng",
"numpy.random.randint",
"numpy.testing.assert_equal",
"scipy.sparse.construct.eye",
"scipy.sparse.coo_matrix",
"scipy.sparse.sputils.matrix",
"numpy.arange",
"numpy.less",
"scipy.sparse.construct.rand",
"numpy.eye",
"scipy._lib._util.check_random_state",
"numpy.diff",
"numpy.less_equal",
"scipy.sparse.construct.block_diag",
"scipy.sparse.construct.hstack",
"scipy.sparse.construct.spdiags",
"scipy.sparse.construct.bmat",
"scipy.sparse.construct.diags",
"scipy.sparse.csr_matrix",
"scipy.sparse.construct.kron",
"numpy.array",
"scipy.sparse.construct.identity",
"numpy.random.RandomState",
"numpy.random.seed",
"scipy._lib._testutils.check_free_memory",
"scipy.sparse.construct.vstack",
"numpy.random.shuffle",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.prod",
"numpy.empty"
],
[
"numpy.imag",
"numpy.asarray",
"numpy.real",
"numpy.isscalar",
"numpy.array",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.linspace",
"numpy.einsum",
"numpy.concatenate",
"numpy.arctan2",
"numpy.max",
"numpy.cross",
"scipy.optimize.linear_sum_assignment",
"numpy.roll",
"numpy.testing.assert_equal",
"scipy.spatial.transform.Rotation.random",
"numpy.arcsin",
"numpy.reshape",
"numpy.testing.suppress_warnings",
"numpy.clip",
"numpy.eye",
"numpy.sin",
"numpy.testing.assert_almost_equal",
"scipy.spatial._spherical_voronoi.SphericalVoronoi",
"numpy.testing.assert_array_almost_equal",
"scipy.spatial.distance.cdist",
"numpy.meshgrid",
"numpy.random.RandomState",
"numpy.array",
"numpy.random.seed",
"numpy.cos",
"numpy.sort",
"numpy.linalg.norm",
"numpy.testing.assert_array_equal",
"numpy.ones",
"numpy.random.uniform",
"scipy.spatial.SphericalVoronoi"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.19"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.7",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
fubiye/edgar-abs-kg | [
"3973059c7b1cdaab8a4e857a43c702ac0be7e725",
"3973059c7b1cdaab8a4e857a43c702ac0be7e725"
] | [
"dataset/corpus_to_txts.py",
"dataset/corpus_test.py"
] | [
"# coding=utf-8\n\nimport pandas as pd\nfrom pathlib import Path\n# extract corpus to seprate files\nOUT_PUT_DIR = r'D:\\data\\edgar\\example\\documents'\ndf = pd.read_csv(r'D:\\data\\edgar\\example\\corpus.csv')\n# def write_to_file(cik,filingId,fileName,content):\ndef write_to_file(cik,filingId,fileName,content):\n base_dir = Path(OUT_PUT_DIR)\n file_name = str(cik) + '+' + str(filingId) + '+' + str(fileName)\n file_name = file_name.replace('.htm', '.txt')\n (base_dir/file_name).write_text(content,encoding='utf-8')\n\ndf.apply(lambda row: write_to_file(row['CIK'],row['FilingId'],row['FileName'],row['Content']), axis=1)\n",
"import pandas as pd\n\ndf = pd.read_csv(r'D:\\data\\edgar\\example\\corpus.csv')\n# df['FilingType'] = df['Content'].apply(lambda content: content[:content.index('\\n')])\n# df['ContentLen'] = df['Content'].apply(lambda content: len(content))\n# print(df[df['FilingType'] == '10-D'])\n# print(df[df['FilingType'] == '10-D']['Content'].iloc[0])\na = df[df['FilingType'] == '10-D']['Content'].iloc[0]\nb = df[df['FilingType'] == '10-D']['Content'].iloc[1]\n# from pprint import pprint\n# from difflib import Differ\n# d = Differ()\n# result = list(d.compare(a, b))\n# pprint(result)\nprint(a)\nprint(\"$****$\")\nprint(b)"
] | [
[
"pandas.read_csv"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Yili-Yang/Litterman_Carbon_Pricing | [
"71eeefc5e2d9b4c1473a9a6ae85c33b019e32d84"
] | [
"ezclimate/optimization.py"
] | [
"from __future__ import division, print_function\nimport numpy as np\nimport multiprocessing\nfrom tools import _pickle_method, _unpickle_method\ntry:\n import copy_reg\nexcept:\n import copyreg as copy_reg\nimport types\n\ncopy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)\n\nclass GeneticAlgorithm(object):\n\t\"\"\"Optimization algorithm for the EZ-Climate model. \n\n\tParameters\n\t----------\n\tpop_amount : int\n\t\tnumber of individuals in the population\n\tnum_feature : int \n\t\tnumber of elements in each individual, i.e. number of nodes in tree-model\n\tnum_generations : int \n\t\tnumber of generations of the populations to be evaluated\n\tbound : float\n\t\tupper bound of mitigation in each node\n\tcx_prob : float\n\t\t probability of mating\n\tmut_prob : float\n\t\tprobability of mutation.\n\tutility : `Utility` object\n\t\tobject of utility class\n\tfixed_values : ndarray, optional\n\t\tnodes to keep fixed\n\tfixed_indicies : ndarray, optional\n\t\tindicies of nodes to keep fixed\n\tprint_progress : bool, optional\n\t\tif the progress of the evolution should be printed\n\n\tAttributes\n\t----------\n\tpop_amount : int\n\t\tnumber of individuals in the population\n\tnum_feature : int \n\t\tnumber of elements in each individual, i.e. number of nodes in tree-model\n\tnum_generations : int \n\t\tnumber of generations of the populations to be evaluated\n\tbound : float\n\t\tupper bound of mitigation in each node\n\tcx_prob : float\n\t\t probability of mating\n\tmut_prob : float\n\t\tprobability of mutation.\n\tu : `Utility` object\n\t\tobject of utility class\n\tfixed_values : ndarray, optional\n\t\tnodes to keep fixed\n\tfixed_indicies : ndarray, optional\n\t\tindicies of nodes to keep fixed\n\tprint_progress : bool, optional\n\t\tif the progress of the evolution should be printed\n\n\t\"\"\"\n\tdef __init__(self, pop_amount, num_generations, cx_prob, mut_prob, bound, num_feature, utility,\n\t\t\t\t fixed_values=None, fixed_indicies=None, print_progress=False):\n\t\tself.num_feature = num_feature\n\t\tself.pop_amount = pop_amount\n\t\tself.num_gen = num_generations\n\t\tself.cx_prob = cx_prob\n\t\tself.mut_prob = mut_prob\n\t\tself.u = utility\n\t\tself.bound = bound\n\t\tself.fixed_values = fixed_values\n\t\tself.fixed_indicies = fixed_indicies\n\t\tself.print_progress = print_progress\n\n\tdef _generate_population(self, size):\n\t\t\"\"\"Return 1D-array of random values in the given bound as the initial population.\"\"\"\n\t\tpop = np.random.random([size, self.num_feature])*self.bound\n\t\tif self.fixed_values is not None:\n\t\t\tfor ind in pop:\n\t\t\t\tind[self.fixed_indicies] = self.fixed_values # override fix values\n\t\treturn pop\n\n\tdef _evaluate(self, indvidual):\n\t\t\"\"\"Returns the utility of given individual.\"\"\"\n\t\treturn self.u.utility(indvidual)\n\n\tdef _select(self, pop, rate):\n\t\t\"\"\"Returns a 1D-array of selected individuals.\n\t \n\t Parameters\n\t ----------\n\t pop : ndarray \n\t \tpopulation given by 2D-array with shape ('pop_amount', 'num_feature')\n\t rate : float \n\t \tthe probability of an individual being selected\n\t\t \n\t Returns\n\t -------\n\t ndarray \n\t \tselected individuals\n\n\t\t\"\"\"\n\t\tindex = np.random.choice(self.pop_amount, int(rate*self.pop_amount), replace=False) \n\t\treturn pop[index,:] #return a list of random instance of pop\n\n\tdef _random_index(self, individuals, size):\n\t\t\"\"\"Generate a random index of individuals of size 'size'.\n\n\t\tParameters\n\t\t----------\n\t\tindividuals : ndarray or 
list\n\t\t\t2D-array of individuals\n\t\tsize : int\n\t\t\tnumber of indices to generate\n\n\t\tReturns\n\t\t-------\n\t\tndarray\n\t\t\t1D-array of indices\n\n\t\t\"\"\"\n\t\tinds_size = len(individuals)\n\t\treturn np.random.choice(inds_size, size)\n\n\tdef _selection_tournament(self, pop, k, tournsize, fitness):\n\t\t\"\"\"Select `k` individuals from the input `pop` using `k`\n\t\ttournaments of `tournsize` individuals.\n\n\t\tParameters\n\t\t----------\n\t\tpop : ndarray or list\n\t\t\t2D-array of individuals to select from\n\t\tk : int\n\t\t\tnumber of individuals to select\n\t\ttournsize : int\n\t\t\tnumber of individuals participating in each tournament\n\t\tfitness : ndarray\n\t\t\tfitness (utility in our model) of each individual in `pop`\n\n\t\tReturns\n\t\t-------\n\t\tndarray\n\t\t\tselected individuals\n\n\t\t\"\"\"\n\t\tchosen = []\n\t\t# k times, randomly choose tournsize indices and pick the aspirant with the highest fitness\n\t\tfor i in range(k):\n\t\t\tindex = self._random_index(pop, tournsize)\n\t\t\taspirants = pop[index]\n\t\t\taspirants_fitness = fitness[index]\n\t\t\tchosen_index = np.where(aspirants_fitness == np.max(aspirants_fitness))[0]\n\t\t\tif len(chosen_index) != 0:\n\t\t\t\tchosen_index = chosen_index[0]\n\t\t\tchosen.append(aspirants[chosen_index])\n\t\treturn np.array(chosen)\n\n\tdef _two_point_cross_over(self, pop):\n\t\t\"\"\"Performs a two-point cross-over of the population.\n\n\t\tParameters\n\t\t----------\n\t\tpop : ndarray\n\t\t\tpopulation given by 2D-array with shape ('pop_amount', 'num_feature')\n\n\t\t\"\"\"\n\t\tchild_group1 = pop[::2] # individuals with even index\n\t\tchild_group2 = pop[1::2] # individuals with odd index\n\t\tfor child1, child2 in zip(child_group1, child_group2):\n\t\t\tif np.random.random() <= self.cx_prob:\n\t\t\t\t# generate two distinct crossover points\n\t\t\t\tcxpoint1 = np.random.randint(1, self.num_feature)\n\t\t\t\tcxpoint2 = np.random.randint(1, self.num_feature - 1)\n\t\t\t\tif cxpoint2 >= cxpoint1:\n\t\t\t\t\tcxpoint2 += 1\n\t\t\t\telse: # Swap the two cx points\n\t\t\t\t\tcxpoint1, cxpoint2 = cxpoint2, cxpoint1\n\t\t\t\tchild1[cxpoint1:cxpoint2], child2[cxpoint1:cxpoint2] \\\n\t\t\t\t= child2[cxpoint1:cxpoint2].copy(), child1[cxpoint1:cxpoint2].copy()\n\t\t\t\tif self.fixed_values is not None:\n\t\t\t\t\tchild1[self.fixed_indicies] = self.fixed_values\n\t\t\t\t\tchild2[self.fixed_indicies] = self.fixed_values\n\n\tdef _uniform_cross_over(self, pop, ind_prob):\n\t\t\"\"\"Performs a uniform cross-over of the population.\n\n\t\tParameters\n\t\t----------\n\t\tpop : ndarray\n\t\t\tpopulation given by 2D-array with shape ('pop_amount', 'num_feature')\n\t\tind_prob : float\n\t\t\tprobability of feature cross-over\n\n\t\t\"\"\"\n\t\tchild_group1 = pop[::2]\n\t\tchild_group2 = pop[1::2]\n\t\tfor child1, child2 in zip(child_group1, child_group2):\n\t\t\tsize = min(len(child1), len(child2))\n\t\t\tfor i in range(size):\n\t\t\t\tif np.random.random() < ind_prob:\n\t\t\t\t\tchild1[i], child2[i] = child2[i], child1[i]\n\n\tdef _mutate(self, pop, ind_prob, scale=2.0):\n\t\t\"\"\"Mutates individual's elements. The individual has a probability of `mut_prob` of\n\t\tbeing selected and every element in this individual has a probability `ind_prob` of being\n\t\tmutated.
The mutated value is the current value plus a scaled uniform [-0.5, 0.5] random value.\n\n\t\tParameters\n\t\t----------\n\t\tpop : ndarray\n\t\t\tpopulation given by 2D-array with shape ('pop_amount', 'num_feature')\n\t\tind_prob : float\n\t\t\tprobability of feature mutation\n\t\tscale : float\n\t\t\tscaling constant of the randomly generated number for mutation\n\n\t\t\"\"\"\n\t\t# mutation probabilities are applied in expectation over the population\n\t\tmutate_index = np.random.choice(self.pop_amount, int(self.mut_prob * self.pop_amount), replace=False)\n\t\tfor i in mutate_index:\n\t\t\tfeature_index = np.random.choice(self.num_feature, int(ind_prob * self.num_feature), replace=False)\n\t\t\tfor j in feature_index:\n\t\t\t\tif self.fixed_indicies is not None and j in self.fixed_indicies:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tpop[i][j] = max(0.0, pop[i][j]+(np.random.random()-0.5)*scale)\n\n\tdef _uniform_mutation(self, pop, ind_prob, scale=2.0):\n\t\t\"\"\"Mutates individual's elements. The individual has a probability of `mut_prob` of\n\t\tbeing selected and every element in this individual has a probability `ind_prob` of being\n\t\tmutated. The mutated value is the current value plus a scaled uniform [-0.5, 0.5] random value.\n\n\t\tParameters\n\t\t----------\n\t\tpop : ndarray\n\t\t\tpopulation given by 2D-array with shape ('pop_amount', 'num_feature')\n\t\tind_prob : float\n\t\t\tprobability of feature mutation\n\t\tscale : float\n\t\t\tscaling constant of the randomly generated number for mutation\n\n\t\t\"\"\"\n\t\tpop_len = len(pop)\n\t\tmutate_index = np.random.choice(pop_len, int(self.mut_prob * pop_len), replace=False)\n\t\tfor i in mutate_index:\n\t\t\tprob = np.random.random(self.num_feature)\n\t\t\tinc = (np.random.random(self.num_feature) - 0.5)*scale\n\t\t\tpop[i] += (prob > (1.0-ind_prob)).astype(int)*inc\n\t\t\tpop[i] = np.maximum(1e-5, pop[i])\n\t\t\tif self.fixed_values is not None:\n\t\t\t\tpop[i][self.fixed_indicies] = self.fixed_values\n\n\tdef _show_evolution(self, fits, pop):\n\t\t\"\"\"Print statistics of the evolution of the population.\"\"\"\n\t\tlength = len(pop)\n\t\tmean = fits.mean()\n\t\tstd = fits.std()\n\t\tmin_val = fits.min()\n\t\tmax_val = fits.max()\n\t\tprint (\" Min {} \\n Max {} \\n Avg {}\".format(min_val, max_val, mean))\n\t\tprint (\" Std {} \\n Population Size {}\".format(std, length))\n\t\tprint (\" Best Individual: \", pop[np.argmax(fits)])\n\n\tdef _survive(self, pop_tmp, fitness_tmp):\n\t\t\"\"\"The 80 percent of the individuals with the best fitness survive to\n\t\tthe next generation.\n\n\t\tParameters\n\t\t----------\n\t\tpop_tmp : ndarray\n\t\t\tpopulation\n\t\tfitness_tmp : ndarray\n\t\t\tfitness values of `pop_tmp`\n\n\t\tReturns\n\t\t-------\n\t\tndarray\n\t\t\tindividuals that survived\n\n\t\t\"\"\"\n\t\tindex_fits = np.argsort(fitness_tmp)[::-1]\n\t\tfitness = fitness_tmp[index_fits]\n\t\tpop = pop_tmp[index_fits]\n\t\tnum_survive = int(0.8*self.pop_amount)\n\t\tsurvive_pop = np.copy(pop[:num_survive])\n\t\tsurvive_fitness = np.copy(fitness[:num_survive])\n\t\treturn np.copy(survive_pop), np.copy(survive_fitness)\n\n\tdef run(self):\n\t\t\"\"\"Start the evolution process.\n\n\t\tThe evolution steps are:\n\t\t\t1. Select the individuals to perform cross-over and mutation.\n\t\t\t2. Perform cross-over among the selected candidates.\n\t\t\t3. Mutate the result as offspring.\n\t\t\t4. Combine the offspring and the parents, and keep the top\n\t\t\t 80 percent of the original population amount.\n\t\t\t5.
Randomly generate new individuals, amounting to 20 percent of the original\n\t\t\t population amount, and combine them with the survivors.\n\n\t\tReturns\n\t\t-------\n\t\ttuple\n\t\t\tfinal population and the fitness for the final population\n\n\t\tNote\n\t\t----\n\t\tUses the :mod:`~multiprocessing` package.\n\n\t\t\"\"\"\n\t\tprint(\"----------------Genetic Evolution Starting----------------\")\n\t\tpop = self._generate_population(self.pop_amount)\n\t\tpool = multiprocessing.Pool(processes=multiprocessing.cpu_count())\n\t\tfitness = pool.map(self._evaluate, pop) # pool.map preserves order, so fitness[i] corresponds to pop[i]\n\t\tfitness = np.array([val[0] for val in fitness])\n\t\tu_hist = np.zeros(self.num_gen) # utility of the best survivor in each generation\n\t\tfor g in range(0, self.num_gen):\n\t\t\tprint (\"-- Generation {} --\".format(g+1))\n\t\t\tpop_select = self._select(np.copy(pop), rate=1)\n\n\t\t\tself._uniform_cross_over(pop_select, 0.50)\n\t\t\tself._uniform_mutation(pop_select, 0.25, np.exp(-float(g)/self.num_gen)**2)\n\t\t\t#self._mutate(pop_select, 0.05)\n\n\t\t\tfitness_select = pool.map(self._evaluate, pop_select)\n\t\t\tfitness_select = np.array([val[0] for val in fitness_select])\n\n\t\t\tpop_tmp = np.append(pop, pop_select, axis=0)\n\t\t\tfitness_tmp = np.append(fitness, fitness_select, axis=0)\n\n\t\t\tpop_survive, fitness_survive = self._survive(pop_tmp, fitness_tmp)\n\n\t\t\tpop_new = self._generate_population(self.pop_amount - len(pop_survive))\n\t\t\tfitness_new = pool.map(self._evaluate, pop_new)\n\t\t\tfitness_new = np.array([val[0] for val in fitness_new])\n\n\t\t\tpop = np.append(pop_survive, pop_new, axis=0)\n\t\t\tfitness = np.append(fitness_survive, fitness_new, axis=0)\n\t\t\tif self.print_progress:\n\t\t\t\tself._show_evolution(fitness, pop)\n\t\t\tu_hist[g] = fitness[0]\n\n\t\tfitness = pool.map(self._evaluate, pop)\n\t\tfitness = np.array([val[0] for val in fitness])\n\t\treturn pop, fitness\n\n\nclass GradientSearch(object):\n\t\"\"\"Gradient search optimization algorithm for the EZ-Climate model.\n\n\tParameters\n\t----------\n\tutility : `Utility` object\n\t\tobject of utility class\n\tvar_nums : int\n\t\tnumber of elements in array to optimize\n\taccuracy : float\n\t\tstop value for the gradient descent\n\titerations : int\n\t\tmaximum number of iterations\n\tfixed_values : ndarray, optional\n\t\tnodes to keep fixed\n\tfixed_indicies : ndarray, optional\n\t\tindices of nodes to keep fixed\n\tprint_progress : bool, optional\n\t\tif the progress of the optimization should be printed\n\tscale_alpha : ndarray, optional\n\t\tarray to scale the learning rate\n\n\tAttributes\n\t----------\n\tu : `Utility` object\n\t\tobject of utility class\n\tvar_nums : int\n\t\tnumber of elements in array to optimize\n\taccuracy : float\n\t\tstop value for the gradient descent\n\titerations : int\n\t\tmaximum number of iterations\n\tfixed_values : ndarray, optional\n\t\tnodes to keep fixed\n\tfixed_indicies : ndarray, optional\n\t\tindices of nodes to keep fixed\n\tprint_progress : bool, optional\n\t\tif the progress of the optimization should be printed\n\tscale_alpha : ndarray, optional\n\t\tarray to scale the learning rate\n\n\t\"\"\"\n\n\tdef __init__(self, utility, var_nums, accuracy=1e-06, iterations=100, fixed_values=None, \n\t\t fixed_indicies=None, print_progress=False, scale_alpha=None):\n\t\tself.u = utility\n\t\tself.var_nums = var_nums\n\t\tself.accuracy = accuracy\n\t\tself.iterations = iterations\n\t\tself.fixed_values =
fixed_values\n\t\tself.fixed_indicies = fixed_indicies\n\t\tself.print_progress = print_progress\n\t\tself.scale_alpha = scale_alpha\n\t\tif scale_alpha is None:\n\t\t\tself.scale_alpha = np.exp(np.linspace(0.0, 3.0, var_nums))\n\n\tdef _partial_grad(self, i):\n\t\t\"\"\"Calculate the ith element of the gradient vector.\"\"\"\n\t\tm_copy = self.m.copy()\n\t\tm_copy[i] = m_copy[i] - self.delta if (m_copy[i] - self.delta)>=0 else 0.0\n\t\tminus_utility = self.u.utility(m_copy)\n\t\tm_copy[i] += 2*self.delta\n\t\tplus_utility = self.u.utility(m_copy)\n\t\tgrad = (plus_utility-minus_utility) / (2*self.delta) # central difference approximation\n\t\treturn grad, i\n\n\tdef numerical_gradient(self, m, delta=1e-08, fixed_indicies=None):\n\t\t\"\"\"Calculate utility gradient numerically.\n\n\t\tParameters\n\t\t----------\n\t\tm : ndarray or list\n\t\t\tarray of mitigation\n\t\tdelta : float, optional\n\t\t\tchange in mitigation\n\t\tfixed_indicies : ndarray or list, optional\n\t\t\tindices of gradient that should not be calculated\n\n\t\tReturns\n\t\t-------\n\t\tndarray\n\t\t\tgradient\n\n\t\t\"\"\"\n\t\tself.delta = delta\n\t\tself.m = m\n\t\tif fixed_indicies is None:\n\t\t\tfixed_indicies = []\n\t\tgrad = np.zeros(len(m))\n\t\tif not isinstance(m, np.ndarray):\n\t\t\tself.m = np.array(m)\n\t\tpool = multiprocessing.Pool()\n\t\tindicies = np.delete(range(len(m)), fixed_indicies)\n\t\tres = pool.map(self._partial_grad, indicies)\n\t\tfor g, i in res:\n\t\t\tgrad[i] = g\n\t\tpool.close()\n\t\tpool.join()\n\t\tdel self.m\n\t\tdel self.delta\n\t\treturn grad\n\n\tdef _partial_grad_cons(self, i):\n\t\t\"\"\"Calculate the ith element of the gradient vector, with adjusted first period consumption.\"\"\"\n\t\tm_copy = self.m.copy()\n\t\tm_copy[i] = m_copy[i] - self.delta if (m_copy[i] - self.delta)>=0 else 0.0\n\t\tminus_utility = self.u.adjusted_utility(m_copy, first_period_consadj=self.cons)\n\t\tm_copy[i] += 2*self.delta\n\t\tplus_utility = self.u.adjusted_utility(m_copy, first_period_consadj=self.cons)\n\t\tgrad = (plus_utility-minus_utility) / (2*self.delta) # central difference approximation\n\t\treturn grad, i\n\n\tdef numerical_gradient_cons(self, m, cons, delta=1e-08):\n\t\t\"\"\"Calculate utility gradient numerically, with adjusted first period consumption.\n\n\t\tParameters\n\t\t----------\n\t\tm : ndarray or list\n\t\t\tarray of mitigation\n\t\tcons : float\n\t\t\tfirst period consumption adjustment passed to `adjusted_utility`\n\t\tdelta : float, optional\n\t\t\tchange in mitigation\n\n\t\tReturns\n\t\t-------\n\t\tndarray\n\t\t\tgradient\n\n\t\t\"\"\"\n\t\tself.delta = delta\n\t\tself.m = m\n\t\tself.cons = cons\n\t\tgrad = np.zeros(len(m))\n\t\tif not isinstance(m, np.ndarray):\n\t\t\tself.m = np.array(m)\n\t\tpool = multiprocessing.Pool()\n\t\tindicies = np.array(range(len(m)))\n\t\tres = pool.map(self._partial_grad_cons, indicies)\n\t\tfor g, i in res:\n\t\t\tgrad[i] = g\n\t\tpool.close()\n\t\tpool.join()\n\t\tdel self.m\n\t\tdel self.delta\n\t\tdel self.cons\n\t\treturn grad\n\n\tdef _accelerate_scale(self, accelerator, prev_grad, grad):\n\t\t\"\"\"Scale up the step size of elements whose gradient kept its sign; reset the others.\"\"\"\n\t\tsign_vector = np.sign(prev_grad * grad)\n\t\tscale_vector = np.ones(self.var_nums) * (1 + 0.10)\n\t\taccelerator[sign_vector <= 0] = 1\n\t\taccelerator *= scale_vector\n\t\treturn accelerator\n\n\tdef gradient_descent(self, initial_point, return_last=False):\n\t\t\"\"\"Gradient descent algorithm. The `initial_point` is updated using the\n\t\tAdam algorithm. Adam uses the history of the gradient to compute individual\n\t\tstep sizes for each element in the mitigation vector.
The vector of step\n\t\tsizes is calculated using estimates of the first and second moments of\n\t\tthe gradient.\n\n\t\tParameters\n\t\t----------\n\t\tinitial_point : ndarray\n\t\t\tinitial guess of the mitigation\n\t\treturn_last : bool, optional\n\t\t\tif True the function returns the last point, else the point\n\t\t\t\twith highest utility\n\n\t\tReturns\n\t\t-------\n\t\ttuple\n\t\t\t(best point, best utility)\n\n\t\t\"\"\"\n\t\tnum_decision_nodes = initial_point.shape[0]\n\t\tx_hist = np.zeros((self.iterations+1, num_decision_nodes))\n\t\tu_hist = np.zeros(self.iterations+1)\n\t\tu_hist[0] = self.u.utility(initial_point)\n\t\tx_hist[0] = initial_point\n\n\t\tbeta1, beta2 = 0.90, 0.90\n\t\teta = 0.0015 # learning rate\n\t\teps = 1e-3\n\t\tm_t, v_t = 0, 0\n\n\t\tprev_grad = 0.0\n\t\taccelerator = np.ones(self.var_nums)\n\t\t# Adam formula at http://sebastianruder.com/optimizing-gradient-descent/index.html#fnref:15\n\t\tfor i in range(self.iterations):\n\t\t\tgrad = self.numerical_gradient(x_hist[i], fixed_indicies=self.fixed_indicies)\n\t\t\tm_t = beta1*m_t + (1-beta1)*grad # biased first moment estimate\n\t\t\tv_t = beta2*v_t + (1-beta2)*np.power(grad, 2) # biased second moment estimate\n\t\t\tm_hat = m_t / (1-beta1**(i+1)) # bias-corrected first moment\n\t\t\tv_hat = v_t / (1-beta2**(i+1)) # bias-corrected second moment\n\t\t\tif i != 0:\n\t\t\t\taccelerator = self._accelerate_scale(accelerator, prev_grad, grad)\n\n\t\t\t# Adam divides the step by the square root of the second moment estimate\n\t\t\tnew_x = x_hist[i] + ((eta*m_hat)/(np.sqrt(v_hat)+eps)) * accelerator # empirical acceleration; the 1.1 scaling factor is a heuristic\n\t\t\tnew_x[new_x < 0] = 0.0\n\n\t\t\tif self.fixed_values is not None:\n\t\t\t\tnew_x[self.fixed_indicies] = self.fixed_values\n\n\t\t\tx_hist[i+1] = new_x\n\t\t\tu_hist[i+1] = self.u.utility(new_x)[0]\n\t\t\tprev_grad = grad.copy()\n\n\t\t\tif self.print_progress:\n\t\t\t\tprint(\"-- Iteration {} -- \\n Current Utility: {}\".format(i+1, u_hist[i+1]))\n\t\t\t\tprint(new_x)\n\n\t\tif return_last:\n\t\t\treturn x_hist[i+1], u_hist[i+1]\n\t\tbest_index = np.argmax(u_hist)\n\t\treturn x_hist[best_index], u_hist[best_index]\n\n\tdef run(self, initial_point_list, topk=4):\n\t\t\"\"\"Initiate the gradient search algorithm.
\n\n\t\tParameters\n\t\t----------\n\t\tinitial_point_list : list\n\t\t\tlist of initial points to select from\n\t\ttopk : int, optional\n\t\t\tselect and run gradient descent on the `topk` first points of\n\t\t\t`initial_point_list`\n\n\t\tReturns\n\t\t-------\n\t\ttuple\n\t\t\tbest mitigation point and the utility of the best mitigation point\n\n\t\tRaises\n\t\t------\n\t\tValueError\n\t\t\tIf `topk` is larger than the length of `initial_point_list`.\n\n\t\tNote\n\t\t----\n\t\tUses the :mod:`~multiprocessing` package.\n\n\t\t\"\"\"\n\t\tprint(\"----------------Gradient Search Starting----------------\")\n\n\t\tif topk > len(initial_point_list):\n\t\t\traise ValueError(\"topk {} > number of initial points {}\".format(topk, len(initial_point_list)))\n\n\t\tcandidate_points = initial_point_list[:topk]\n\t\tmitigations = []\n\t\tutilities = np.zeros(topk)\n\t\tfor cp, count in zip(candidate_points, range(topk)):\n\t\t\tif not isinstance(cp, np.ndarray):\n\t\t\t\tcp = np.array(cp)\n\t\t\tprint(\"Starting process {} of Gradient Descent\".format(count+1))\n\t\t\tm, u = self.gradient_descent(cp)\n\t\t\tmitigations.append(m)\n\t\t\tutilities[count] = u\n\t\tbest_index = np.argmax(utilities)\n\t\treturn mitigations[best_index], utilities[best_index]\n\n\nclass CoordinateDescent(object):\n\t\"\"\"Coordinate descent optimization algorithm for the EZ-Climate model.\n\n\tParameters\n\t----------\n\tutility : `Utility` object\n\t\tobject of utility class\n\tvar_nums : int\n\t\tnumber of elements in array to optimize\n\taccuracy : float\n\t\tstop value for the utility increase\n\titerations : int\n\t\tmaximum number of iterations\n\n\tAttributes\n\t----------\n\tu : `Utility` object\n\t\tobject of utility class\n\tvar_nums : int\n\t\tnumber of elements in array to optimize\n\taccuracy : float\n\t\tstop value for the utility increase\n\titerations : int\n\t\tmaximum number of iterations\n\n\t\"\"\"\n\tdef __init__(self, utility, var_nums, accuracy=1e-4, iterations=100):\n\t\tself.u = utility\n\t\tself.var_nums = var_nums\n\t\tself.accuracy = accuracy\n\t\tself.iterations = iterations\n\n\tdef _min_func(self, x, m, i):\n\t\t\"\"\"Negative utility of `m` with node `i` set to `x`; minimized by `fmin`.\"\"\"\n\t\tm_copy = m.copy()\n\t\tm_copy[i] = x\n\t\treturn -self.u.utility(m_copy)[0]\n\n\tdef _minimize_node(self, node, m):\n\t\t\"\"\"Minimize the negative utility along the single coordinate `node`.\"\"\"\n\t\tfrom scipy.optimize import fmin\n\t\treturn fmin(self._min_func, x0=m[node], args=(m, node), disp=False)\n\n\tdef run(self, m):\n\t\t\"\"\"Run the coordinate descent iterations.\n\n\t\tParameters\n\t\t----------\n\t\tm : ndarray\n\t\t\tinitial point\n\n\t\tReturns\n\t\t-------\n\t\ttuple\n\t\t\tbest mitigation point and the utility of the best mitigation point\n\n\t\tNote\n\t\t----\n\t\tUses the :mod:`~scipy` package.\n\n\t\t\"\"\"\n\t\tx_hist = []\n\t\tu_hist = []\n\t\tnodes = range(self.var_nums)\n\t\tx_hist.append(m.copy())\n\t\tu_hist.append(self.u.utility(m)[0])\n\t\tprint(\"----------------Coordinate Descent Starting----------------\")\n\t\tprint(\"Starting Utility: {}\".format(u_hist[0]))\n\t\tfor i in range(self.iterations):\n\t\t\tprint(\"-- Iteration {} --\".format(i+1))\n\t\t\tnode_iteration = np.random.choice(nodes, replace=False, size=len(nodes))\n\t\t\tfor node in node_iteration:\n\t\t\t\tm[node] = max(0.0, self._minimize_node(node, m))\n\t\t\tx_hist.append(m.copy())\n\t\t\tu_hist.append(self.u.utility(m)[0])\n\t\t\tprint(\"Current Utility: {}\".format(u_hist[i+1]))\n\t\t\tif np.abs(u_hist[i+1] - u_hist[i]) < self.accuracy:\n\t\t\t\tbreak\n\t\treturn x_hist[-1], u_hist[-1]"
] | [
[
"numpy.square",
"numpy.random.random",
"numpy.maximum",
"numpy.linspace",
"numpy.random.choice",
"numpy.abs",
"numpy.power",
"numpy.ones",
"numpy.sign",
"numpy.max",
"numpy.copy",
"numpy.argmax",
"scipy.optimize.fmin",
"numpy.append",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
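A brief note on the `gradient_descent` method in the EZ-Climate file above: it applies the Adam update rule from the reference linked in its comment. Below is a minimal, self-contained sketch of a single Adam ascent step with the same default hyper-parameters; the standalone `adam_step` helper is our illustration only and is not part of the source file.

import numpy as np

def adam_step(x, grad, m_t, v_t, t, eta=0.0015, beta1=0.90, beta2=0.90, eps=1e-3):
    # exponentially decaying averages of past gradients and their squares
    m_t = beta1 * m_t + (1 - beta1) * grad
    v_t = beta2 * v_t + (1 - beta2) * np.power(grad, 2)
    # bias correction for the zero-initialized moment estimates (t starts at 1)
    m_hat = m_t / (1 - beta1 ** t)
    v_hat = v_t / (1 - beta2 ** t)
    # ascent step (the model maximizes utility), scaled per element by sqrt(v_hat)
    x_new = x + eta * m_hat / (np.sqrt(v_hat) + eps)
    return x_new, m_t, v_t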
steuxyo/Pandora | [
"57db04f31d6cecba93fa3bc0091f624c8b8ec5f1"
] | [
"tests/test_disparity.py"
] | [
"#!/usr/bin/env python\n# coding: utf8\n#\n# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).\n#\n# This file is part of PANDORA\n#\n# https://github.com/CNES/Pandora\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nThis module contains functions to test the disparity module.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\nimport xarray as xr\n\nimport common\nimport pandora\nimport pandora.constants as cst\nimport pandora.disparity as disparity\nimport pandora.matching_cost as matching_cost\nfrom pandora.img_tools import read_img\nfrom pandora.state_machine import PandoraMachine\n\n\nclass TestDisparity(unittest.TestCase):\n \"\"\"\n TestDisparity class allows to test the disparity module\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Method called to prepare the test fixture\n\n \"\"\"\n # Create stereo images\n data = np.array(([[1, 2, 4, 6],\n [2, 4, 1, 6],\n [6, 7, 8, 10]]), dtype=np.float64)\n self.left = xr.Dataset({'im': (['row', 'col'], data)},\n coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})\n self.left.attrs = {'valid_pixels': 0, 'no_data_mask': 1}\n\n data = np.array(([[6, 1, 2, 4],\n [6, 2, 4, 1],\n [10, 6, 7, 8]]), dtype=np.float64)\n self.right = xr.Dataset({'im': (['row', 'col'], data)},\n coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})\n self.right.attrs = {'valid_pixels': 0, 'no_data_mask': 1}\n\n def test_to_disp(self):\n \"\"\"\n Test the to disp method\n\n \"\"\"\n\n # Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max 1\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,\n 'subpix': 1})\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)\n\n # Disparity map ground truth, for the images described in the setUp method\n gt_disp = np.array([[1, 1, 1, -3],\n [1, 1, 1, -3],\n [1, 1, 1, -3]])\n\n # Compute the disparity\n disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})\n disp = disparity_.to_disp(cv)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)\n\n #\n # Test the to_disp method with negative disparity range\n #\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)\n\n # Disparity map ground truth\n gt_disp = np.array([[0, -1, -2, -3],\n [0, -1, -1, -3],\n [0, -1, -2, -3]])\n\n # Compute the disparity\n disp = disparity_.to_disp(cv)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)\n\n #\n # Test the to_disp method with positive disparity range\n #\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 3)\n\n # Disparity map ground truth\n gt_disp = np.array([[1, 1, 1, 0],\n [1, 1, 1, 0],\n [1, 1, 1, 0]])\n\n # Compute the 
disparity\n disp = disparity_.to_disp(cv)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)\n\n # Test disp_indices copy\n # Modify the disparity map\n disp['disparity_map'].data[0, 0] = -95\n # Check if the xarray disp_indices is equal to the ground truth disparity map\n np.testing.assert_array_equal(cv['disp_indices'].data, gt_disp)\n\n def test_to_disp_with_offset(self):\n \"\"\"\n Test the to disp method with window_size > 1\n\n \"\"\"\n\n # Create the left cost volume, with SAD measure window size 3, subpixel 1, disp_min -3 disp_max 1\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,\n 'subpix': 1})\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)\n\n # Disparity map ground truth, for the images described in the setUp method\n # Check if gt is full size and border (i.e [offset:-offset] equal to invalid_disparity\n gt_disp = np.array([[-99, -99, -99, -99],\n [-99, 1, 0, -99],\n [-99, -99, -99, -99]])\n\n # Compute the disparity\n disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': -99})\n disp = disparity_.to_disp(cv)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)\n\n #\n # Test the to_disp method with negative disparity range\n #\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)\n\n # Disparity map ground truth\n gt_disp = np.array([[-99, -99, -99, -99],\n [-99, -99, -1, -99],\n [-99, -99, -99, -99]])\n\n # Compute the disparity\n disp = disparity_.to_disp(cv)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)\n\n #\n # Test the to_disp method with positive disparity range\n #\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 3)\n\n # Disparity map ground truth\n gt_disp = np.array([[-99, -99, -99, -99],\n [-99, 1, -99, -99],\n [-99, -99, -99, -99]])\n # Compute the disparity\n disp = disparity_.to_disp(cv)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)\n\n # Test disp_indices copy\n # Modify the disparity map\n disp['disparity_map'].data[0, 0] = -95\n # Check if the xarray disp_indices is equal to the ground truth disparity map\n np.testing.assert_array_equal(cv['disp_indices'].data, gt_disp)\n\n def test_argmin_split(self):\n \"\"\"\n Test the argmin_split method\n\n \"\"\"\n # Create the left cost volume, with SAD measure, window size 1, subpixel 2, disp_min -3 disp_max 1\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,\n 'subpix': 2})\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)\n indices_nan = np.isnan(cv['cost_volume'].data)\n cv['cost_volume'].data[indices_nan] = np.inf\n\n # ground truth\n gt_disp = np.array([[1., 1., 1., -3.],\n [1., -0.5, 1., -3.],\n [1., 1., -1.5, -3]], dtype=np.float32)\n\n # Compute the disparity\n disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})\n disp = disparity_.argmin_split(cv)\n\n # Check if the calculated coefficient 
map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(gt_disp, disp)\n\n def test_argmax_split(self):\n \"\"\"\n Test the argmax_split method\n\n \"\"\"\n # Create the left cost volume, with ZNCC measure, window size 1, subpixel 2, disp_min -3 disp_max 1\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'zncc', 'window_size': 1,\n 'subpix': 2})\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)\n indices_nan = np.isnan(cv['cost_volume'].data)\n cv['cost_volume'].data[indices_nan] = -np.inf\n\n # ground truth\n gt_disp = np.array([[0., -1., -2., -3.],\n [0., -1., -2., -3.],\n [0., -1., -2., -3.]], dtype=np.float32)\n\n # Compute the disparity\n disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})\n disp = disparity_.argmax_split(cv)\n\n # Check if the calculated coefficient map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(gt_disp, disp)\n\n def test_coefficient_map(self):\n \"\"\"\n Test the method coefficient map\n\n \"\"\"\n # Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max 1\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,\n 'subpix': 1})\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)\n\n # Compute the disparity\n disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})\n disparity_.to_disp(cv)\n\n # Coefficient map ground truth, for the images described in the setUp method\n gt_coeff = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]])\n # Compute the disparity, and the coefficient map\n coeff = disparity_.coefficient_map(cv)\n\n # Check if the calculated coefficient map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(coeff.data, gt_coeff)\n\n def test_approximate_right_disparity(self):\n \"\"\"\n Test the approximate_right_disparity method\n\n \"\"\"\n # Create the left cost volume, with SAD measure window size 3 and subpixel 1\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,\n 'subpix': 1})\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)\n\n # Right disparity map ground truth, for the images described in the setUp method\n gt_disp = np.array([[0, 0, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 0]])\n\n # Compute the right disparity map\n disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})\n disp_r = disparity_.approximate_right_disparity(cv, self.right)\n\n # Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(disp_r['disparity_map'].data, gt_disp)\n\n def test_right_disparity_subpixel(self):\n \"\"\"\n Test the right disparity method, with subpixel disparity\n\n \"\"\"\n # Create the left cost volume, with SAD measure window size 3 and subpixel 4\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,\n 'subpix': 4})\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)\n\n # Right disparity map ground truth\n gt_disp = np.array([[0, 0, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 0]])\n\n # Compute the right disparity map\n disparity_ = 
disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})\n disp_r = disparity_.approximate_right_disparity(cv, self.right)\n\n # Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(disp_r['disparity_map'].data, gt_disp)\n\n @staticmethod\n def test_right_disparity_comparaison():\n \"\"\"\n Test the right disparity method by comparing the right disparity map calculated from scratch with the one\n calculated with the fast method\n\n \"\"\"\n # Build the default configuration\n default_cfg = pandora.check_json.default_short_configuration\n\n pandora_left = read_img('tests/pandora/left.png', no_data=np.nan, mask=None)\n pandora_right = read_img('tests/pandora/right.png', no_data=np.nan, mask=None)\n\n fast_cfg = {\n 'pipeline': {\n 'right_disp_map': {\n 'method': 'accurate'\n },\n 'matching_cost': {\n 'matching_cost_method': 'census'\n },\n 'disparity': {\n 'disparity_method': 'wta'\n },\n 'refinement': {\n 'refinement_method': 'vfit'\n },\n 'validation': {\n 'validation_method': 'cross_checking',\n 'right_left_mode': 'approximate'\n }\n }\n }\n\n pandora_machine_fast = PandoraMachine()\n cfg = pandora.check_json.update_conf(default_cfg, fast_cfg)\n left, right_fast = \\\n pandora.run(pandora_machine_fast, pandora_left, pandora_right, -60, 0, cfg['pipeline']) # pylint: disable=unused-variable\n\n acc_cfg = {\n 'pipeline':\n {\n 'right_disp_map': {\n 'method': 'accurate'\n },\n 'matching_cost': {\n 'matching_cost_method': 'census'\n },\n 'disparity': {\n 'disparity_method': 'wta'\n },\n 'refinement': {\n 'refinement_method': 'vfit'\n },\n 'validation': {\n 'validation_method': 'cross_checking',\n 'right_left_mode': 'accurate',\n }\n }\n }\n\n pandora_machine_acc = PandoraMachine()\n cfg = pandora.check_json.update_conf(default_cfg, acc_cfg)\n left, right_acc = pandora.run(pandora_machine_acc, pandora_left, pandora_right, -60, 0, cfg['pipeline'])\n # Check if the calculated disparity map in fast mode is equal to the disparity map in accurate mode\n np.testing.assert_array_equal(right_fast['disparity_map'].data, right_acc['disparity_map'].data)\n\n # Check if the calculated coefficient map in fast mode is equal to the coefficient map in accurate mode\n np.testing.assert_array_equal(right_fast['interpolated_coeff'].data, right_acc['interpolated_coeff'].data)\n\n def test_to_disp_validity_mask(self):\n \"\"\"\n Test the generated validity mask in the to_disp method\n\n # If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image\n # If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)\n \"\"\"\n # ------ Negative disparities ------\n # Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max -1\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,\n 'subpix': 1})\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)\n\n # Compute the disparity map and validity mask\n disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, self.left, self.right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],\n 
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],\n [cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0]], dtype=np.uint16)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ------ Positive disparities ------\n # Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min 1 disp_max 2\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)\n\n # Compute the disparity map and validity mask\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, self.left, self.right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array([[0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],\n [0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],\n [0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]],\n dtype=np.uint16)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ------ Negative and positive disparities ------\n # Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -1 disp_max 1\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -1, 1)\n\n # Compute the disparity map and validity mask\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, self.left, self.right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],\n [cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],\n [cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE]],\n dtype=np.uint16)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ------ Variable grids of disparities ------\n # Disp_min and disp_max\n disp_min_grid = np.array([[-3, -2, -3, -1],\n [-2, -2, -1, -3],\n [-1, -2, -2, -3]])\n\n disp_max_grid = np.array([[-1, -1, -2, 0],\n [0, -1, 0, 0],\n [0, 0, -1, -1]])\n\n # Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max -1\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,\n 'subpix': 1})\n dmin, dmax = matching_cost_plugin.dmin_dmax(disp_min_grid, disp_max_grid)\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, dmin, dmax)\n matching_cost_plugin.cv_masked(self.left, self.right, cv, disp_min_grid, disp_max_grid)\n\n # Compute the disparity map and validity mask\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, self.left, self.right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n 
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],\n [cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],\n [cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0]], dtype=np.uint16)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n def test_to_disp_validity_mask_with_offset(self):\n \"\"\"\n Test the generated validity mask in the to_disp method\n\n # If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image\n # If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)\n \"\"\"\n # ------ Negative disparities ------\n # Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max -1\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,\n 'subpix': 1})\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)\n\n # Compute the disparity map and validity mask\n disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, self.left, self.right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ------ Positive disparities ------\n # Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min 1 disp_max 2\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)\n\n # Compute the disparity map and validity mask\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, self.left, self.right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n 
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ------ Negative and positive disparities ------\n # Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -1 disp_max 1\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -1, 1)\n\n # Compute the disparity map and validity mask\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, self.left, self.right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ------ Variable grids of disparities ------\n # Disp_min and disp_max\n disp_min_grid = np.array([[-3, -2, -3, -1],\n [-2, -2, -1, -3],\n [-1, -2, -2, -3]])\n\n disp_max_grid = np.array([[-1, -1, -2, 0],\n [0, -1, 0, 0],\n [0, 0, -1, -1]])\n\n # Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max -1\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,\n 'subpix': 1})\n dmin, dmax = matching_cost_plugin.dmin_dmax(disp_min_grid, disp_max_grid)\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, dmin, dmax)\n matching_cost_plugin.cv_masked(self.left, self.right, cv, disp_min_grid, disp_max_grid)\n\n # Compute the disparity map and validity mask\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, self.left, self.right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)\n\n # Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n def test_approximate_right_disparity_validity_mask(self):\n \"\"\"\n Test the generated validity mask in the right_disparity method\n\n # If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image\n # If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)\n \"\"\"\n # Create the left cost volume, with SAD measure window size 1 and subpixel 1\n matching_cost_plugin = 
matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,\n 'subpix': 1})\n\n # ------ Negative and positive disparities ------\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)\n\n # Validity mask ground truth ( for disparities -1 0 1 2 )\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],\n [cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],\n [cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE]], dtype=np.uint16)\n\n # Compute the right disparity map and the validity mask\n disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})\n dataset = disparity_.approximate_right_disparity(cv, self.right)\n\n # Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ------ Negative disparities ------\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)\n\n # Validity mask ground truth ( for disparities -2 -1 )\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n 0, 0],\n [cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n 0, 0],\n [cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n 0, 0]], dtype=np.uint16)\n\n # Compute the right disparity map and the validity mask\n dataset = disparity_.approximate_right_disparity(cv, self.right)\n\n # Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ------ Positive disparities ------\n cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, -1)\n\n # Validity mask ground truth ( for disparities 1 2 )\n gt_mask = np.array([[0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],\n [0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],\n [0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]], dtype=np.uint16)\n\n # Compute the right disparity map and the validity mask\n dataset = disparity_.approximate_right_disparity(cv, self.right)\n\n # Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n @staticmethod\n def test_validity_mask():\n \"\"\"\n # If bit 0 == 1 : Invalid pixel : the disparity interval is missing in the right image\n # If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image\n # If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)\n # If bit 6 == 1 : Invalid pixel : invalidated by the validity mask of the left 
image given as input\n # If bit 7 == 1 : Invalid pixel : right positions invalidated by the mask of the right image given as\n # input\n\n \"\"\"\n # Masks convention\n # 1 = valid\n # 2 = no_data\n # ---------------------- Test with positive and negative disparity range ----------------------\n data = np.array(([[1, 2, 4, 6],\n [2, 4, 1, 6],\n [6, 7, 8, 10]]), dtype=np.float64)\n left_mask = np.array([[2, 1, 1, 1],\n [1, 2, 4, 1],\n [5, 1, 1, 2]], dtype=np.uint8)\n left = xr.Dataset({'im': (['row', 'col'], data),\n 'msk': (['row', 'col'], left_mask)},\n coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})\n left.attrs = {'valid_pixels': 1, 'no_data_mask': 2}\n\n data = np.array(([[6, 1, 2, 4],\n [6, 2, 4, 1],\n [10, 6, 7, 8]]), dtype=np.float64)\n right_mask = np.array([[1, 1, 3, 5],\n [4, 1, 1, 1],\n [2, 2, 4, 6]], dtype=np.uint8)\n\n right = xr.Dataset({'im': (['row', 'col'], data),\n 'msk': (['row', 'col'], right_mask)},\n coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})\n right.attrs = {'valid_pixels': 1, 'no_data_mask': 2}\n\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,\n 'subpix': 1})\n cv = matching_cost_plugin.compute_cost_volume(left, right, -1, 1)\n\n # Compute the disparity map and validity mask\n disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, left, right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array(\n [[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n 0, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT],\n [cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],\n [cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING, 0, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +\n cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT]], dtype=np.uint16)\n\n # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ---------------------- Test with negative disparity range ----------------------\n cv = matching_cost_plugin.compute_cost_volume(left, right, -2, -1)\n\n # Compute the disparity map and validity mask\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, left, right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0],\n [cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,\n cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, 0],\n [cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +\n cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n 
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]],\n dtype=np.uint16)\n\n # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ---------------------- Test with positive disparity range ----------------------\n cv = matching_cost_plugin.compute_cost_volume(left, right, 1, 2)\n\n # Compute the disparity map and validity mask\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, left, right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER, cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,\n cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT +\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],\n [0, cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],\n [cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,\n cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT +\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]],\n dtype=np.uint16)\n\n # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ---------------------- Test with positive and negative disparity range and window size = 3----------------\n data = np.array(([[1, 2, 4, 6, 1],\n [2, 4, 1, 6, 1],\n [6, 7, 8, 10, 1],\n [0, 5, 6, 7, 8]]), dtype=np.float64)\n left_mask = np.array([[2, 1, 1, 1, 1],\n [1, 2, 4, 1, 1],\n [5, 2, 1, 1, 1],\n [1, 1, 1, 1, 1]], dtype=np.uint8)\n left = xr.Dataset({'im': (['row', 'col'], data),\n 'msk': (['row', 'col'], left_mask)},\n coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})\n left.attrs = {'valid_pixels': 1, 'no_data_mask': 2}\n\n data = np.array(([[6, 1, 2, 4, 1],\n [6, 2, 4, 1, 6],\n [10, 6, 7, 8, 1],\n [5, 6, 7, 8, 0]]), dtype=np.float64)\n right_mask = np.array([[1, 1, 1, 2, 1],\n [5, 1, 1, 1, 1],\n [2, 1, 1, 6, 1],\n [0, 1, 1, 1, 1]], dtype=np.uint8)\n\n right = xr.Dataset({'im': (['row', 'col'], data),\n 'msk': (['row', 'col'], right_mask)},\n coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})\n right.attrs = {'valid_pixels': 1, 'no_data_mask': 2}\n\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,\n 'subpix': 1})\n cv = matching_cost_plugin.compute_cost_volume(left, right, -1, 1)\n\n # Compute the disparity map and validity mask\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, left, right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array(\n [[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +\n 
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +\n cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n ],\n dtype=np.uint16)\n\n # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n # ---------------------- Test with positive and negative disparity range on flag 1 ----------------------\n # Masks convention\n # 1 = valid\n # 0 = no_data\n\n data = np.ones((10, 10), dtype=np.float64)\n left_mask = np.ones((10, 10), dtype=np.uint8)\n\n left = xr.Dataset({'im': (['row', 'col'], data),\n 'msk': (['row', 'col'], left_mask)},\n coords={'row': np.arange(5, data.shape[0] + 5), 'col': np.arange(4, data.shape[1] + 4)})\n left.attrs = {'valid_pixels': 1, 'no_data_mask': 0}\n\n data = np.ones((10, 10), dtype=np.float64)\n right_mask = np.ones((10, 10), dtype=np.uint8)\n right_mask = np.tril(right_mask, -1.5)\n\n right = xr.Dataset({'im': (['row', 'col'], data),\n 'msk': (['row', 'col'], right_mask)},\n coords={'row': np.arange(5, data.shape[0] + 5), 'col': np.arange(4, data.shape[1] + 4)})\n right.attrs = {'valid_pixels': 1, 'no_data_mask': 0}\n\n matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,\n 'subpix': 1})\n cv = matching_cost_plugin.compute_cost_volume(left, right, -3, 2)\n\n # Compute the disparity map and validity mask\n dataset = disparity_.to_disp(cv)\n disparity_.validity_mask(dataset, left, right, cv)\n\n # Validity mask ground truth\n gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n 
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,\n 
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +\n cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],\n [cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,\n cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]\n ],\n dtype=np.uint8)\n\n # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equals)\n np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)\n\n\nif __name__ == '__main__':\n common.setup_logging()\n unittest.main()\n"
] | [
[
"numpy.isnan",
"numpy.arange",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.tril"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mylar-pr/DaaS | [
"e41fa9e9fbda66d7150f00e6db13dd3a76cd3501"
] | [
"first_lambda/service.py"
] | [
"import datetime\nimport json\nimport os\nimport boto3\nimport pandas as pd\nimport io\nimport requests\nimport numpy as np\nfrom io import StringIO\nimport uuid\n\n\n\ns3 = boto3.resource(\n service_name='s3',\n region_name='us-east-2')\nbucket_name = 'secom-daas-bucket' # already created on S3\n\nlink1 = 'https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom.data'\nlink2 = \"https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom_labels.data\"\nlinks = [link1,link2]\n\npath = \"/tmp/\"\ntimestamp = str(int(datetime.datetime.timestamp(datetime.datetime.now())))\n\ndef timestampify(link,timestamp):\n return link.split(\"/\")[-1].split(\".\")[0]+\"_\"+timestamp+\".data\"\n\ndata_filename = timestampify(link1,timestamp)\nlabel_filename = timestampify(link2,timestamp)\n\n\ndef download_data():\n \n url = link1\n \n r = requests.get(url)\n with open(path + data_filename, 'wb') as f:\n f.write(r.content)\n files = r.content\n f.close()\n print(\"Downloaded Secom data.\")\n \n url = link2\n r = requests.get(url)\n with open(path + label_filename, 'wb') as f:\n f.write(r.content)\n files = r.content\n f.close()\n print(\"Downloaded Secom labels.\")\n #time_stamp = str(int(datetime.datetime.timestamp(datetime.datetime.now())))\n\ndef process_time(secom_labels):\n return [\" \".join(i.decode(\"utf-8\").split()[1:]).split('\"')[1] for i in secom_labels]\n\ndef process_data(secom):\n return np.array([pd.to_numeric(bytearray(i).decode(\"UTF-8\").split(),errors='coerce') for i in secom]).astype(str)\n\ndef process_dataset(secom_path,secom_labels_path):\n\n print(\"processing dataset from {} and {}\".format(secom_path,secom_labels_path))\n #read the downloaded .data files \n with open(secom_path,'rb') as myfile:\n secom= myfile.readlines() \n myfile.close()\n\n with open(secom_labels_path,'rb') as myfile:\n secom_labels= myfile.readlines() \n myfile.close()\n\n columns1= [\"Time\"]\n df1 = pd.DataFrame(data=process_time(secom_labels),\n columns=columns1)\n df1\n\n features_size = len(secom[0].split())\n columns2 = [\"feature \"+ str(i) for i in range(features_size)]\n df2 = pd.DataFrame(data=process_data(secom),\n columns=columns2)\n\n df2.fillna(df2.mean(),inplace=True)\n df3 = pd.concat([df1,df2],axis=1).reset_index()\n\n df3 = df3.rename(columns = {'index':'secomId'})\n #set the secomId as unique ids\n df3['secomId'] = pd.Series([int(uuid.uuid4().int/(10**30)) for i in range(df3.shape[0])])\n\n\n return df3\n\n\n\n\n \n#bucket = 'my_bucket_name' # already created on S3\ndef upload_to_s3(df,bucket_name,dest_path='df.csv'):\n csv_buffer = StringIO()\n df.to_csv(csv_buffer)\n #s3_resource = boto3.resource('s3')\n s3.Object(bucket_name, dest_path).put(Body=csv_buffer.getvalue())\n print(\"Succesfully stored csv file into S3...\")\n\n\n\n\ndef handler(event, context):\n # Your code goes here!\n startTime = datetime.datetime.now()\n \n download_data()\n \n df = process_dataset(path + data_filename,path + label_filename)\n\n upload_to_s3(df, bucket_name, 'processed/processed_'+timestamp+\".csv\" )\n \n\n print(datetime.datetime.now() - startTime)\n\n\n\n\nhandler(1,1)"
] | [
[
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
antonyvigouret/Text-Recognition-PyTorch | [
"7576480684612e856602169b3229fe6c8f4b4b9d"
] | [
"train.py"
] | [
"import string\n\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.nn import CTCLoss\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchsummary import summary\nfrom tqdm import tqdm\n\nfrom cnn_seq2seq import ConvSeq2Seq\nfrom cnn_seq2seq import Decoder\nfrom cnn_seq2seq import Encoder\nfrom cnn_seq2seq_att import ConvSeq2SeqAtt\nfrom crnn import CRNN\nfrom data_utils import FakeTextImageGenerator\nfrom utils import labels_to_text\nfrom utils import text_to_labels\n\n\ndef train(path=None):\n dataset = FakeTextImageGenerator(batch_size=16).iter()\n\n criterion = CTCLoss(reduction=\"mean\", zero_infinity=True)\n\n net = CRNN(nclass=100).float()\n optimizer = optim.Adam(net.parameters(), lr=0.001)\n\n if path:\n checkpoint = torch.load(path)\n net.load_state_dict(checkpoint[\"model_state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n epoch = checkpoint[\"epoch\"]\n loss = checkpoint[\"loss\"]\n print(f\"model current epoch: {epoch} with loss: {loss}\")\n\n # loop over the dataset multiple times\n for epoch in range(1, 1000):\n running_loss = 0.0\n loop = tqdm(range(100))\n for i in loop:\n data = next(dataset)\n images = data[\"the_inputs\"]\n labels = data[\"the_labels\"]\n input_length = data[\"input_length\"]\n label_length = data[\"label_length\"]\n targets = data[\"targets\"]\n\n # print(\"target\", targets)\n # print(\"target l\", targets.size())\n # print(\"label_l\", label_length)\n # print(\"label_l l\", label_length.size())\n # print(\"pred_l\", input_length)\n # print(\"pred_l l\", input_length.size())\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(images.float())\n # print(outputs[8, 0, :])\n # print(outputs[:, 0, :])\n # print(outputs.size())\n loss = criterion(outputs, labels, input_length, label_length)\n\n # print(loss.item())\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n loop.set_postfix(epoch=epoch, loss=(running_loss / (i + 1)))\n\n # print(f\"Epoch: {epoch} | Loss: {running_loss/100}\")\n torch.save(\n {\n \"epoch\": epoch,\n \"model_state_dict\": net.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"loss\": running_loss,\n },\n \"checkpoint5.pt\",\n )\n\n print(\"Finished Training\")\n\n\ndef train_cs2s(path=None):\n alphabet = string.printable\n nclass = len(alphabet)\n\n writer = SummaryWriter()\n dataset = FakeTextImageGenerator(batch_size=4).iter()\n\n criterion = CrossEntropyLoss(ignore_index=97)\n\n encoder = Encoder(512, 512, 1, 0)\n decoder = Decoder(512, 100, 100, 1, 0)\n net = ConvSeq2Seq(encoder, decoder, nclass=nclass).float()\n\n optimizer = optim.Adam(net.parameters(), lr=0.003)\n\n if path:\n net2 = CRNN(nclass=100).float()\n checkpoint = torch.load(path)\n net2.load_state_dict(checkpoint[\"model_state_dict\"])\n # optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n # epoch = checkpoint[\"epoch\"]\n # loss = checkpoint[\"loss\"]\n # print(f\"model current epoch: {epoch} with loss: {loss}\")\n print(net2)\n\n net.conv1.load_state_dict(net2.conv1.state_dict())\n net.conv2.load_state_dict(net2.conv2.state_dict())\n net.conv3.load_state_dict(net2.conv3.state_dict())\n net.conv4.load_state_dict(net2.conv4.state_dict())\n net.conv5.load_state_dict(net2.conv5.state_dict())\n net.conv6.load_state_dict(net2.conv6.state_dict())\n net.conv7.load_state_dict(net2.conv7.state_dict())\n net.train()\n\n # loop over the dataset multiple times\n step = 0\n for epoch 
in range(1, 1000):\n running_loss = 0.0\n loop = tqdm(range(100))\n for i in loop:\n data = next(dataset)\n images = data[\"the_inputs\"]\n labels = data[\"the_labels\"]\n input_length = data[\"input_length\"]\n label_length = data[\"label_length\"]\n targets = data[\"targets\"]\n\n # print(\"target\", targets)\n # print(\"target l\", targets.size())\n # print(\"label_l\", label_length)\n # print(\"label_l l\", label_length.size())\n # print(\"pred_l\", input_length)\n # print(\"pred_l l\", input_length.size())\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(images.float(), labels, 0.5)\n # permute batchsize and seq_len dim to match labels when using .view(-1, output.size()[2])\n outputs = outputs.permute(1, 0, 2)\n # print(outputs[8, 0, :])\n # print(outputs[:, 0, :])\n # print(outputs.size())\n # print(labels.size())\n output_argmax = outputs.argmax(2)\n # print(output_argmax.view(-1))\n # print(labels.reshape(-1))\n loss = criterion(outputs.reshape(-1, 100), labels.reshape(-1))\n\n writer.add_scalar(\"loss\", loss.item(), step)\n step += 1\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(net.parameters(), 1)\n optimizer.step()\n\n running_loss += loss.item()\n\n loop.set_postfix(epoch=epoch, Loss=(running_loss / (i + 1)))\n\n # print(f\"Epoch: {epoch} | Loss: {running_loss/100}\")\n torch.save(\n {\n \"epoch\": epoch,\n \"model_state_dict\": net.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"loss\": running_loss,\n },\n \"cs2s_good.pt\",\n )\n torch.save(net, \"model_test_pretrained.pt\")\n\n print(\"Finished Training\")\n\n\ndef train_cs2satt(path=None):\n writer = SummaryWriter()\n dataset = FakeTextImageGenerator(batch_size=8).iter()\n\n criterion = CrossEntropyLoss(ignore_index=97)\n\n net = ConvSeq2SeqAtt(nclass=100).float()\n\n optimizer = optim.Adam(net.parameters(), lr=3e-4)\n if path:\n checkpoint = torch.load(path)\n net.load_state_dict(checkpoint[\"model_state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n epoch = checkpoint[\"epoch\"]\n loss = checkpoint[\"loss\"]\n print(f\"model current epoch: {epoch} with loss: {loss}\")\n net.train()\n\n # loop over the dataset multiple times\n step = 0\n for epoch in range(1, 1000):\n running_loss = 0.0\n loop = tqdm(range(100))\n for i in loop:\n data = next(dataset)\n images = data[\"the_inputs\"]\n labels = data[\"the_labels\"]\n input_length = data[\"input_length\"]\n label_length = data[\"label_length\"]\n targets = data[\"targets\"]\n\n # print(\"target\", targets)\n # print(\"target l\", targets.size())\n # print(\"label_l\", label_length)\n # print(\"label_l l\", label_length.size())\n # print(\"pred_l\", input_length)\n # print(\"pred_l l\", input_length.size())\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(images.float(), labels, 0.5)\n # permute batchsize and seq_len dim to match labels when using .view(-1, output.size()[2])\n outputs = outputs.permute(1, 0, 2)\n # print(outputs[8, 0, :])\n # print(outputs[:, 0, :])\n # print(outputs.size())\n # print(labels.size())\n output_argmax = outputs.argmax(2)\n # print(output_argmax.view(-1))\n # print(labels.reshape(-1))\n loss = criterion(outputs.reshape(-1, 100), labels.reshape(-1))\n\n # print(loss.item())\n writer.add_scalar(\"loss\", loss.item(), step)\n step += 1\n loss.backward()\n torch.nn.utils.clip_grad_norm_(net.parameters(), 1)\n optimizer.step()\n\n running_loss += loss.item()\n\n 
loop.set_postfix(epoch=epoch, Loss=(running_loss / (i + 1)))\n\n print(f\"Epoch: {epoch} | Loss: {running_loss/100}\")\n torch.save(\n {\n \"epoch\": epoch,\n \"model_state_dict\": net.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"loss\": running_loss,\n },\n \"cs2satt_good.pt\",\n )\n # torch.save(net, \"model_test_pretrained.pt\")\n\n print(\"Finished Training\")\n\n\nif __name__ == \"__main__\":\n train_cs2satt(\"cs2satt_good.pt\")\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.nn.CTCLoss",
"torch.utils.tensorboard.SummaryWriter",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aspuru-guzik-group/routescore | [
"3adedbc1d6193751bd1cd0af33395572b35a8e43",
"3adedbc1d6193751bd1cd0af33395572b35a8e43"
] | [
"_Figure_S18.py",
"_Figure_S11.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Custom style\nplt.style.use('scientific')\n\n# absolute tolerances for chimera\nabsolutes = np.array([0.67, 1080000, 0.2, 0.15848931924611134])\n\n# load in gryffin runs with Naive score as objective\ndf_naive = pd.read_pickle('Optimization/runs/gryffin_runs_naive.pkl')\n\n# make the plot\n\nfig, axes = plt.subplots(nrows=4, ncols=1, sharex=True, figsize=(8, 10))\n\nsns.lineplot(x='eval', y='peak_score', data=df_naive, ax=axes[0], label='Naive Score Included')\naxes[0].axhline(absolutes[0], ls='--', linewidth=2, c='k', alpha=0.6)\naxes[0].fill_between(df_naive['eval'], absolutes[0], np.amin(df_naive['peak_score']), color='#8C9196', alpha=0.25)\naxes[0].set_ylim(0.25, 0.9)\naxes[0].set_ylabel('Peak score ', fontsize=15)\naxes[0].tick_params(labelsize=13)\naxes[0].legend(loc='lower right', ncol=1, fontsize=15)\n\nsns.lineplot(x='eval', y='naive_score', data=df_naive, ax=axes[1])\naxes[1].set_yscale('log')\naxes[1].axhline(absolutes[1], ls='--', linewidth=2, c='k', alpha=0.6)\naxes[1].fill_between(df_naive['eval'], absolutes[1], np.amax(df_naive['naive_score']), color='#8C9196', alpha=0.25)\naxes[1].set_ylim(np.amin(df_naive['naive_score']), np.amax(df_naive['naive_score']))\naxes[1].set_ylabel('Naive score \\n$( \\$ \\cdot (mol \\ target)^{-1}$)', fontsize=15)\naxes[1].tick_params(labelsize=13)\n\nsns.lineplot(x='eval', y='spectral_overlap', data=df_naive, ax=axes[2])\naxes[2].axhline(absolutes[2], ls='--', linewidth=2, c='k', alpha=0.6)\naxes[2].fill_between(df_naive['eval'], absolutes[2], np.amax(df_naive['spectral_overlap']), color='#8C9196', alpha=0.25)\naxes[2].set_ylim(0., 0.3)\naxes[2].set_ylabel('Spectral \\noverlap', fontsize=15)\naxes[2].tick_params(labelsize=13)\n\nsns.lineplot(x='eval', y='fluo_rate', data=df_naive, ax=axes[3])\naxes[3].axhline(absolutes[3], ls='--', linewidth=2, c='k', alpha=0.6)\naxes[3].fill_between(df_naive['eval'], absolutes[3], np.amin(df_naive['fluo_rate']), color='#8C9196', alpha=0.25)\naxes[3].set_ylim(0., 0.6)\naxes[3].set_ylabel('Fluorescence \\nrate (ns$^{-1}$)', fontsize=15)\naxes[3].tick_params(labelsize=13)\naxes[3].set_xlabel('Number of evaluations', fontsize=15)\n\nfor ax in axes:\n ax.set_xlim(0, 500)\n\nplt.tight_layout()\nplt.savefig('Figure_S18.png', dpi=300)\nplt.show()\n",
"#!/usr/bin/env python\n\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom scipy.stats import pearsonr, spearmanr\n\n# distributions of the individual scores\n\n# load the full properties\ndf_full = pickle.load(open('Properties/full_props.pkl', 'rb'))\n\n\n# pairwise correlations of the naive score and SA, SC, SYBA, and RAscore-NN\nfig, axes = plt.subplots(1, 4, figsize=(12, 3.5), sharey=True)\naxes = axes.flatten()\n\nscores = ['sa_score', 'sc_score', 'syba_score', 'sr_nn_score']\nnames = ['SAscore', 'SCscore', 'SYBAscore', 'RAscore-NN']\n\nfor ix, (ax, score, name) in enumerate(zip(axes, scores, names)):\n\n sns.scatterplot(\n df_full[score],\n df_full['naive_score'],\n ax=ax,\n )\n ax.set_xlabel(name, fontsize=12)\n\n spear = spearmanr(df_full[score], df_full['naive_score'])\n pear = pearsonr(df_full[score], df_full['naive_score'])\n axes[ix].set_title(f'pearson = {round(pear[0], 2)}\\nspearman = {round(spear[0], 2)}', fontsize=12)\n\naxes[0].set_ylabel('Naive score $( \\$ \\cdot mol \\ target^{-1})$', fontsize=12)\n\nplt.tight_layout()\nplt.savefig('Figure_S11.png', dpi=300)\nplt.show()\n"
] | [
[
"numpy.amax",
"matplotlib.pyplot.tight_layout",
"pandas.read_pickle",
"numpy.amin",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.array",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.tight_layout",
"scipy.stats.pearsonr",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"scipy.stats.spearmanr",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
jingshuw/sctransfer | [
"380c3f26934c26cd177e63aacf4f3bdcf9a29c47",
"380c3f26934c26cd177e63aacf4f3bdcf9a29c47"
] | [
"sctransfer/network.py",
"build/lib/sctransfer/layers.py"
] | [
"## code simplified from the dca package\n\nimport os\nimport numpy as np\nimport scanpy.api as sc\n\nimport keras\nfrom keras.layers import Input, Dense, Dropout, Activation, BatchNormalization\nfrom keras.models import Model\nfrom keras.objectives import mean_squared_error\nfrom keras import backend as K\n\nimport tensorflow as tf\n\nfrom .loss import NB\nfrom .layers import ConstantDispersionLayer, ColWiseMultLayer\n\n\nMeanAct = lambda x: tf.clip_by_value(K.exp(x), 1e-5, 1e6)\nDispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)\n\n\nclass Autoencoder():\n def __init__(self,\n input_size,\n output_size=None,\n hidden_size=(64, 32, 64),\n hidden_dropout=0.,\n input_dropout=0.,\n batchnorm=True,\n activation='relu',\n init='glorot_uniform',\n nonmissing_indicator = None,\n debug = False):\n\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.hidden_dropout = hidden_dropout\n self.input_dropout = input_dropout\n self.batchnorm = batchnorm\n self.activation = activation\n self.init = init\n self.loss = None\n self.extra_models = {}\n self.model = None\n self.input_layer = None\n self.sf_layer = None\n self.debug = debug\n self.nonmissing_indicator = nonmissing_indicator\n\n if self.output_size is None:\n self.output_size = input_size\n\n\n if isinstance(self.hidden_dropout, list):\n assert len(self.hidden_dropout) == len(self.hidden_size)\n else:\n self.hidden_dropout = [self.hidden_dropout]*len(self.hidden_size)\n\n def build(self):\n\n self.input_layer = Input(shape=(self.input_size,), name='count')\n self.sf_layer = Input(shape=(1,), name='size_factors')\n last_hidden = self.input_layer\n\n if self.input_dropout > 0.0:\n last_hidden = Dropout(self.input_dropout, name='input_dropout')(last_hidden)\n\n for i, (hid_size, hid_drop) in enumerate(zip(self.hidden_size, self.hidden_dropout)):\n center_idx = int(np.floor(len(self.hidden_size) / 2.0))\n if i == center_idx:\n layer_name = 'center'\n stage = 'center' # let downstream know where we are\n elif i < center_idx:\n layer_name = 'enc%s' % i\n stage = 'encoder'\n else:\n layer_name = 'dec%s' % (i-center_idx)\n stage = 'decoder'\n\n\n last_hidden = Dense(hid_size, activation=None, kernel_initializer=self.init,\n name=layer_name)(last_hidden)\n \n if self.batchnorm:\n last_hidden = BatchNormalization(center=True, scale=False)(last_hidden)\n ### TODO: check why scale = False\n\n last_hidden = Activation(self.activation, name='%s_act'%layer_name)(last_hidden)\n if hid_drop > 0.0:\n last_hidden = Dropout(hid_drop, name='%s_drop'%layer_name)(last_hidden)\n\n self.decoder_output = last_hidden\n self.build_output()\n\n def build_output(self):\n\n ## For Gaussian loss\n self.loss = mean_squared_error\n mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,\n name='mean')(self.decoder_output)\n output = ColWiseMultLayer(name='output')([mean, self.sf_layer])\n\n # keep unscaled output as an extra model\n self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)\n self.model = Model(inputs=[self.input_layer, self.sf_layer], outputs=output)\n\n\n ######## ADD WEIGHTS ###########\n\n\n def load_weights(self, filename):\n self.model.load_weights(filename)\n\n\n def predict(self, adata, colnames=None, dimreduce=True, reconstruct=True, error=True):\n\n res = {}\n colnames = adata.var_names.values if colnames is None else colnames\n rownames = adata.obs_names.values\n\n # print('Calculating reconstructions...')\n\n res['mean_norm'] = 
self.extra_models['mean_norm'].predict(adata.X)\n\n        return res\n\n\nclass NBConstantDispAutoencoder(Autoencoder):\n\n    def build_output(self):\n        mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,\n                     name='mean')(self.decoder_output)\n\n        # Plug in dispersion parameters via fake dispersion layer\n        disp = ConstantDispersionLayer(name='dispersion')\n        mean = disp(mean)\n\n        output = ColWiseMultLayer(name='output')([mean, self.sf_layer])\n\n        nb = NB(disp.theta_exp, nonmissing_indicator=self.nonmissing_indicator)\n        self.extra_models['dispersion'] = lambda: K.function([], [nb.theta])([])[0].squeeze()\n        self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)\n        self.model = Model(inputs=[self.input_layer, self.sf_layer], outputs=output)\n\n    def predict(self, adata, colnames=None, **kwargs):\n        colnames = adata.var_names.values if colnames is None else colnames\n        rownames = adata.obs_names.values\n        res = super().predict(adata, colnames=colnames, **kwargs)\n\n        res['dispersion'] = self.extra_models['dispersion']()\n\n        return res\n\n",
"from keras.engine.topology import Layer\nfrom keras.layers import Lambda, Dense\nfrom keras.engine.base_layer import InputSpec\nfrom keras import backend as K\nimport tensorflow as tf\n\n\nclass ConstantDispersionLayer(Layer):\n '''\n An identity layer which allows us to inject extra parameters\n such as dispersion to Keras models\n '''\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def build(self, input_shape):\n self.theta = self.add_weight(shape=(1, input_shape[1]),\n initializer='zeros',\n trainable=True,\n name='theta')\n self.theta_exp = tf.clip_by_value(K.exp(self.theta), 1e-3, 1e4)\n super().build(input_shape)\n\n def call(self, x):\n return tf.identity(x)\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass SliceLayer(Layer):\n def __init__(self, index, **kwargs):\n self.index = index\n super().__init__(**kwargs)\n\n def build(self, input_shape):\n if not isinstance(input_shape, list):\n raise ValueError('Input should be a list')\n\n super().build(input_shape)\n\n def call(self, x):\n assert isinstance(x, list), 'SliceLayer input is not a list'\n return x[self.index]\n\n def compute_output_shape(self, input_shape):\n return input_shape[self.index]\n\n\n\nclass ElementwiseDense(Dense):\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n assert (input_dim == self.units) or (self.units == 1), \\\n \"Input and output dims are not compatible\"\n\n # shape=(input_dim, ) makes this elementwise bcs of broadcasting\n self.kernel = self.add_weight(shape=(self.units,),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})\n self.built = True\n\n def call(self, inputs): # use * instead of tf.matmul, we need broadcasting here\n output = inputs * self.kernel\n if self.use_bias:\n output = output + self.bias\n if self.activation is not None:\n output = self.activation(output)\n return output\n\n\n\n\nnan2zeroLayer = Lambda(lambda x: tf.where(tf.is_nan(x), tf.zeros_like(x), x))\nColWiseMultLayer = lambda name: Lambda(lambda l: l[0]*(tf.matmul(tf.reshape(l[1], (-1,1)),\n tf.ones((1, l[0].get_shape()[1]),\n dtype=l[1].dtype))),\n name=name)\n"
] | [
[
"tensorflow.nn.softplus"
],
[
"tensorflow.is_nan",
"tensorflow.zeros_like",
"tensorflow.reshape",
"tensorflow.identity"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
glos/ioos_qc | [
"17e69ad582275be7ad0f5a2af40c11d810b344e8"
] | [
"ioos_qc/results.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\nimport logging\nfrom typing import NamedTuple, List\nfrom dataclasses import dataclass\nfrom collections import OrderedDict as odict, defaultdict\n\nimport numpy as np\nfrom ioos_qc.qartod import QartodFlags\n\nL = logging.getLogger(__name__) # noqa\n\n\nclass CallResult(NamedTuple):\n package: str\n test: str\n function: callable\n results: np.ndarray\n\n def __repr__(self):\n return f'<CallResult package={self.package} test={self.test}>'\n\n\nclass ContextResult(NamedTuple):\n stream_id: str\n results: List[CallResult]\n subset_indexes: np.ndarray\n data: np.ndarray = None\n tinp: np.ndarray = None\n zinp: np.ndarray = None\n lat: np.ndarray = None\n lon: np.ndarray = None\n\n def __repr__(self):\n return f'<ContextResult stream_id={self.stream_id}>'\n\n\n@dataclass\nclass CollectedResult:\n stream_id: str\n package: str\n test: str\n function: callable\n results: np.ma.core.MaskedArray = None\n data: np.ndarray = None\n tinp: np.ndarray = None\n zinp: np.ndarray = None\n lat: np.ndarray = None\n lon: np.ndarray = None\n\n def __repr__(self):\n return f'<CollectedResult stream_id={self.stream_id} package={self.package} test={self.test}>'\n\n def function_name(self) -> str:\n return self.function.__name__\n\n @property\n def hash_key(self) -> str:\n return f'{self.stream_id}:{self.package}.{self.test}'\n\n\ndef collect_results(results, how='list'):\n if how in ['list', list]:\n return collect_results_list(results)\n elif how in ['dict', dict]:\n return collect_results_dict(results)\n\n\ndef collect_results_list(results):\n \"\"\" Turns a list of ContextResult objects into an iterator of CollectedResult objects\n by combining the subset_index information in each ContextResult together into\n a single array of results.\n \"\"\"\n collected = odict()\n\n # ContextResults\n for r in results:\n\n cr = None\n # Shortcut for CallResult objects when someone uses QcConfig.run() directly\n # and doesn't go through a Stream object\n if isinstance(r, CallResult):\n cr = CollectedResult(\n stream_id=None,\n package=r.package,\n test=r.test,\n function=r.function,\n results=r.results,\n )\n collected[cr.hash_key] = cr\n continue\n\n # CallResults\n for tr in r.results:\n\n cr = CollectedResult(\n stream_id=r.stream_id,\n package=tr.package,\n test=tr.test,\n function=tr.function\n )\n\n if cr.hash_key not in collected:\n # Set the initial values\n cr.results = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=tr.results.dtype)\n cr.data = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.data.dtype)\n cr.tinp = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.tinp.dtype)\n cr.zinp = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.zinp.dtype)\n cr.lat = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.lat.dtype)\n cr.lon = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.lon.dtype)\n collected[cr.hash_key] = cr\n\n collected[cr.hash_key].results[r.subset_indexes] = tr.results\n\n if cr is not None:\n if r.subset_indexes.all():\n collected[cr.hash_key].data = r.data\n collected[cr.hash_key].tinp = r.tinp\n collected[cr.hash_key].zinp = r.zinp\n collected[cr.hash_key].lat = r.lat\n collected[cr.hash_key].lon = r.lon\n else:\n collected[cr.hash_key].data[r.subset_indexes] = r.data\n collected[cr.hash_key].tinp[r.subset_indexes] = r.tinp\n collected[cr.hash_key].zinp[r.subset_indexes] = r.zinp\n collected[cr.hash_key].lat[r.subset_indexes] = r.lat\n collected[cr.hash_key].lon[r.subset_indexes] = r.lon\n\n return 
list(collected.values())\n\n\ndef collect_results_dict(results):\n \"\"\" Turns a list of ContextResult objects into a dictionary of test results\n by combining the subset_index information in each ContextResult together into\n a single array of results. This is mostly here for historical purposes. Users\n should migrate to using the Result objects directly.\n \"\"\"\n # Magic for nested key generation\n # https://stackoverflow.com/a/27809959\n collected = defaultdict(lambda: defaultdict(odict))\n\n # ContextResults\n for r in results:\n\n # Shortcut for CallResult objects when someone uses QcConfig.run() directly\n # and doesn't go through a Stream object\n if isinstance(r, CallResult):\n collected[r.package][r.test] = r.results\n continue\n\n flag_arr = np.ma.empty_like(r.subset_indexes, dtype='uint8')\n flag_arr.fill(QartodFlags.UNKNOWN)\n\n # iterate over the CallResults\n for tr in r.results:\n testpackage = tr.package\n testname = tr.test\n testresults = tr.results\n\n if testname not in collected[r.stream_id][testpackage]:\n collected[r.stream_id][testpackage][testname] = np.copy(flag_arr)\n collected[r.stream_id][testpackage][testname][r.subset_indexes] = testresults\n\n return collected\n"
] | [
[
"numpy.copy",
"numpy.ma.masked_all",
"numpy.ma.empty_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
semio/zipline | [
"f13e9fd1253a500771bf10217b1d37031272c03c"
] | [
"tests/test_assets.py"
] | [
"#\n# Copyright 2015 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTests for the zipline.assets package\n\"\"\"\n\nimport sys\nfrom unittest import TestCase\n\nfrom datetime import datetime, timedelta\nimport pickle\nimport uuid\nimport warnings\n\nimport pandas as pd\nfrom pandas.tseries.tools import normalize_date\nfrom pandas.util.testing import assert_frame_equal\n\nfrom nose_parameterized import parameterized\nfrom numpy import full\n\nfrom zipline.assets import Asset, Equity, Future, AssetFinder\nfrom zipline.assets.futures import FutureChain\nfrom zipline.errors import (\n SymbolNotFound,\n MultipleSymbolsFound,\n SidAssignmentError,\n RootSymbolNotFound,\n)\nfrom zipline.finance.trading import with_environment\nfrom zipline.utils.test_utils import (\n all_subindices,\n make_rotating_asset_info,\n)\n\n\ndef build_lookup_generic_cases():\n \"\"\"\n Generate test cases for AssetFinder test_lookup_generic.\n \"\"\"\n\n unique_start = pd.Timestamp('2013-01-01', tz='UTC')\n unique_end = pd.Timestamp('2014-01-01', tz='UTC')\n\n dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')\n dupe_0_end = dupe_0_start + timedelta(days=1)\n\n dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')\n dupe_1_end = dupe_1_start + timedelta(days=1)\n\n frame = pd.DataFrame.from_records(\n [\n {\n 'sid': 0,\n 'file_name': 'duplicated',\n 'company_name': 'duplicated_0',\n 'start_date_nano': dupe_0_start.value,\n 'end_date_nano': dupe_0_end.value,\n 'exchange': '',\n },\n {\n 'sid': 1,\n 'file_name': 'duplicated',\n 'company_name': 'duplicated_1',\n 'start_date_nano': dupe_1_start.value,\n 'end_date_nano': dupe_1_end.value,\n 'exchange': '',\n },\n {\n 'sid': 2,\n 'file_name': 'unique',\n 'company_name': 'unique',\n 'start_date_nano': unique_start.value,\n 'end_date_nano': unique_end.value,\n 'exchange': '',\n },\n ],\n )\n finder = AssetFinder(metadata=frame)\n dupe_0, dupe_1, unique = assets = [\n finder.retrieve_asset(i)\n for i in range(3)\n ]\n\n dupe_0_start = dupe_0.start_date\n dupe_1_start = dupe_1.start_date\n cases = [\n ##\n # Scalars\n\n # Asset object\n (finder, assets[0], None, assets[0]),\n (finder, assets[1], None, assets[1]),\n (finder, assets[2], None, assets[2]),\n # int\n (finder, 0, None, assets[0]),\n (finder, 1, None, assets[1]),\n (finder, 2, None, assets[2]),\n # Duplicated symbol with resolution date\n (finder, 'duplicated', dupe_0_start, dupe_0),\n (finder, 'duplicated', dupe_1_start, dupe_1),\n # Unique symbol, with or without resolution date.\n (finder, 'unique', unique_start, unique),\n (finder, 'unique', None, unique),\n\n ##\n # Iterables\n\n # Iterables of Asset objects.\n (finder, assets, None, assets),\n (finder, iter(assets), None, assets),\n # Iterables of ints\n (finder, (0, 1), None, assets[:-1]),\n (finder, iter((0, 1)), None, assets[:-1]),\n # Iterables of symbols.\n (finder, ('duplicated', 'unique'), dupe_0_start, [dupe_0, unique]),\n (finder, ('duplicated', 'unique'), dupe_1_start, [dupe_1, unique]),\n # Mixed types\n (finder,\n 
('duplicated', 2, 'unique', 1, dupe_1),\n dupe_0_start,\n [dupe_0, assets[2], unique, assets[1], dupe_1]),\n ]\n return cases\n\n\nclass AssetTestCase(TestCase):\n\n def test_asset_object(self):\n self.assertEquals({5061: 'foo'}[Asset(5061)], 'foo')\n self.assertEquals(Asset(5061), 5061)\n self.assertEquals(5061, Asset(5061))\n\n self.assertEquals(Asset(5061), Asset(5061))\n self.assertEquals(int(Asset(5061)), 5061)\n\n self.assertEquals(str(Asset(5061)), 'Asset(5061)')\n\n def test_asset_is_pickleable(self):\n\n # Very wow\n s = Asset(\n 1337,\n symbol=\"DOGE\",\n asset_name=\"DOGECOIN\",\n start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),\n end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),\n first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),\n exchange='THE MOON',\n )\n s_unpickled = pickle.loads(pickle.dumps(s))\n\n attrs_to_check = ['end_date',\n 'exchange',\n 'first_traded',\n 'end_date',\n 'asset_name',\n 'start_date',\n 'sid',\n 'start_date',\n 'symbol']\n\n for attr in attrs_to_check:\n self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))\n\n def test_asset_comparisons(self):\n\n s_23 = Asset(23)\n s_24 = Asset(24)\n\n self.assertEqual(s_23, s_23)\n self.assertEqual(s_23, 23)\n self.assertEqual(23, s_23)\n\n self.assertNotEqual(s_23, s_24)\n self.assertNotEqual(s_23, 24)\n self.assertNotEqual(s_23, \"23\")\n self.assertNotEqual(s_23, 23.5)\n self.assertNotEqual(s_23, [])\n self.assertNotEqual(s_23, None)\n\n self.assertLess(s_23, s_24)\n self.assertLess(s_23, 24)\n self.assertGreater(24, s_23)\n self.assertGreater(s_24, s_23)\n\n def test_lt(self):\n self.assertTrue(Asset(3) < Asset(4))\n self.assertFalse(Asset(4) < Asset(4))\n self.assertFalse(Asset(5) < Asset(4))\n\n def test_le(self):\n self.assertTrue(Asset(3) <= Asset(4))\n self.assertTrue(Asset(4) <= Asset(4))\n self.assertFalse(Asset(5) <= Asset(4))\n\n def test_eq(self):\n self.assertFalse(Asset(3) == Asset(4))\n self.assertTrue(Asset(4) == Asset(4))\n self.assertFalse(Asset(5) == Asset(4))\n\n def test_ge(self):\n self.assertFalse(Asset(3) >= Asset(4))\n self.assertTrue(Asset(4) >= Asset(4))\n self.assertTrue(Asset(5) >= Asset(4))\n\n def test_gt(self):\n self.assertFalse(Asset(3) > Asset(4))\n self.assertFalse(Asset(4) > Asset(4))\n self.assertTrue(Asset(5) > Asset(4))\n\n def test_type_mismatch(self):\n if sys.version_info.major < 3:\n self.assertIsNotNone(Asset(3) < 'a')\n self.assertIsNotNone('a' < Asset(3))\n else:\n with self.assertRaises(TypeError):\n Asset(3) < 'a'\n with self.assertRaises(TypeError):\n 'a' < Asset(3)\n\n\nclass TestFuture(TestCase):\n future = Future(\n 2468,\n symbol='OMH15',\n root_symbol='OM',\n notice_date=pd.Timestamp('2014-01-20', tz='UTC'),\n expiration_date=pd.Timestamp('2014-02-20', tz='UTC'),\n contract_multiplier=500\n )\n\n def test_str(self):\n strd = self.future.__str__()\n self.assertEqual(\"Future(2468 [OMH15])\", strd)\n\n def test_repr(self):\n reprd = self.future.__repr__()\n self.assertTrue(\"Future\" in reprd)\n self.assertTrue(\"2468\" in reprd)\n self.assertTrue(\"OMH15\" in reprd)\n self.assertTrue(\"root_symbol='OM'\" in reprd)\n self.assertTrue((\"notice_date=Timestamp('2014-01-20 00:00:00+0000', \"\n \"tz='UTC')\") in reprd)\n self.assertTrue(\"expiration_date=Timestamp('2014-02-20 00:00:00+0000'\"\n in reprd)\n self.assertTrue(\"contract_multiplier=500\" in reprd)\n\n def test_reduce(self):\n reduced = self.future.__reduce__()\n self.assertEqual(Future, reduced[0])\n\n def test_to_and_from_dict(self):\n dictd = self.future.to_dict()\n 
self.assertTrue('root_symbol' in dictd)\n self.assertTrue('notice_date' in dictd)\n self.assertTrue('expiration_date' in dictd)\n self.assertTrue('contract_multiplier' in dictd)\n\n from_dict = Future.from_dict(dictd)\n self.assertTrue(isinstance(from_dict, Future))\n self.assertEqual(self.future, from_dict)\n\n def test_root_symbol(self):\n self.assertEqual('OM', self.future.root_symbol)\n\n\nclass AssetFinderTestCase(TestCase):\n\n def test_lookup_symbol_fuzzy(self):\n as_of = pd.Timestamp('2013-01-01', tz='UTC')\n frame = pd.DataFrame.from_records(\n [\n {\n 'sid': i,\n 'file_name': 'TEST@%d' % i,\n 'company_name': \"company%d\" % i,\n 'start_date_nano': as_of.value,\n 'end_date_nano': as_of.value,\n 'exchange': uuid.uuid4().hex,\n }\n for i in range(3)\n ]\n )\n finder = AssetFinder(frame, fuzzy_char='@')\n asset_0, asset_1, asset_2 = (\n finder.retrieve_asset(i) for i in range(3)\n )\n\n for i in range(2): # we do it twice to test for caching bugs\n self.assertIsNone(finder.lookup_symbol('test', as_of))\n self.assertEqual(\n asset_1,\n finder.lookup_symbol('test@1', as_of)\n )\n\n # Adding an unnecessary fuzzy shouldn't matter.\n self.assertEqual(\n asset_1,\n finder.lookup_symbol('test@1', as_of, fuzzy=True)\n )\n\n # Shouldn't find this with no fuzzy_str passed.\n self.assertIsNone(finder.lookup_symbol('test1', as_of))\n # Should find exact match.\n self.assertEqual(\n asset_1,\n finder.lookup_symbol('test1', as_of, fuzzy=True),\n )\n\n def test_lookup_symbol_resolve_multiple(self):\n\n # Incrementing by two so that start and end dates for each\n # generated Asset don't overlap (each Asset's end_date is the\n # day after its start date.)\n dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')\n df = pd.DataFrame.from_records(\n [\n {\n 'sid': i,\n 'file_name': 'existing',\n 'company_name': 'existing',\n 'start_date_nano': date.value,\n 'end_date_nano': (date + timedelta(days=1)).value,\n 'exchange': 'NYSE',\n }\n for i, date in enumerate(dates)\n ]\n )\n\n finder = AssetFinder(df)\n for _ in range(2): # Run checks twice to test for caching bugs.\n with self.assertRaises(SymbolNotFound):\n finder.lookup_symbol_resolve_multiple('non_existing', dates[0])\n\n with self.assertRaises(MultipleSymbolsFound):\n finder.lookup_symbol_resolve_multiple('existing', None)\n\n for i, date in enumerate(dates):\n # Verify that we correctly resolve multiple symbols using\n # the supplied date\n result = finder.lookup_symbol_resolve_multiple(\n 'existing',\n date,\n )\n self.assertEqual(result.symbol, 'existing')\n self.assertEqual(result.sid, i)\n\n @parameterized.expand(\n build_lookup_generic_cases()\n )\n def test_lookup_generic(self, finder, symbols, reference_date, expected):\n \"\"\"\n Ensure that lookup_generic works with various permutations of inputs.\n \"\"\"\n results, missing = finder.lookup_generic(symbols, reference_date)\n self.assertEqual(results, expected)\n self.assertEqual(missing, [])\n\n def test_lookup_generic_handle_missing(self):\n data = pd.DataFrame.from_records(\n [\n # Sids that will be found when we do lookups.\n {\n 'sid': 0,\n 'file_name': 'real',\n 'company_name': 'real',\n 'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),\n 'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),\n 'exchange': '',\n },\n {\n 'sid': 1,\n 'file_name': 'also_real',\n 'company_name': 'also_real',\n 'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),\n 'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),\n 'exchange': '',\n },\n # Sid whose end date is before our query 
date. We should\n                # still correctly find it.\n                {\n                    'sid': 2,\n                    'file_name': 'real_but_old',\n                    'company_name': 'real_but_old',\n                    'start_date_nano': pd.Timestamp('2002-1-1', tz='UTC'),\n                    'end_date_nano': pd.Timestamp('2003-1-1', tz='UTC'),\n                    'exchange': '',\n                },\n                # Sid whose start date is after our query date. We should\n                # not find it, so it is expected in the missing list.\n                {\n                    'sid': 3,\n                    'file_name': 'real_but_in_the_future',\n                    'company_name': 'real_but_in_the_future',\n                    'start_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),\n                    'end_date_nano': pd.Timestamp('2020-1-1', tz='UTC'),\n                    'exchange': 'THE FUTURE',\n                },\n            ]\n        )\n        finder = AssetFinder(data)\n        results, missing = finder.lookup_generic(\n            ['real', 1, 'fake', 'real_but_old', 'real_but_in_the_future'],\n            pd.Timestamp('2013-02-01', tz='UTC'),\n        )\n\n        self.assertEqual(len(results), 3)\n        self.assertEqual(results[0].symbol, 'real')\n        self.assertEqual(results[0].sid, 0)\n        self.assertEqual(results[1].symbol, 'also_real')\n        self.assertEqual(results[1].sid, 1)\n\n        self.assertEqual(len(missing), 2)\n        self.assertEqual(missing[0], 'fake')\n        self.assertEqual(missing[1], 'real_but_in_the_future')\n\n    def test_insert_metadata(self):\n        finder = AssetFinder()\n        finder.insert_metadata(0,\n                               asset_type='equity',\n                               start_date='2014-01-01',\n                               end_date='2015-01-01',\n                               symbol=\"PLAY\",\n                               foo_data=\"FOO\",)\n\n        # Test proper insertion\n        equity = finder.retrieve_asset(0)\n        self.assertIsInstance(equity, Equity)\n        self.assertEqual('PLAY', equity.symbol)\n        self.assertEqual(pd.Timestamp('2015-01-01', tz='UTC'),\n                         equity.end_date)\n\n        # Test invalid field\n        self.assertFalse('foo_data' in finder.metadata_cache[0])\n\n    def test_consume_metadata(self):\n\n        # Test dict consumption\n        finder = AssetFinder()\n        dict_to_consume = {0: {'symbol': 'PLAY'},\n                           1: {'symbol': 'MSFT'}}\n        finder.consume_metadata(dict_to_consume)\n\n        equity = finder.retrieve_asset(0)\n        self.assertIsInstance(equity, Equity)\n        self.assertEqual('PLAY', equity.symbol)\n\n        finder = AssetFinder()\n\n        # Test dataframe consumption\n        df = pd.DataFrame(columns=['asset_name', 'exchange'], index=[0, 1])\n        df['asset_name'][0] = \"Dave'N'Busters\"\n        df['exchange'][0] = \"NASDAQ\"\n        df['asset_name'][1] = \"Microsoft\"\n        df['exchange'][1] = \"NYSE\"\n        finder.consume_metadata(df)\n        self.assertEqual('NASDAQ', finder.metadata_cache[0]['exchange'])\n        self.assertEqual('Microsoft', finder.metadata_cache[1]['asset_name'])\n\n    def test_consume_asset_as_identifier(self):\n        # Build some end dates\n        eq_end = pd.Timestamp('2012-01-01', tz='UTC')\n        fut_end = pd.Timestamp('2008-01-01', tz='UTC')\n\n        # Build some simple Assets\n        equity_asset = Equity(1, symbol=\"TESTEQ\", end_date=eq_end)\n        future_asset = Future(200, symbol=\"TESTFUT\", end_date=fut_end)\n\n        # Consume the Assets\n        finder = AssetFinder()\n        finder.consume_identifiers([equity_asset, future_asset])\n\n        # Test equality with newly built Assets\n        self.assertEqual(equity_asset, finder.retrieve_asset(1))\n        self.assertEqual(future_asset, finder.retrieve_asset(200))\n        self.assertEqual(eq_end, finder.retrieve_asset(1).end_date)\n        self.assertEqual(fut_end, finder.retrieve_asset(200).end_date)\n\n    def test_sid_assignment(self):\n\n        # This metadata does not contain SIDs\n        metadata = {'PLAY': {'symbol': 'PLAY'},\n                    'MSFT': {'symbol': 'MSFT'}}\n\n        today = normalize_date(pd.Timestamp('2015-07-09', tz='UTC'))\n\n        # Build a finder that is allowed to assign sids\n        finder = AssetFinder(metadata=metadata,\n                             allow_sid_assignment=True)\n\n        # Verify that Assets were built and different sids were assigned\n        play = 

finder.lookup_symbol('PLAY', today)\n msft = finder.lookup_symbol('MSFT', today)\n self.assertEqual('PLAY', play.symbol)\n self.assertIsNotNone(play.sid)\n self.assertNotEqual(play.sid, msft.sid)\n\n def test_sid_assignment_failure(self):\n\n # This metadata does not contain SIDs\n metadata = {'PLAY': {'symbol': 'PLAY'},\n 'MSFT': {'symbol': 'MSFT'}}\n\n # Build a finder that is not allowed to assign sids, asserting failure\n with self.assertRaises(SidAssignmentError):\n AssetFinder(metadata=metadata, allow_sid_assignment=False)\n\n def test_security_dates_warning(self):\n\n # Build an asset with an end_date\n eq_end = pd.Timestamp('2012-01-01', tz='UTC')\n equity_asset = Equity(1, symbol=\"TESTEQ\", end_date=eq_end)\n\n # Catch all warnings\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered\n warnings.simplefilter(\"always\")\n equity_asset.security_start_date\n equity_asset.security_end_date\n equity_asset.security_name\n # Verify the warning\n self.assertEqual(3, len(w))\n for warning in w:\n self.assertTrue(issubclass(warning.category,\n DeprecationWarning))\n\n def test_lookup_future_chain(self):\n metadata = {\n # Notice day is today, so not valid\n 2: {\n 'symbol': 'ADN15',\n 'root_symbol': 'AD',\n 'asset_type': 'future',\n 'notice_date': pd.Timestamp('2015-05-14', tz='UTC'),\n 'start_date': pd.Timestamp('2015-01-01', tz='UTC')\n },\n 1: {\n 'symbol': 'ADV15',\n 'root_symbol': 'AD',\n 'asset_type': 'future',\n 'notice_date': pd.Timestamp('2015-08-14', tz='UTC'),\n 'start_date': pd.Timestamp('2015-01-01', tz='UTC')\n },\n # Starts trading today, so should be valid.\n 0: {\n 'symbol': 'ADF16',\n 'root_symbol': 'AD',\n 'asset_type': 'future',\n 'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),\n 'start_date': pd.Timestamp('2015-05-14', tz='UTC')\n },\n # Copy of the above future, but starts trading in August,\n # so it isn't valid.\n 3: {\n 'symbol': 'ADF16',\n 'root_symbol': 'AD',\n 'asset_type': 'future',\n 'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),\n 'start_date': pd.Timestamp('2015-08-01', tz='UTC')\n },\n\n }\n\n finder = AssetFinder(metadata=metadata)\n dt = pd.Timestamp('2015-05-14', tz='UTC')\n last_year = pd.Timestamp('2014-01-01', tz='UTC')\n first_day = pd.Timestamp('2015-01-01', tz='UTC')\n\n # Check that we get the expected number of contracts, in the\n # right order\n ad_contracts = finder.lookup_future_chain('AD', dt, dt)\n self.assertEqual(len(ad_contracts), 2)\n self.assertEqual(ad_contracts[0].sid, 1)\n self.assertEqual(ad_contracts[1].sid, 0)\n\n # Check that we get nothing if our knowledge date is last year\n ad_contracts = finder.lookup_future_chain('AD', dt, last_year)\n self.assertEqual(len(ad_contracts), 0)\n\n # Check that we get things that start on the knowledge date\n ad_contracts = finder.lookup_future_chain('AD', dt, first_day)\n self.assertEqual(len(ad_contracts), 1)\n\n def test_map_identifier_index_to_sids(self):\n # Build an empty finder and some Assets\n dt = pd.Timestamp('2014-01-01', tz='UTC')\n finder = AssetFinder()\n asset1 = Equity(1, symbol=\"AAPL\")\n asset2 = Equity(2, symbol=\"GOOG\")\n asset200 = Future(200, symbol=\"CLK15\")\n asset201 = Future(201, symbol=\"CLM15\")\n\n # Check for correct mapping and types\n pre_map = [asset1, asset2, asset200, asset201]\n post_map = finder.map_identifier_index_to_sids(pre_map, dt)\n self.assertListEqual([1, 2, 200, 201], post_map)\n for sid in post_map:\n self.assertIsInstance(sid, int)\n\n # Change order and check mapping again\n pre_map = 
[asset201, asset2, asset200, asset1]\n post_map = finder.map_identifier_index_to_sids(pre_map, dt)\n self.assertListEqual([201, 2, 200, 1], post_map)\n\n @with_environment()\n def test_compute_lifetimes(self, env=None):\n num_assets = 4\n trading_day = env.trading_day\n first_start = pd.Timestamp('2015-04-01', tz='UTC')\n\n frame = make_rotating_asset_info(\n num_assets=num_assets,\n first_start=first_start,\n frequency=env.trading_day,\n periods_between_starts=3,\n asset_lifetime=5\n )\n finder = AssetFinder(frame)\n\n all_dates = pd.date_range(\n start=first_start,\n end=frame.end_date.max(),\n freq=trading_day,\n )\n\n for dates in all_subindices(all_dates):\n expected_mask = full(\n shape=(len(dates), num_assets),\n fill_value=False,\n dtype=bool,\n )\n\n for i, date in enumerate(dates):\n it = frame[['start_date', 'end_date']].itertuples()\n for j, start, end in it:\n if start <= date <= end:\n expected_mask[i, j] = True\n\n # Filter out columns with all-empty columns.\n expected_result = pd.DataFrame(\n data=expected_mask,\n index=dates,\n columns=frame.sid.values,\n )\n actual_result = finder.lifetimes(dates)\n assert_frame_equal(actual_result, expected_result)\n\n\nclass TestFutureChain(TestCase):\n metadata = {\n 0: {\n 'symbol': 'CLG06',\n 'root_symbol': 'CL',\n 'asset_type': 'future',\n 'start_date': pd.Timestamp('2005-12-01', tz='UTC'),\n 'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),\n 'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},\n 1: {\n 'root_symbol': 'CL',\n 'symbol': 'CLK06',\n 'asset_type': 'future',\n 'start_date': pd.Timestamp('2005-12-01', tz='UTC'),\n 'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),\n 'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},\n 2: {\n 'symbol': 'CLQ06',\n 'root_symbol': 'CL',\n 'asset_type': 'future',\n 'start_date': pd.Timestamp('2005-12-01', tz='UTC'),\n 'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),\n 'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},\n 3: {\n 'symbol': 'CLX06',\n 'root_symbol': 'CL',\n 'asset_type': 'future',\n 'start_date': pd.Timestamp('2006-02-01', tz='UTC'),\n 'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),\n 'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}\n }\n\n asset_finder = AssetFinder(metadata=metadata)\n\n def test_len(self):\n \"\"\" Test the __len__ method of FutureChain.\n \"\"\"\n # None of the contracts have started yet.\n cl = FutureChain(self.asset_finder, lambda: '2005-11-30', 'CL')\n self.assertEqual(len(cl), 0)\n\n # Sids 0, 1, & 2 have started, 3 has not yet started.\n cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')\n self.assertEqual(len(cl), 3)\n\n # Sid 0 is still valid the day before its notice date.\n cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')\n self.assertEqual(len(cl), 3)\n\n # Sid 0 is now invalid, leaving only Sids 1 & 2 valid.\n cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')\n self.assertEqual(len(cl), 2)\n\n # Sid 3 has started, so 1, 2, & 3 are now valid.\n cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')\n self.assertEqual(len(cl), 3)\n\n # All contracts are no longer valid.\n cl = FutureChain(self.asset_finder, lambda: '2006-09-20', 'CL')\n self.assertEqual(len(cl), 0)\n\n def test_getitem(self):\n \"\"\" Test the __getitem__ method of FutureChain.\n \"\"\"\n cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')\n self.assertEqual(cl[0], 0)\n self.assertEqual(cl[1], 1)\n self.assertEqual(cl[2], 2)\n with self.assertRaises(IndexError):\n cl[3]\n\n cl = 
FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')\n        self.assertEqual(cl[0], 0)\n\n        cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')\n        self.assertEqual(cl[0], 1)\n\n        cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')\n        self.assertEqual(cl[-1], 3)\n\n    def test_root_symbols(self):\n        \"\"\" Test that different variations on root symbols are handled\n        as expected.\n        \"\"\"\n        # Make sure this successfully gets the chain for CL.\n        cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')\n        self.assertEqual(cl.root_symbol, 'CL')\n\n        # These root symbols don't exist, so RootSymbolNotFound should\n        # be raised immediately.\n        with self.assertRaises(RootSymbolNotFound):\n            FutureChain(self.asset_finder, lambda: '2005-12-01', 'CLZ')\n\n        with self.assertRaises(RootSymbolNotFound):\n            FutureChain(self.asset_finder, lambda: '2005-12-01', '')\n\n    def test_repr(self):\n        \"\"\" Test the __repr__ method of FutureChain.\n        \"\"\"\n        cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')\n        cl_feb = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL',\n                             as_of_date='2006-02-01')\n\n        # The default chain should not include the as of date.\n        self.assertEqual(repr(cl), \"FutureChain(root_symbol='CL')\")\n\n        # An explicit as of date should show up in the repr.\n        self.assertEqual(\n            repr(cl_feb),\n            (\"FutureChain(root_symbol='CL', \"\n             \"as_of_date='2006-02-01 00:00:00+00:00')\")\n        )\n\n    def test_as_of(self):\n        \"\"\" Test the as_of method of FutureChain.\n        \"\"\"\n        cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')\n\n        # Test that the as_of_date is set correctly to the future\n        feb = '2006-02-01'\n        cl_feb = cl.as_of(feb)\n        self.assertEqual(\n            cl_feb.as_of_date,\n            pd.Timestamp(feb, tz='UTC')\n        )\n\n        # Test that the as_of_date is set correctly to the past, with\n        # args of str, datetime.datetime, and pd.Timestamp.\n        feb_prev = '2005-02-01'\n        cl_feb_prev = cl.as_of(feb_prev)\n        self.assertEqual(\n            cl_feb_prev.as_of_date,\n            pd.Timestamp(feb_prev, tz='UTC')\n        )\n\n        feb_prev = datetime(year=2005, month=2, day=1)\n        cl_feb_prev = cl.as_of(feb_prev)\n        self.assertEqual(\n            cl_feb_prev.as_of_date,\n            pd.Timestamp(feb_prev, tz='UTC')\n        )\n\n        feb_prev = pd.Timestamp('2005-02-01')\n        cl_feb_prev = cl.as_of(feb_prev)\n        self.assertEqual(\n            cl_feb_prev.as_of_date,\n            pd.Timestamp(feb_prev, tz='UTC')\n        )\n\n        # The chain as of the current dt should always be the same as\n        # the default chain. 

Tests date as str, pd.Timestamp, and\n # datetime.datetime.\n self.assertEqual(cl[0], cl.as_of('2005-12-01')[0])\n self.assertEqual(cl[0], cl.as_of(pd.Timestamp('2005-12-01'))[0])\n self.assertEqual(\n cl[0],\n cl.as_of(datetime(year=2005, month=12, day=1))[0]\n )\n\n def test_offset(self):\n \"\"\" Test the offset method of FutureChain.\n \"\"\"\n cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')\n\n # Test that an offset forward sets as_of_date as expected\n self.assertEqual(\n cl.offset('3 days').as_of_date,\n cl.as_of_date + pd.Timedelta(days=3)\n )\n\n # Test that an offset backward sets as_of_date as expected, with\n # time delta given as str, datetime.timedelta, and pd.Timedelta.\n self.assertEqual(\n cl.offset('-1000 days').as_of_date,\n cl.as_of_date + pd.Timedelta(days=-1000)\n )\n self.assertEqual(\n cl.offset(timedelta(days=-1000)).as_of_date,\n cl.as_of_date + pd.Timedelta(days=-1000)\n )\n self.assertEqual(\n cl.offset(pd.Timedelta('-1000 days')).as_of_date,\n cl.as_of_date + pd.Timedelta(days=-1000)\n )\n\n # An offset of zero should give the original chain.\n self.assertEqual(cl[0], cl.offset(0)[0])\n self.assertEqual(cl[0], cl.offset(\"0 days\")[0])\n\n # A string that doesn't represent a time delta should raise a\n # ValueError.\n with self.assertRaises(ValueError):\n cl.offset(\"blah\")\n"
] | [
[
"pandas.DataFrame",
"pandas.Timedelta",
"pandas.util.testing.assert_frame_equal",
"pandas.date_range",
"pandas.DataFrame.from_records",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
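The offset and as_of assertions in the row above come down to plain pandas timestamp arithmetic. A minimal standalone sketch (pandas only, no zipline; variable names are illustrative):

import pandas as pd
from datetime import timedelta

as_of = pd.Timestamp('2005-12-01', tz='UTC')

# '3 days', timedelta(days=3) and pd.Timedelta(days=3) normalize to the same
# offset, which is what test_offset exercises through FutureChain.offset().
assert as_of + pd.Timedelta('3 days') == as_of + pd.Timedelta(days=3)
assert as_of + pd.Timedelta(timedelta(days=-1000)) == as_of + pd.Timedelta(days=-1000)

# A string that is not a valid delta raises ValueError, mirroring cl.offset('blah').
try:
    pd.Timedelta('blah')
except ValueError:
    pass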
keunhong/toolbox | [
"e8d1dadab4d9ccf8d78fe86ea933819ac6a07fca"
] | [
"toolbox/sampling/__init__.py"
] | [
"import logging\nimport random\nfrom typing import List, Tuple\n\nimport numpy as np\nfrom skimage.transform import resize\nfrom scipy.ndimage import zoom\n\nfrom toolbox import images\nfrom toolbox.images import crop, mask_bbox\nfrom .poisson_disk import sample_poisson_uniform\n\nlogger = logging.getLogger(__name__)\n\n\nclass PatchType:\n S2F_MASKED_BLACK = 'cropped_scaled_to_fit'\n S2F_MASKED_WHITE = 'cropped_scaled_to_fit_white'\n S2F = 'scaled_to_fit'\n RANDOM = 'random2'\n\n\ndef sample_poisson_mask(mask, r, k):\n ymin, ymax, xmin, xmax = mask_bbox(mask)\n height = ymax - ymin\n width = xmax - xmin\n points = np.array(sample_poisson_uniform(height, width, r, k,\n mask[ymin:ymax, xmin:xmax]))\n points[:, 0] += ymin\n points[:, 1] += xmin\n points = np.floor(points).astype(int)\n return points\n\n\ndef generate_dense_bboxes(\n mask: np.ndarray,\n scale=0.23,\n min_dist=0.091):\n mask_height, mask_width = mask.shape\n min_length = min(mask_height, mask_width)\n patch_sample_size = scale * min_length\n centers = sample_poisson_mask(mask, min_length * min_dist, 1000)\n half = int(patch_sample_size / 2)\n bboxes = []\n for center in centers:\n ycent, xcent = center\n bbox = (ycent - half,\n ycent + half + 1,\n xcent - half,\n xcent + half + 1)\n if (bbox[0] >= 0 and bbox[1] < mask_height\n and bbox[2] >= 0 and bbox[3] < mask_width):\n bboxes.append(bbox)\n print('bboxes={} centers={}, mask_size={}, min_dist={}'.format(\n len(bboxes), len(centers), mask.shape, min_length * min_dist))\n return bboxes\n\n\ndef random_crops(image, patch_size, num_crops):\n border_mask = np.ones(image.shape[:2], dtype=bool)\n left = patch_size/2\n right = image.shape[1] - patch_size/2\n top = patch_size/2\n bottom = image.shape[0] - patch_size/2\n border_mask[:, :left] = False\n border_mask[:, right:] = False\n border_mask[:top, :] = False\n border_mask[bottom:, :] = False\n\n yinds, xinds = np.where(border_mask)\n\n bboxes = []\n for i in range(num_crops):\n point_idx = np.random.randint(0, len(yinds))\n ycent, xcent = yinds[point_idx], xinds[point_idx]\n half = int(patch_size / 2)\n\n # Just squash the patch if it's out of bounds.\n bbox = (ycent - half,\n ycent + half + 1,\n xcent - half,\n xcent + half + 1)\n bboxes.append(bbox)\n\n return bboxes_to_patches(image, bboxes, patch_size)\n\n\ndef generate_random_bboxes(mask: np.ndarray, scale_range=(1.0, 1.0),\n num_patches=5, fixed_size=None):\n \"\"\"\n Generates random bounding boxes at random scales with centroid within the\n mask.\n :param mask: The contrained area for the centroid of the patch.\n :param min_scale: The min scale (multiple of the minimum length of the\n input mask) of the sampling.\n :param max_scale: The max scale (multiple of the minimum length of the\n input mask) of the sampling.\n :param num_patches: Number of patches to generate.\n :return: Bounding boxes.\n \"\"\"\n mask_height, mask_width = mask.shape[:2]\n min_length = min(mask_height, mask_width)\n\n yinds, xinds = np.where(mask)\n\n patch_bboxes = []\n patch_scales = []\n tries = 0\n while len(patch_bboxes) < num_patches:\n scale = random.uniform(*scale_range)\n patch_scales.append(scale)\n patch_size = scale * fixed_size if fixed_size else int(scale * min_length)\n point_idx = np.random.randint(0, len(yinds))\n ycent, xcent = yinds[point_idx], xinds[point_idx]\n half = int(patch_size / 2)\n\n # Just squash the patch if it's out of bounds.\n if (ycent - half < 0 or ycent + half > mask.shape[0] or\n xcent - half < 0 or xcent + half > mask.shape[1]):\n if tries < 100:\n tries += 
1\n continue\n\n bbox = (max(ycent - half, 0),\n min(ycent + half + 1, mask.shape[0]),\n max(xcent - half, 0),\n min(xcent + half + 1, mask.shape[1]))\n patch_bboxes.append(bbox)\n\n return patch_bboxes, patch_scales\n\n\ndef bboxes_to_patches(im: np.ndarray,\n bboxes: List[Tuple[int, int, int, int]],\n patch_size: int, use_pil=False):\n \"\"\"\n Converts bounding boxes to actual patches. Patches are all resized to the\n patch size regardless of the original bounding box size.\n :param im: To crop patch from.\n :param bboxes: Boxes defining the patch.\n :param patch_size: Patch size to return.\n :return: Image patches.\n \"\"\"\n patches = []\n for bbox in bboxes:\n cropped = crop(im, bbox)\n if cropped.shape[0] != patch_size or cropped.shape[1] != patch_size:\n scale = [patch_size/cropped.shape[0], patch_size/cropped.shape[1]]\n if len(im.shape) == 3:\n scale.append(1.0)\n if use_pil:\n cropped = resize(cropped, (patch_size, patch_size)) \\\n .astype(dtype=np.float32)\n else:\n cropped = zoom(cropped, scale, im.dtype, order=1)\n patches.append(cropped)\n return patches\n\n\ndef compute_mask_tight_patch(im: np.ndarray,\n mask: np.ndarray,\n patch_size: int):\n \"\"\"\n Computes a patch which contains all the pixels active in the mask scaled to\n the patch size.\n :param im:\n :param mask:\n :param patch_size:\n :return:\n \"\"\"\n bbox = images.compute_mask_bbox(mask)\n cropped = images.crop(im, bbox)\n resized = imresize(cropped, (patch_size, patch_size, cropped.shape[2]))\n return resized\n\n\ndef compute_minmax_thickness(mask):\n max_width = 0\n max_height = 0\n for row_id in range(mask.shape[0]):\n row = mask[row_id, :]\n split_locs = np.where(np.diff(row) != 0)[0] + 1\n for segment in (np.split(row, split_locs)):\n if segment[0] != 0:\n max_width = max(max_width, len(segment))\n for col_id in range(mask.shape[1]):\n col = mask[:, col_id]\n split_locs = np.where(np.diff(col) != 0)[0] + 1\n for segment in (np.split(col, split_locs)):\n if segment[0] != 0:\n max_height = max(max_height, len(segment))\n\n return min(max_width, max_height), max(max_width, max_height)\n"
] | [
[
"numpy.split",
"scipy.ndimage.zoom",
"numpy.ones",
"numpy.diff",
"numpy.floor",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
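The boxes in the row above use a (ymin, ymax, xmin, xmax) convention. A tiny sketch of the containment test that generate_dense_bboxes applies, in pure numpy (the toolbox helpers are not needed; sizes are made up):

import numpy as np

mask = np.zeros((100, 120), dtype=bool)
mask[20:80, 30:90] = True

def bbox_inside(bbox, shape):
    # bbox = (ymin, ymax, xmin, xmax), same ordering as the module above
    ymin, ymax, xmin, xmax = bbox
    return ymin >= 0 and ymax < shape[0] and xmin >= 0 and xmax < shape[1]

print(bbox_inside((10, 41, 10, 41), mask.shape))   # True
print(bbox_inside((90, 111, 10, 41), mask.shape))  # False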
medialandstudio/bias | [
"9548a2b66c0134c797fa3d00de3711cfef9dbb70"
] | [
"SCANNER_FTX_PERP.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 7 12:02:50 2021\n\n@author: ministudio\n\"\"\"\n\nfrom datetime import datetime, timezone\nimport pandas as pd\nimport numpy as np\nfrom alive_progress import alive_bar\n\n\ndef get_all_futures(ftx_client):\n tickers = ftx_client.fetchMarkets()\n list_perp =[]\n \n #with alive_bar(len(tickers),length=20) as bar:\n for ticker in tickers:\n if 'PERP' in ticker['id']: \n list_perp.append(ticker['id'])\n #bar()\n\n return list_perp\n\n\ndef scanner(day,month,year,ticker,ftx): \n results = pd.DataFrame(columns=['P/L %'])\n start_trade = datetime(year, month, day, 0, 0, 0)\n timestamp = start_trade.replace(tzinfo=timezone.utc).timestamp()\n candles = ftx.fetchOHLCV(ticker, timeframe='1h', since=timestamp*1000, limit=5000)\n candles_df = pd.DataFrame(candles, columns=['MTS','OPEN','HIGH','LOW','CLOSE','VOLUME'])\n volume = candles_df.VOLUME.sum()\n \n for j in range(0,24):\n # algoritmo per andare di candela in candela\n ledger = pd.DataFrame(columns=['POSITION','ENTRY PRICE','P_L SINGLE','P_L TOTAL'])\n long = True\n time_scanner = ''\n \n # calcolo l'offset tra una candela e l'altra di mio interesse \n offset = 12\n \n if j != 0:\n candles = candles[1:] \n \n try:\n for i in range(0,len(candles),offset):\n entry_price = candles[i][1]\n \n if i == 0:\n start = datetime.utcfromtimestamp(candles[i][0]/1000)\n end = datetime.utcfromtimestamp(candles[i+offset][0]/1000) #datetime.utcfromtimestamp(candles[i+offset+10][0]/1000)\n #print('FROM',start.strftime(\"%H:%M\"),'TO',end.strftime(\"%H:%M\"))\n var_pct = p_l_total = 0\n position = 'LONG'\n time_scanner = f'{start.strftime(\"%H:%M\")} to {end.strftime(\"%H:%M\")}'\n \n else:\n #r_exit_entry = candles[i][4]/candles[i-offset][4] #if not long else candles[i][4]/candles[i-offset][4]\n \n # calcolo il profitto\n if long:\n var_pct = round((candles[i-offset][1] - candles[i][1])/candles[i-offset][1]*100, 3)\n p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct\n \n if not long:\n var_pct = round((candles[i][1]-candles[i-offset][1])/candles[i][1]*100, 3)\n p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct\n \n if long:\n date = datetime.utcfromtimestamp(candles[i][0]/1000)\n position = 'LONG'\n long = False\n else:\n # quindi vado in short\n date = datetime.utcfromtimestamp(candles[i][0]/1000) #candles[i+10][0]/1000\n position = 'SHORT'\n long = True\n \n ledger.loc[date] = [position, entry_price, var_pct, p_l_total]\n \n results.loc[time_scanner] = round(ledger['P_L TOTAL'][-1],2)\n #print('P/L TOTAL :\\t',round(ledger['P_L TOTAL'][-1],2), '%\\n') \n \n except Exception as e: \n results.loc[time_scanner] = np.NAN\n \n return results, volume\n\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
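The scanner's running total (ledger['P_L TOTAL'].iloc[-1] + var_pct) is ordinary DataFrame bookkeeping; a minimal sketch with made-up numbers (labels and prices are illustrative):

import pandas as pd

ledger = pd.DataFrame(columns=['POSITION', 'ENTRY PRICE', 'P_L SINGLE', 'P_L TOTAL'])
ledger.loc['t0'] = ['LONG', 100.0, 0.0, 0.0]

for label, var_pct in [('t1', 1.5), ('t2', -0.7)]:
    # cumulative P/L is the previous running total plus this trade's percentage
    p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct
    ledger.loc[label] = ['SHORT', 101.0, var_pct, p_l_total]

print(round(ledger['P_L TOTAL'].iloc[-1], 2))  # 0.8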
srijithmass/RANK-OF-A-MATRIX | [
"f0b2dacac02159a1385cfa23b180859444013911"
] | [
"Rank of a matrix.py"
] | [
"#Program to find the rank of a matrix.\r\n#Developed by: SRIJITH R\r\n#RegisterNumber: 21004191\r\nimport numpy as np\r\nA=np.array([[5,-3,-10],[2,2,-3],[-3,-1,5]])\r\nval=np.linalg.matrix_rank(A)\r\nprint(val)"
] | [
[
"numpy.array",
"numpy.linalg.matrix_rank"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
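numpy.linalg.matrix_rank counts the singular values that exceed a tolerance scaled by the matrix size and floating-point precision; a quick cross-check against the SVD for the same matrix (the tolerance formula below matches numpy's documented default):

import numpy as np

A = np.array([[5, -3, -10], [2, 2, -3], [-3, -1, 5]])
s = np.linalg.svd(A, compute_uv=False)
tol = s.max() * max(A.shape) * np.finfo(s.dtype).eps
assert np.linalg.matrix_rank(A) == int((s > tol).sum())  # rank 3, since det(A) = -2 != 0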
negiaditya/PROJECTS-Data_Science | [
"d26e1fdfc6ce51f02e65c4dbca3edfb5cd97f0a1"
] | [
"Data Science salary prediction/FlaskAPI/app.py"
] | [
"import flask\nfrom flask import Flask,jsonify,request\nimport json\nfrom data_input import data_in\nimport numpy as np\nimport pickle\n\n\n\ndef load_models():\n\tfile_name = './models/model_file.p'\n\twith open(file_name,'rb') as pickled:\n\t\tdata = pickle.load(pickled)\n\t\tmodel = data['model']\n\treturn model\n\napp = Flask(__name__)\n\[email protected]('/predict',methods=['GET'])\ndef predict():\n\trequest_json = request.get_json()\n\tx = request_json['input']\n\tx_in = np.array(x).reshape(1,-1)\n\tmodel = load_models()\n\tprediction = model.predict(x_in)[0]\n\tresponse = json.dumps({'response': prediction})\n\treturn response,200\n\n\nif __name__ == '__main__':\n\tapplication.run(debug=True)"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
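A client for the /predict route above might look like this; the URL, port, and feature-vector length are assumptions, and note the handler reads a JSON body even though it only registers GET:

import requests

x = [0.5] * 10  # length must match whatever the pickled model expects
resp = requests.get('http://127.0.0.1:5000/predict', json={'input': x})
print(resp.status_code, resp.json()['response'])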
lfdversluis/wta-tools | [
"e9d505df03fff9bb57208dfb82212977ef5e7ca2",
"e9d505df03fff9bb57208dfb82212977ef5e7ca2"
] | [
"parse_scripts/parquet_parsers/galaxy_to_parquet.py",
"parse_scripts/validate_parquet_files.py"
] | [
"import json\nimport os\nimport sys\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\n\nfrom objects.task import Task\nfrom objects.workflow import Workflow\nfrom objects.workload import Workload\npd.set_option('display.max_columns', None)\n\n\nUSAGE = 'Usage: python(3) ./galaxy_to_parquet.py galaxy_folder'\nNAME = 'Galaxy'\nTARGET_DIR = os.path.join(os.path.dirname(os.getcwd()), 'output_parquet', NAME)\nDATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'\nEPOCH = datetime(1970, 1, 1)\nJOBS = None\nMETRICS = None\nWORKFLOWS = None\nWORKFLOW_INVOCATIONS = None\nWORKFLOW_STEPS = None\nWORKFLOW_INVOKE_STEPS = None\nWORKFLOW_CONNECTIONS = None\nWORKFLOW_STEP_INPUT = None\n\n\ndef read_files(folder_path):\n global METRICS\n METRICS = pd.read_csv(os.path.join(folder_path, 'job_metrics_numeric.csv'),\n names=[\"id\", \"job_id\", \"plugin\", \"metric_name\", \"metric_value\"], dtype={\n \"id\": np.float,\n \"job_id\": np.float,\n \"plugin\": np.str,\n \"metric_name\": np.str,\n \"metric_value\": np.float,\n })\n print(\"Done with reading metrics\")\n global WORKFLOWS\n WORKFLOWS = pd.read_csv(os.path.join(folder_path, 'workflows.csv'),\n names=[\"id\", \"create_time\", \"update_time\", \"stored_workflow_id\", \"has_cycles\", \"has_errors\",\n \"parent_workflow_id\", \"uuid\"], dtype={\n \"id\": np.float,\n \"create_time\": np.str,\n \"update_time\": np.str,\n \"stored_workflow_id\": np.float,\n \"has_cycles\": np.str,\n \"has_errors\": np.str,\n \"parent_workflow_id\": np.float,\n \"uuid\": np.str,\n })\n print(\"Done with reading workflows\")\n\n global WORKFLOW_INVOCATIONS\n WORKFLOW_INVOCATIONS = pd.read_csv(os.path.join(folder_path, 'workflow-invocations.csv'),\n names=[\"id\", \"create_time\", \"update_time\", \"workflow_id\", \"state\", \"scheduler\",\n \"handler\"], dtype={\n \"id\": np.float,\n \"create_time\": np.str,\n \"update_time\": np.str,\n \"workflow_id\": np.float,\n \"state\": np.str,\n \"scheduler\": np.str,\n \"handler\": np.str,\n })\n print(\"Done with reading workflow invocations\")\n\n global WORKFLOW_STEPS\n WORKFLOW_STEPS = pd.read_csv(os.path.join(folder_path, 'workflow-steps.csv'),\n names=[\"id\", \"create_time\", \"update_time\", \"workflow_id\", \"type\", \"tool_id\",\n \"tool_version\", \"order_index\", \"subworkflow_id\", \"dynamic_tool_id\"], dtype={\n \"id\": np.float,\n \"create_time\": np.str,\n \"update_time\": np.str,\n \"workflow_id\": np.float,\n \"type\": np.str,\n \"tool_id\": np.str,\n \"tool_version\": np.str,\n \"order_index\": np.float,\n \"subworkflow_id\": np.str,\n \"dynamic_tool_id\": np.str,\n })\n print(\"Done with reading workflow steps\")\n\n global WORKFLOW_INVOKE_STEPS\n WORKFLOW_INVOKE_STEPS = pd.read_csv(os.path.join(folder_path, 'workflow-invoke-steps.csv'), keep_default_na=True,\n names=[\"id\", \"create_time\", \"update_time\", \"workflow_invocation_id\",\n \"workflow_step_id\", \"job_id\", \"state\"], dtype={\n \"id\": np.float,\n \"create_time\": np.str,\n \"update_time\": np.str,\n \"workflow_invocation_id\": np.float,\n \"workflow_step_id\": np.float,\n \"job_id\": np.float,\n \"state\": np.str,\n })\n print(\"Done with reading workflow invocation steps\")\n\n global WORKFLOW_CONNECTIONS\n WORKFLOW_CONNECTIONS = pd.read_csv(os.path.join(folder_path, 'workflow-connections.csv'),\n names=[\"id\", \"output_step_id\", \"input_step_input_id\", \"output_name\",\n \"input_subworkflow_step_id\"], dtype={\n \"id\": np.float,\n \"output_step_id\": np.float,\n \"input_step_input_id\": np.float,\n \"output_name\": np.str,\n 
\"input_subworkflow_step_id\": np.float,\n })\n print(\"Done with reading workflow connections\")\n\n global WORKFLOW_STEP_INPUT\n WORKFLOW_STEP_INPUT = pd.read_csv(os.path.join(folder_path, 'workflow-step-input.csv'),\n names=[\"id\", \"workflow_step_id\", \"name\"], dtype={\n \"id\": np.float,\n \"workflow_step_id\": np.float,\n \"name\": np.str,\n })\n print(\"Done with reading workflow step input\")\n\n\ndef check_if_empty(*args):\n for field in args:\n if np.isnan(field):\n return True\n\n\ndef compute_children(step_job_ids, tasks_in_workflow):\n for task in tasks_in_workflow:\n step_id = None\n for pair in step_job_ids:\n # find task id's corresponding step id\n if pair[1] == task.id:\n step_id = pair[0]\n\n children = set()\n df = WORKFLOW_CONNECTIONS.loc[(WORKFLOW_CONNECTIONS[\"output_step_id\"] == step_id)]\n\n if df.empty:\n task.children = children\n continue\n\n for wc_row in df.itertuples():\n\n # find id for subsequent connected step\n row = WORKFLOW_STEP_INPUT.loc[(WORKFLOW_STEP_INPUT[\"id\"] == wc_row[3])]\n\n child_step_id = row.iloc[0][\"workflow_step_id\"]\n\n # find child_step_id in step-job pairs and add corresponding job_id to children set\n for pair2 in step_job_ids:\n if pair2[0] == child_step_id:\n children.add(np.int64(pair2[1]))\n for child in tasks_in_workflow:\n if child.id == pair2[1]:\n child.parents.append(np.int64(task.id))\n break\n break\n task.children = children\n for task2 in tasks_in_workflow:\n unique_parents = set(task2.parents)\n unique_parents_list = list(unique_parents)\n task2.parents = unique_parents_list\n\n return tasks_in_workflow\n\n\ndef parse():\n os.makedirs(TARGET_DIR, exist_ok=True)\n task_counter = 0\n workflow_counter = 0\n processed_workflows = []\n final_workflows = []\n final_tasks = []\n task_offset = 0\n workflow_offset = None\n\n for wi_row in WORKFLOW_INVOCATIONS.itertuples():\n flag = False\n # only use one execution of a workflow\n if wi_row[4] in processed_workflows:\n continue\n\n # check if workflow contains cycles\n workflow_row = WORKFLOWS.loc[(WORKFLOWS[\"id\"] == getattr(wi_row, \"workflow_id\"))]\n if workflow_row.iloc[0][\"has_cycles\"] == \"t\":\n continue\n\n # workflows contain a number of workflow steps but this is not the ID of their actual execution\n # this list is used to tie the workflow steps to their actual execution ID\n step_job_ids = []\n\n tasks_in_workflow = []\n workflow_index = wi_row[4]\n # check if workflow id is null\n if pd.isnull(workflow_index):\n continue\n\n df = WORKFLOW_INVOKE_STEPS.loc[(WORKFLOW_INVOKE_STEPS[\"workflow_invocation_id\"] == getattr(wi_row, \"id\"))]\n\n # check if workflow is not empty\n if df.empty:\n processed_workflows.append(workflow_index)\n continue\n\n for wis_row in df.itertuples():\n\n # check if entry in WF_INVOKE_STEPS has the same wf_invocation_id\n if getattr(wis_row, \"workflow_invocation_id\") == getattr(wi_row, \"id\"):\n\n # check if required fields are not empty\n if check_if_empty(getattr(wis_row, \"workflow_step_id\"), getattr(wis_row, \"job_id\")):\n processed_workflows.append(workflow_index)\n flag = True\n break\n\n # get step id and corresponding execution id\n step_job_pair = [getattr(wis_row, \"workflow_step_id\"), getattr(wis_row, \"job_id\")]\n step_job_ids.append(step_job_pair)\n\n job_id = getattr(wis_row, \"job_id\")\n submit_time = int(((datetime.strptime(getattr(wis_row, \"create_time\"),DATETIME_FORMAT) - EPOCH).total_seconds()) * 1000)\n job_metrics = METRICS.loc[(METRICS[\"job_id\"] == job_id)]\n runtime = 
job_metrics.loc[(job_metrics[\"metric_name\"] == \"runtime_seconds\"), 'metric_value'] * 1000\n memory = job_metrics.loc[(job_metrics[\"metric_name\"] == \"memory.memsw.max_usage_in_bytes\"), 'metric_value']\n cpu_time = job_metrics.loc[(job_metrics[\"metric_name\"] == \"cpuacct.usage\"), 'metric_value']\n\n # check if any required fields are empty\n if runtime.empty or memory.empty or cpu_time.empty:\n processed_workflows.append(workflow_index)\n flag = True\n break\n\n # used to find the task with the lowest submit time; this time will be used as the offset\n if task_offset == 0:\n task_offset = submit_time\n elif submit_time < task_offset:\n task_offset = submit_time\n\n runtime = runtime.iloc[0]\n memory = memory.iloc[0]\n cpu_time = cpu_time.iloc[0] / 1000000\n\n if cpu_time > runtime:\n cpu_time = runtime\n\n task = Task(np.int64(job_id), \"Composite\", submit_time, 0, runtime, 1, None, workflow_index, -1, \"cpu-time\", resource=cpu_time, memory_requested=memory)\n task_counter += 1\n tasks_in_workflow.append(task)\n flag = False\n\n # if flag is true, a task in the workflow is not usable so we skip it\n if flag:\n processed_workflows.append(workflow_index)\n continue\n\n # compute children of tasks\n final_tasks.extend(compute_children(step_job_ids, tasks_in_workflow))\n\n workflow_submit_time = int(((datetime.strptime(getattr(wi_row, \"create_time\"),DATETIME_FORMAT) - EPOCH).total_seconds()) * 1000)\n\n # find smallest workflow submit time as offset\n if workflow_offset is None:\n workflow_offset = workflow_submit_time\n elif workflow_submit_time < workflow_offset:\n workflow_offset = workflow_submit_time\n\n workflow = Workflow(workflow_index, workflow_submit_time, tasks_in_workflow, \"core\", \"Engineering\",\n \"Galaxy\", \"Biological Engineering\")\n workflow.compute_critical_path()\n processed_workflows.append(workflow_index)\n final_workflows.append(workflow)\n workflow_counter += 1\n\n # apply offset\n for x in final_tasks:\n x.ts_submit = x.ts_submit - task_offset\n\n # apply offset\n for y in final_workflows:\n y.ts_submit = y.ts_submit - workflow_offset\n\n # make tasks dataframe\n task_df = pd.DataFrame([t.get_parquet_dict() for t in final_tasks])\n\n # create parquet file in specified folder\n os.makedirs(os.path.join(TARGET_DIR, Task.output_path()), exist_ok=True)\n task_df.to_parquet(os.path.join(TARGET_DIR, Task.output_path(), \"part.0.parquet\"), engine=\"pyarrow\")\n\n # make workflows dataframe\n workflow_df = pd.DataFrame([w.get_parquet_dict() for w in final_workflows])\n\n # create parquet file in specified folder\n os.makedirs(os.path.join(TARGET_DIR, Workflow.output_path()), exist_ok=True)\n workflow_df.to_parquet(os.path.join(TARGET_DIR, Workflow.output_path(), \"part.0.parquet\"), engine=\"pyarrow\")\n\n json_dict = Workload.get_json_dict_from_pandas_task_dataframe(task_df,\n domain=\"Biological Engineering\",\n authors=[\"Jaro Bosch\", \"Laurens Versluis\"],\n workload_description=\"Traces from different biomedical research workflows, executed on the public Galaxy server in Europe.\"\n )\n os.makedirs(os.path.join(TARGET_DIR, Workload.output_path()), exist_ok=True)\n\n with open(os.path.join(TARGET_DIR, Workload.output_path(), \"generic_information.json\"), \"w\") as file:\n # Need this on 32-bit python.\n def default(o):\n if isinstance(o, np.int64): return int(o)\n raise TypeError\n\n file.write(json.dumps(json_dict, default=default))\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print(USAGE)\n sys.exit(1)\n\n folder_path = sys.argv[1]\n 
read_files(folder_path)\n parse()\n",
"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mar 1 2019\n\nValidation tool for parquet files\n\nv.1.2\n\"\"\"\nimport json\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom objects.datatransfer import Datatransfer\nfrom objects.task import Task\nfrom objects.workflow import Workflow\n\nUSAGE = 'Usage: python(3) ./validate_parquet_files.py <path_to_trace>'\n\n\nclass ParquetValidator(object):\n\n @staticmethod\n def validate_field(pdf, fieldname):\n try:\n if pdf[fieldname].isnull().values.any():\n print(\"\\t{}: contains NaN or empty values\".format(fieldname))\n elif pdf[fieldname].dtype in [np.float64, np.int64, np.int32]:\n if pdf[fieldname].isin({-1}).any():\n print(\"\\t{}: contains -1 values\".format(fieldname))\n if pdf[fieldname].isin({0}).any():\n print(\"\\t{}: contains 0 values\".format(fieldname))\n except KeyError:\n print(\"\\t{}: field is not found in the trace file\".format(fieldname))\n\n # Validate if all mandatory Workflow fields exist\n def validate_workflow_fields(self, workflow_pdf):\n for field_name, column_type in Workflow.get_parquet_meta_dict().items():\n self.validate_field(workflow_pdf, field_name)\n # Maybe not complete\n\n # Validate if all mandatory Task fields exist\n def validate_task_fields(self, task_pdf):\n for field_name, column_type in Task.get_parquet_meta_dict().items():\n self.validate_field(task_pdf, field_name)\n\n # Validate if all mandatory DataTransfer fields exist\n def validate_datatransfer_fields(self, datatransfer_pdf):\n for field_name, column_type in Datatransfer.get_parquet_meta_dict().items():\n self.validate_field(datatransfer_pdf, field_name)\n\n def validate_datatransfers(self, datatransfer_df, task_df):\n\n # check DataTransfers\n err_nr_ts_submit = 0\n nr_dt_df = 0\n err_nr_viol_size = 0\n err_nr_viol_transfertimes = 0\n nr_of_empty_src = 0\n nr_of_not_found_src_ids = 0\n nr_of_empty_dest = 0\n nr_of_not_found_dest_ids = 0\n\n id_set = set(task_df[\"id\"])\n for index, row in datatransfer_df.iterrows():\n nr_dt_df = nr_dt_df + 1\n # Check ts_submit\n try:\n if int(row['ts_submit']) < -1:\n if err_nr_ts_submit == 0 or err_nr_ts_submit == 1:\n err_nr_ts_submit = 1\n else:\n err_nr_ts_submit = 3\n except ValueError:\n if err_nr_ts_submit == 0 or err_nr_ts_submit == 2:\n err_nr_ts_submit = 2\n else:\n err_nr_ts_submit = 3\n\n # size\n try:\n if int(row['size']) < 0:\n if err_nr_viol_size == 0 or err_nr_viol_size == 1:\n err_nr_viol_size = 1\n else:\n err_nr_viol_size = 3\n except ValueError:\n if err_nr_viol_size == 0 or err_nr_viol_size == 2:\n err_nr_viol_size = 2\n else:\n err_nr_viol_size = 3\n\n # transfertime\n try:\n if int(row['transfertime']) < 0:\n if err_nr_viol_transfertimes == 0 or err_nr_viol_transfertimes == 1:\n err_nr_viol_transfertimes = 1\n else:\n err_nr_viol_transfertimes = 3\n except ValueError:\n if err_nr_viol_transfertimes == 0 or err_nr_viol_transfertimes == 2:\n err_nr_viol_transfertimes = 2\n else:\n err_nr_viol_transfertimes = 3\n # destination\n dest = row['destination']\n if dest == -1:\n nr_of_empty_dest = nr_of_empty_dest + 1\n else:\n if dest not in id_set:\n nr_of_not_found_dest_ids = nr_of_not_found_dest_ids + 1\n # source\n src = row['source']\n if src == -1:\n nr_of_empty_src = nr_of_empty_src + 1\n else:\n if src not in id_set:\n nr_of_not_found_src_ids = nr_of_not_found_src_ids + 1\n # print\n print(\"\\nValidating DataTransfers ...\")\n # validate for empty fields\n 
self.validate_datatransfer_fields(datatransfer_df)\n if err_nr_ts_submit == 1:\n print('ts_submit: One or more values are not set, unknown or negative')\n elif err_nr_ts_submit == 2:\n print('ts_submit: One or more values are not an integer -> use unix time')\n elif err_nr_ts_submit == 3:\n print('ts_submit: One or more values are not set, unknown or negative, and not an integer -> use unix time')\n if err_nr_viol_size == 1:\n print('size: One or more data size values are smaller than 0')\n elif err_nr_viol_size == 2:\n print('size: One or more data size values are not integer')\n elif err_nr_viol_size == 3:\n print('size: One or more data size values are smaller than 0 not integer')\n if err_nr_viol_transfertimes == 1:\n print('transfertimes: One or more transfertime values are smaller than 0')\n elif err_nr_viol_transfertimes == 2:\n print('transfertimes: One or more values are not integer')\n elif err_nr_viol_transfertimes == 3:\n print('transfertimes: One or more values are smaller than 0 and not integer')\n\n print(\"destinations: (please check numbers)\\n\\ttotal nr. of empty destinations:\\t{0}\\t({1:3.2f} %)\".format(\n nr_of_empty_dest, (nr_of_empty_dest / nr_dt_df) * 100.0))\n print(\"\\ttotal nr. of not found destination ids: {0}\\t({1:3.2f} %)\".format(nr_of_not_found_dest_ids, (\n nr_of_not_found_dest_ids / nr_dt_df) * 100.0))\n\n print(\"sources: (please check numbers)\\n\\ttotal nr. of empty sources:\\t{0}\\t({1:3.2f} %)\".format(\n nr_of_empty_src, (\n nr_of_empty_src / nr_dt_df) * 100.0))\n print(\"\\ttotal nr. of not found source ids: {0}\\t({1:3.2f} %)\".format(\n nr_of_not_found_src_ids, (nr_of_not_found_src_ids / nr_dt_df) * 100.0))\n\n # Validates Workflows, Task fields and their relations\n def validate_workflows_tasks(self, workflow_pdf, task_pdf):\n # Check meta properties\n if workflow_pdf['id'].nunique() != len(workflow_pdf):\n print(\"Workflows IDs are not unique!\")\n exit(-1)\n\n if workflow_pdf['id'].nunique() != task_pdf['workflow_id'].nunique():\n print(\"Not all workflows are in task_pdf!\")\n exit(-1)\n\n # check Tasks\n nr_task_df = len(task_pdf)\n\n def check_children(df):\n nr_of_empty_childs = 0\n nr_of_childs = 0\n nr_of_not_found_child_ids = 0\n nr_of_empty_parents = 0\n nr_of_parents = 0\n nr_of_not_found_parent_ids = 0\n\n id_set = set(df['id'])\n for index, row in df.iterrows():\n # Check children\n children = row['children']\n if len(children) == 0:\n nr_of_empty_childs = nr_of_empty_childs + 1\n else:\n children_set = set(children)\n nr_of_childs += len(children)\n if len(children) != len(children_set):\n print(\"Duplicate children found\")\n diff = children_set - id_set\n if len(diff) > 0:\n print(diff)\n if diff:\n print(\"Keys not found:\" \",\".join(diff))\n nr_of_not_found_child_ids += len(diff)\n\n # Check parents\n parents = row['parents']\n if len(parents) == 0:\n nr_of_empty_parents = nr_of_empty_parents + 1\n else:\n parent_set = set(parents)\n nr_of_parents += len(parents)\n if len(parents) != len(parent_set):\n print(\"Duplicate parents found\")\n diff = parent_set - id_set\n if len(diff):\n print(\"Keys not found:\" \",\".join(diff))\n nr_of_not_found_parent_ids += len(diff)\n\n return pd.DataFrame(\n {\n \"nr_of_empty_childs\": nr_of_empty_childs,\n \"nr_of_childs\": nr_of_childs,\n \"nr_of_not_found_child_ids\": nr_of_not_found_child_ids,\n \"nr_of_empty_parents\": nr_of_empty_parents,\n \"nr_of_parents\": nr_of_parents,\n \"nr_of_not_found_parent_ids\": nr_of_not_found_parent_ids,\n }, index=[0])\n\n result_df = 
task_pdf.groupby(\"workflow_id\").apply(check_children)\n\n nr_of_childs = result_df['nr_of_childs'].sum()\n nr_of_empty_childs = result_df['nr_of_empty_childs'].sum()\n nr_of_not_found_child_ids = result_df['nr_of_not_found_child_ids'].sum()\n nr_of_parents = result_df['nr_of_parents'].sum()\n nr_of_empty_parents = result_df['nr_of_empty_parents'].sum()\n nr_of_not_found_parent_ids = result_df['nr_of_not_found_parent_ids'].sum()\n\n err_nr_viol_runtime = 0\n err_nr_ts_submit = 0\n if nr_of_childs != nr_of_parents:\n print(\"The amount of children and parents are not equal! Num children: {} - num parents: {})\".format(\n nr_of_childs, nr_of_parents))\n exit(-1)\n\n # Check ts_submit\n for index, row in task_pdf.iterrows():\n try:\n if int(row['ts_submit']) < -1:\n if err_nr_ts_submit == 0 or err_nr_ts_submit == 1:\n err_nr_ts_submit = 1\n else:\n err_nr_ts_submit = 3\n except ValueError:\n if err_nr_ts_submit == 0 or err_nr_ts_submit == 2:\n err_nr_ts_submit = 2\n else:\n err_nr_ts_submit = 3\n\n # Check runtime\n try:\n if float(row['runtime']) < 0.0:\n if err_nr_viol_runtime == 0 or err_nr_viol_runtime == 1:\n err_nr_viol_runtime = 1\n else:\n err_nr_viol_runtime = 3\n except ValueError:\n if err_nr_viol_runtime == 0 or err_nr_viol_runtime == 2:\n err_nr_viol_runtime = 2\n else:\n err_nr_viol_runtime = 3\n\n print(\"\\nValidating Tasks ...\")\n self.validate_task_fields(task_pdf)\n\n invalid_trace = False\n\n print(\"children: (please check numbers)\\n\\ttotal nr. of tasks without children: {0}\\t({1:3.2f} %)\".format(\n nr_of_empty_childs, (nr_of_empty_childs / nr_task_df) * 100.0))\n print(\"\\ttotal nr. of not found ids: {0}\\t({1:3.2f} %)\".format(\n nr_of_not_found_child_ids, (nr_of_not_found_child_ids / nr_of_childs) * 100.0))\n\n if nr_of_not_found_child_ids > 0:\n invalid_trace = True\n\n print(\n \"parents: (please check numbers)\\n\\ttotal nr. of tasks without parents:\\t{0}\\t({1:3.2f} %)\".format(\n nr_of_empty_parents, (nr_of_empty_parents / nr_task_df) * 100.0))\n print(\"\\ttotal nr. 
of not found ids: {0}\\t({1:3.2f} %)\".format(nr_of_not_found_parent_ids, (\n nr_of_not_found_parent_ids / nr_of_parents) * 100.0))\n\n if nr_of_not_found_parent_ids > 0:\n invalid_trace = True\n\n if err_nr_ts_submit == 1:\n print('ts_submit: One or more values are not set, unknown or negative')\n elif err_nr_ts_submit == 2:\n print('ts_submit: One or more values are not an integer -> use unix time')\n elif err_nr_ts_submit == 3:\n print('ts_submit: One or more values are not set, unknown or negative, and not an integer -> use unix time')\n if err_nr_viol_runtime == 1:\n print('runtime: One or more time values are negative')\n elif err_nr_viol_runtime == 2:\n print('runtime: One or more values are not float')\n elif err_nr_viol_runtime == 3:\n print('runtime: One or more values are negative and not float')\n\n if err_nr_ts_submit > 0:\n invalid_trace = True\n\n if err_nr_viol_runtime > 0:\n invalid_trace = True\n\n if invalid_trace:\n exit(-1)\n\n # check Workflows\n err_nr_ts_submit = 0\n nr_viol_task_cnt = 0\n task_cnt = 0\n for index, row in workflow_pdf.iterrows():\n # Check ts_submit\n try:\n if int(row['ts_submit']) < -1:\n if err_nr_ts_submit == 0 or err_nr_ts_submit == 1:\n err_nr_ts_submit = 1\n else:\n err_nr_ts_submit = 3\n except ValueError:\n if err_nr_ts_submit == 0 or err_nr_ts_submit == 2:\n err_nr_ts_submit = 2\n else:\n err_nr_ts_submit = 3\n\n # Check taskcount\n if row['task_count'] < 0:\n nr_viol_task_cnt = nr_viol_task_cnt + 1\n else:\n task_cnt = task_cnt + row['task_count']\n # print\n print(\"\\nValidating Workflows ...\")\n self.validate_workflow_fields(workflow_pdf)\n if err_nr_ts_submit == 1:\n print('ts_submit: At least one time value is smaller than -1')\n elif err_nr_ts_submit == 2:\n print('ts_submit: One or more values are not an integer -> use unix time')\n elif err_nr_ts_submit == 3:\n print('ts_submit: One or more values are not set, unknown or negative, and not an integer -> use unix time')\n\n if err_nr_ts_submit > 0:\n invalid_trace = True\n\n if nr_viol_task_cnt > 0:\n print('task_count: {0} negative task counts found.'.format(nr_viol_task_cnt))\n invalid_trace = True\n\n if task_cnt != nr_task_df:\n print('task_count: Tasks count of Workflows ({0}) is not equal to the sum of Tasks ({1})'.format(\n task_cnt, nr_task_df))\n invalid_trace = True\n\n if invalid_trace:\n exit(-1)\n\n\nif __name__ == '__main__':\n pdWorkflowFrame = None\n pdTaskFrame = None\n pdDataTransferFrame = None\n if len(sys.argv) != 2:\n print(USAGE)\n sys.exit(1)\n\n task_df_location = os.path.join(sys.argv[1], \"tasks\", \"schema-1.0\")\n wf_df_location = os.path.join(sys.argv[1], \"workflows\", \"schema-1.0\")\n datatransfer_df_location = os.path.join(sys.argv[1], \"datatransfers\", \"schema-1.0\")\n\n if os.path.exists(datatransfer_df_location):\n pdDataTransferFrame = pd.read_parquet(datatransfer_df_location, engine='pyarrow')\n\n validator = ParquetValidator()\n\n pdWorkflowFrame = pd.read_parquet(wf_df_location, engine='pyarrow')\n pdTaskFrame = pd.read_parquet(task_df_location, engine='pyarrow')\n\n print(\"\\nValidation started (only test fails and warnings are shown) ...\")\n validator.validate_workflows_tasks(pdWorkflowFrame, pdTaskFrame)\n if pdDataTransferFrame is not None:\n validator.validate_datatransfers(pdDataTransferFrame, pdTaskFrame)\n\n generic_information_location = os.path.join(sys.argv[1], \"workload\", \"schema-1.0\", \"generic_information.json\")\n\n workload_fields = [\"total_workflows\", \"total_tasks\", \"domain\", \"date_start\", \"date_end\", 
\"num_sites\",\n \"num_resources\", \"num_users\", \"num_groups\", \"total_resource_seconds\", \"authors\",\n \"min_resource_task\", \"max_resource_task\", \"std_resource_task\", \"mean_resource_task\",\n \"median_resource_task\", \"first_quartile_resource_task\", \"third_quartile_resource_task\",\n \"cov_resource_task\", \"min_memory\", \"max_memory\", \"std_memory\", \"mean_memory\", \"median_memory\",\n \"first_quartile_memory\", \"third_quartile_memory\", \"cov_memory\", \"min_network_usage\",\n \"max_network_usage\", \"std_network_usage\", \"mean_network_usage\", \"median_network_usage\",\n \"first_quartile_network_usage\", \"third_quartile_network_usage\", \"cov_network_usage\",\n \"min_disk_space_usage\", \"max_disk_space_usage\", \"std_disk_space_usage\", \"mean_disk_space_usage\",\n \"median_disk_space_usage\", \"first_quartile_disk_space_usage\", \"third_quartile_disk_space_usage\",\n \"cov_disk_space_usage\", \"min_energy\", \"max_energy\", \"std_energy\", \"mean_energy\", \"median_energy\",\n \"first_quartile_energy\", \"third_quartile_energy\", \"cov_energy\", \"workload_description\"]\n\n with open(generic_information_location, \"r\") as file:\n wl_data = json.load(file)\n # Check if all field exist\n for field in workload_fields:\n if field not in wl_data:\n print(\"Workload data is missing the {} field\".format(field))\n exit(-1)\n\n print(\"DONE.\")\n"
] | [
[
"numpy.isnan",
"numpy.int64",
"pandas.set_option",
"pandas.isnull"
],
[
"pandas.read_parquet",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
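Both scripts in the row above turn Galaxy timestamp strings into epoch milliseconds with the same expression; isolated for clarity, with the format string taken from the parser and a made-up sample value:

from datetime import datetime

DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
EPOCH = datetime(1970, 1, 1)

def to_epoch_ms(ts: str) -> int:
    # naive UTC seconds since 1970-01-01, scaled to milliseconds
    return int((datetime.strptime(ts, DATETIME_FORMAT) - EPOCH).total_seconds() * 1000)

print(to_epoch_ms('2019-03-01 00:00:00.000000'))  # 1551398400000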
KillerStrike17/PyDeNN | [
"2f0dfaf3e092a4f995ed30e2f8db946e30724551",
"2f0dfaf3e092a4f995ed30e2f8db946e30724551"
] | [
"DeNN/visualization/gradcam.py",
"DeNN/dataset_loader/data_loader.py"
] | [
"import seaborn as sns\nimport matplotlib.pyplot as plt\nimport torch\nimport numpy as np\nimport cv2\nfrom .cam import GradCAM\n\n\n# def load_gradcam(images, labels, model, device, target_layers):\ndef load_gradcam(test, model, device, target_layers,size = 25,classified = True):\n\n _images = []\n _target = []\n _pred = []\n\n # model, device = self.trainer.model, self.trainer.device\n\n # set the model to evaluation mode\n model.eval()\n\n # turn off gradients\n with torch.no_grad():\n for data, target in test:\n # move them to respective device\n data, target = data.to(device), target.to(device)\n\n # do inferencing\n output = model(data)\n # print(\"output:\",output[0])\n # get the predicted output\n pred = output.argmax(dim=1, keepdim=True)\n # print(pred,pred.view_as(target))\n\n # get the current misclassified in this batch\n list_images = (target.eq(pred.view_as(target)) == classified)\n batch_misclassified = data[list_images]\n batch_mis_pred = pred[list_images]\n batch_mis_target = target[list_images]\n\n # batch_misclassified =\n\n _images.append(batch_misclassified)\n _pred.append(batch_mis_pred)\n _target.append(batch_mis_target)\n\n # group all the batched together\n img = torch.cat(_images)\n pred = torch.cat(_pred)\n tar = torch.cat(_target)\n # move the model to device\n\n images = img[:size]\n labels = tar[:size]\n\n model.to(device)\n\n # set the model in evaluation mode\n model.eval()\n\n # get the grad cam\n gcam = GradCAM(model=model, candidate_layers=target_layers)\n\n # images = torch.stack(images).to(device)\n\n # predicted probabilities and class ids\n pred_probs, pred_ids = gcam.forward(images)\n\n # actual class ids\n # target_ids = torch.LongTensor(labels).view(len(images), -1).to(device)\n target_ids = labels.view(len(images), -1).to(device)\n\n # backward pass wrt to the actual ids\n gcam.backward(ids=target_ids)\n\n # we will store the layers and correspondings images activations here\n layers_region = {}\n\n # fetch the grad cam layers of all the images\n for target_layer in target_layers:\n\n # Grad-CAM\n regions = gcam.generate(target_layer=target_layer)\n\n layers_region[target_layer] = regions\n\n # we are done here, remove the hooks\n gcam.remove_hook()\n\n return layers_region, pred_probs, pred_ids,images, labels\n\n\nsns.set()\n# plt.style.use(\"dark_background\")\n\n\ndef plot_gradcam(gcam_layers, images, target_labels, predicted_labels, class_labels, denormalize):\n\n images = images.cpu()\n # convert BCHW to BHWC for plotting stufffff\n images = images.permute(0, 2, 3, 1)\n target_labels = target_labels.cpu()\n\n fig, axs = plt.subplots(nrows=len(images), ncols=len(\n gcam_layers.keys())+1, figsize=((len(gcam_layers.keys()) + 2)*3, len(images)*3))\n fig.suptitle(\"Grad-CAM\", fontsize=16)\n\n for image_idx, image in enumerate(images):\n\n # denormalize the imaeg\n denorm_img = denormalize(image.permute(2, 0, 1)).permute(1, 2, 0)\n\n # axs[image_idx, 0].text(\n # 0.5, 0.5, f'predicted: {class_labels[predicted_labels[image_idx][0] ]}\\nactual: {class_labels[target_labels[image_idx]] }', horizontalalignment='center', verticalalignment='center', fontsize=14, )\n # axs[image_idx, 0].axis('off')\n\n axs[image_idx, 0].imshow(\n (denorm_img.numpy() * 255).astype(np.uint8), interpolation='bilinear')\n axs[image_idx, 0].axis('off')\n\n for layer_idx, layer_name in enumerate(gcam_layers.keys()):\n # gets H X W of the cam layer\n _layer = gcam_layers[layer_name][image_idx].cpu().numpy()[0]\n heatmap = 1 - _layer\n heatmap = np.uint8(255 * heatmap)\n 
heatmap_img = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n\n superimposed_img = cv2.addWeighted(\n (denorm_img.numpy() * 255).astype(np.uint8), 0.6, heatmap_img, 0.4, 0)\n\n axs[image_idx, layer_idx +\n 1].imshow(superimposed_img, interpolation='bilinear')\n axs[image_idx, layer_idx+1].set_title(f'layer: {layer_name}')\n axs[image_idx, layer_idx+1].axis('off')\n axs[image_idx, 0].set_title(f'Predicted: {class_labels[predicted_labels[image_idx][0] ]}\\nTarget: {class_labels[target_labels[image_idx]] }')\n\n plt.tight_layout()\n plt.subplots_adjust(top=0.95, wspace=0.2, hspace=0.2)\n plt.show()",
"from torch.utils.data import DataLoader, Dataset\nfrom torchvision import datasets\nfrom .util import download_and_extract_archive\nimport os, glob\nfrom PIL import Image\nclass DatasetMnist:\n \"\"\"\n This class loads MNIST dataset with applied transformations\n\n # Functions:\n\n __repr__:\n\n This is a representation function, It returns the printable representation of the object.\n\n __str__:\n\n It returns useful string representation of the object.\n\n __init__:\n\n This is the constructor of DataMnist class< It initializes dataset and applied transformations over it.\n \n load_data:\n\n This function returns the generated datasaet.\n \"\"\"\n def __repr__(self):\n return \"Loading MNIST Dataset\"\n\n def __str__(self):\n return \"Loading MNIST Dataset\"\n\n def __init__(self, data_path:str, *, batch_size = 32, shuffle = True,transformations, sampler=None, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None, multiprocessing_context=None, generator=None):\n \"\"\"\n This function initializes the dataset based on the values provided as parameters\n\n # Param:\n\n data_path: Root directory of dataset where MNIST/processed/training.pt and MNIST/processed/testing.pt exist.\n\n batch_size (int, optional): how many samples per batch to load (default: ``1``).\n\n shuffle (bool, optional): set to ``True`` to have the data reshuffled at every epoch (default: ``False``).\n\n transformations: A function/transform that takes in an PIL image and returns a transformed version. E.g, transforms.RandomCrop \n \n sampler (Sampler, optional): defines the strategy to draw samples from the dataset. If specified, ``shuffle`` must be False.\n \n batch_sampler (Sampler, optional): like sampler, but returns a batch ofindices at a time. Mutually exclusive with :attr:`batch_size`,:attr:`shuffle`, :attr:`sampler`, and :attr:`drop_last`.\n \n num_workers (int, optional): how many subprocesses to use for dataloading. 0 means that the data will be loaded in the main process.(default: ``0``)\n \n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n \n pin_memory (bool, optional): If ``True``, the data loader will copy tensors into CUDA pinned memory before returning them. If your data elements are a custom type, or your ``collate_fn`` returns a batch that is a custom type\n see the example below.\n \n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: ``False``)\n\n timeout (numeric, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative. (default: ``0``)\n \n worker_init_fn (callable, optional): If not ``None``, this will be called on each worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as input, after seeding and before data loading. 
(default: ``None``)\n \"\"\"\n self.data_path = data_path\n self.train_set = datasets.MNIST(root = self.data_path,train = True,download = True, transform = transformations.apply_transforms(train = True))\n self.test_set = datasets.MNIST(root = self.data_path,train = False,download = True, transform = transformations.apply_transforms(train = False))\n self.params = {\n 'shuffle': shuffle,\n 'batch_size': batch_size,\n 'sampler': sampler,\n 'batch_sampler':batch_sampler,\n 'collate_fn':collate_fn,\n 'drop_last':drop_last,\n 'timeout':timeout,\n 'worker_init_fn':worker_init_fn,\n 'multiprocessing_context':multiprocessing_context,\n 'generator':generator,\n 'num_workers': num_workers,\n 'pin_memory': pin_memory\n }\n\n def load_data(self):\n \"\"\"\n This function returns the generated dataset\n\n # Returns:\n\n Dataset,Dataset : Generated test and train dataset\n \"\"\"\n return DataLoader(self.train_set,**self.params),DataLoader(self.test_set,**self.params)\n \nclass DatasetCifar10:\n\n def __repr__(self):\n return \"Loading CIFAR 10 Dataset\"\n\n def __str__(self):\n return \"Loading CIFAR 10 Dataset\"\n\n def __init__(self, data_path,transformations,*, batch_size = 32 , shuffle = True, sampler=None, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None, multiprocessing_context=None, generator=None):\n \"\"\"\n This function initializes the dataset based on the values provided as parameters\n\n # Param:\n\n data_path: Root directory of dataset where CIFAR10/processed/train.pt and CIFAR10/processed/test.pt exist.\n\n batch_size (int, optional): how many samples per batch to load (default: ``1``).\n\n shuffle (bool, optional): set to ``True`` to have the data reshuffled at every epoch (default: ``False``).\n\n transformations: A function/transform that takes in an PIL image and returns a transformed version. E.g, transforms.RandomCrop \n \n sampler (Sampler, optional): defines the strategy to draw samples from the dataset. If specified, ``shuffle`` must be False.\n \n batch_sampler (Sampler, optional): like sampler, but returns a batch ofindices at a time. Mutually exclusive with :attr:`batch_size`,:attr:`shuffle`, :attr:`sampler`, and :attr:`drop_last`.\n \n num_workers (int, optional): how many subprocesses to use for dataloading. 0 means that the data will be loaded in the main process.(default: ``0``)\n \n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n \n pin_memory (bool, optional): If ``True``, the data loader will copy tensors into CUDA pinned memory before returning them. If your data elements are a custom type, or your ``collate_fn`` returns a batch that is a custom type\n see the example below.\n \n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: ``False``)\n\n timeout (numeric, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative. (default: ``0``)\n \n worker_init_fn (callable, optional): If not ``None``, this will be called on each worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as input, after seeding and before data loading. 
(default: ``None``)\n \"\"\"\n self.data_path = data_path\n self.train_set = datasets.CIFAR10(root = self.data_path,train = True,download = True, transform = transformations.apply_transforms(train = True))\n self.test_set = datasets.CIFAR10(root = self.data_path,train = False,download = True, transform = transformations.apply_transforms(train = False))\n self.params = {\n 'shuffle': shuffle,\n 'batch_size': batch_size,\n 'sampler': sampler,\n 'batch_sampler':batch_sampler,\n 'collate_fn':collate_fn,\n 'drop_last':drop_last,\n 'timeout':timeout,\n 'worker_init_fn':worker_init_fn,\n 'multiprocessing_context':multiprocessing_context,\n 'generator':generator,\n 'num_workers': num_workers,\n 'pin_memory': pin_memory\n }\n\n def load_data(self):\n \"\"\"\n This function returns the generated dataset\n\n # Returns:\n\n Dataset,Dataset : Generated test and train dataset\n \"\"\"\n return DataLoader(self.train_set,**self.params),DataLoader(self.test_set,**self.params)\n\nclass TINYIMAGENET:\n url = 'http://cs231n.stanford.edu/tiny-imagenet-200.zip'\n filename = 'tiny-imagenet-200.zip'\n dataset_folder_name = 'tiny-imagenet-200'\n\n EXTENSION = 'JPEG'\n NUM_IMAGES_PER_CLASS = 500\n CLASS_LIST_FILE = 'wnids.txt'\n VAL_ANNOTATION_FILE = 'val_annotations.txt'\n\n def __init__(self, root, train=True, transform=None, target_transform=None, download=False):\n self.root = root\n self.transform = transform\n self.target_transform = target_transform\n\n if download and (not os.path.isdir(os.path.join(self.root, self.dataset_folder_name))):\n self.download()\n\n self.split_dir = 'train' if train else 'val'\n self.split_dir = os.path.join(\n self.root, self.dataset_folder_name, self.split_dir)\n self.image_paths = sorted(glob.iglob(os.path.join(\n self.split_dir, '**', '*.%s' % self.EXTENSION), recursive=True))\n\n self.target = []\n self.labels = {}\n\n # build class label - number mapping\n with open(os.path.join(self.root, self.dataset_folder_name, self.CLASS_LIST_FILE), 'r') as fp:\n self.label_texts = sorted([text.strip()\n for text in fp.readlines()])\n self.label_text_to_number = {\n text: i for i, text in enumerate(self.label_texts)}\n\n # build labels for NUM_IMAGES_PER_CLASS images\n if train:\n for label_text, i in self.label_text_to_number.items():\n for cnt in range(self.NUM_IMAGES_PER_CLASS):\n self.labels[f'{label_text}_{cnt}.{self.EXTENSION}'] = i\n\n # build the validation dataset\n else:\n with open(os.path.join(self.split_dir, self.VAL_ANNOTATION_FILE), 'r') as fp:\n for line in fp.readlines():\n terms = line.split('\\t')\n file_name, label_text = terms[0], terms[1]\n self.labels[file_name] = self.label_text_to_number[label_text]\n\n self.target = [self.labels[os.path.basename(\n filename)] for filename in self.image_paths]\n\n def download(self):\n download_and_extract_archive(\n self.url, self.root, filename=self.filename)\n\n def __getitem__(self, index):\n filepath = self.image_paths[index]\n img = Image.open(filepath)\n img = img.convert(\"RGB\")\n target = self.target[index]\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n \n def __len__(self):\n return len(self.image_paths)\n\nclass DatasetTinyImageNet:\n\n def __repr__(self):\n return \"Loading TinyImageNet Dataset\"\n\n def __str__(self):\n return \"Loading TinyImageNet Dataset\"\n\n def __init__(self, data_path,transformations,*, batch_size = 32 , shuffle = True, sampler=None, batch_sampler=None, num_workers=0, 
collate_fn=None, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None, multiprocessing_context=None, generator=None):\n \"\"\"\n This function initializes the dataset based on the values provided as parameters\n\n # Param:\n\n data_path: Root directory of dataset where TinyImageNet/processed/train.pt and TinyImageNet/processed/test.pt exist.\n\n batch_size (int, optional): how many samples per batch to load (default: ``1``).\n\n shuffle (bool, optional): set to ``True`` to have the data reshuffled at every epoch (default: ``False``).\n\n transformations: A function/transform that takes in an PIL image and returns a transformed version. E.g, transforms.RandomCrop \n \n sampler (Sampler, optional): defines the strategy to draw samples from the dataset. If specified, ``shuffle`` must be False.\n \n batch_sampler (Sampler, optional): like sampler, but returns a batch ofindices at a time. Mutually exclusive with :attr:`batch_size`,:attr:`shuffle`, :attr:`sampler`, and :attr:`drop_last`.\n \n num_workers (int, optional): how many subprocesses to use for dataloading. 0 means that the data will be loaded in the main process.(default: ``0``)\n \n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n \n pin_memory (bool, optional): If ``True``, the data loader will copy tensors into CUDA pinned memory before returning them. If your data elements are a custom type, or your ``collate_fn`` returns a batch that is a custom type\n see the example below.\n \n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: ``False``)\n\n timeout (numeric, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative. (default: ``0``)\n \n worker_init_fn (callable, optional): If not ``None``, this will be called on each worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as input, after seeding and before data loading. (default: ``None``)\n \"\"\"\n self.data_path = data_path\n self.train_set = TINYIMAGENET(root = self.data_path,train = True,download = True, transform = transformations.apply_transforms(train = True))\n self.test_set = TINYIMAGENET(root = self.data_path,train = False,download = True, transform = transformations.apply_transforms(train = False))\n self.params = {\n 'shuffle': shuffle,\n 'batch_size': batch_size,\n 'sampler': sampler,\n 'batch_sampler':batch_sampler,\n 'collate_fn':collate_fn,\n 'drop_last':drop_last,\n 'timeout':timeout,\n 'worker_init_fn':worker_init_fn,\n 'multiprocessing_context':multiprocessing_context,\n 'generator':generator,\n 'num_workers': num_workers,\n 'pin_memory': pin_memory\n }\n\n def load_data(self):\n \"\"\"\n This function returns the generated dataset\n\n # Returns:\n\n Dataset,Dataset : Generated test and train dataset \n \"\"\"\n return DataLoader(self.train_set,**self.params),DataLoader(self.test_set,**self.params)"
] | [
[
"matplotlib.pyplot.tight_layout",
"torch.cat",
"numpy.uint8",
"torch.no_grad",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
],
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
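The heatmap overlay in plot_gradcam is standard OpenCV; a self-contained sketch with random stand-in data (the image size is arbitrary, while the 0.6/0.4 weights and the 1 - cam inversion mirror the code above):

import numpy as np
import cv2

img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)  # stand-in RGB image
cam = np.random.rand(32, 32).astype(np.float32)               # stand-in Grad-CAM map in [0, 1]

heatmap = np.uint8(255 * (1 - cam))            # invert so strong activations map to 'hot' colors
heatmap_img = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
overlay = cv2.addWeighted(img, 0.6, heatmap_img, 0.4, 0)
print(overlay.shape, overlay.dtype)            # (32, 32, 3) uint8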
jaeckie/covid19-containment-embeddings | [
"e27e63266113231ee399f3a55f76b823d514c6f7"
] | [
"store_data.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 4 15:37:43 2020\n\n@author: moder\n\"\"\"\nimport os\nfrom datetime import datetime\nimport pandas as pd\nimport urllib.request\nfrom bs4 import BeautifulSoup \n\nuser_agent = \"user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)\"\n\ndef scrap_wikipedia_text(url):\n request = urllib.request.Request(url, data=None, headers={'User-Agent' : user_agent})\n html = urllib.request.urlopen(request).read().decode('utf-8') \n soup = BeautifulSoup(html, 'html.parser')\n content_div = soup.find('div', attrs={'id': 'mw-content-text'})\n # remove tables and graphs\n if content_div is not None: \n for s in content_div.select('table'): \n s.extract()\n for s in content_div.select('img'): \n s.extract()\n # remove references\n for s in content_div.select('div.reflist'):\n s.extract()\n print('div.reflist extracted from %s...' % url)\n # iterate all p tags and append to text\n tags = ['h1', 'h2', 'h3', 'li', 'p']\n bodytext = ''\n for con in content_div.find_all(tags):\n bodytext += con.text \n return bodytext \n return None\n\nif __name__ == '__main__':\n print('store data started...') \n # load containment history file from kaggle\n df_contain = pd.read_csv(r'data/COVID 19 Containment measures data.csv')\n \n # cfilter = df_contain['Country'].isin(['Austria', 'Germany', 'Italy', 'Spain', 'Denmark'])\n # df_c = df_contain[cfilter]\n df_c = df_contain\n \n df = df_c[df_c['Source'].notna()]\n df_drop = df.drop_duplicates(subset='Source', keep='last')\n \n wfilter = df_drop['Source'].str.contains('en.wikipedia.org')\n df_red = df_drop[wfilter]\n \n df_res = df_red[['Date Start', 'Country', 'Keywords', 'Source']]\n df_res.to_csv(r'data/covid19-all-countries.csv')\n\n for index, row in df_res.iterrows():\n text = scrap_wikipedia_text(row['Source'])\n time = datetime.now().strftime('%Y%m%d_%H%M%S')\n filename = '%s_%s_covid19-wikipedia.txt' % (time, row['Country'])\n with open(os.path.join('data',filename), 'w', encoding='utf-8') as file:\n file.write(text)\n print('saved file %s ...' % filename)\n file.close()\n # \\[\\d+\\]\n \n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
VIROBO-15/yolov1 | [
"b7824a6cc7e89a6c29ab63f636a236d923fa0a64"
] | [
"loss.py"
] | [
"import torch\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nLAMBDA_COORD = 5\nLAMBDA_NOOBJ = 0.5\n\n\ndef calc_loss(inp , target, opt):\n if inp.size(0) != target.size(0):\n raise Exception(\"Batch size does not match\")\n\n total_loss = torch.tensor(0.0)\n #total_loss = total_loss.dtype(tensor)\n\n for i in range(inp.size(0)):\n inp = inp[i]\n target = target[i]\n Q = predict_one_bbox(inp, target, opt)\n total_loss = total_loss + calc_loss_single(Q, target, opt)\n return total_loss\n\ndef predict_one_bbox(inp, target, opt):\n Q = torch.zeros(opt.S, opt.S, 5 + opt.C)\n\n select = torch.tensor(0).to(device)\n\n for i in range(opt.S):\n for j in range(opt.S):\n for b in range(opt.B):\n if b==0:\n boxes = inp[i, j, b*5 : b*5+5].to(device)\n else:\n boxes = torch.stack((boxes, inp[i, j, b*5 : b*5+5])).to(device)\n\n if len(target[i, j, :].nonzero()) > 1:\n max_iou = torch.tensor([0.]).to(device)\n\n\n groundtruth_box = target[i, j, :4].clone()\n\n for b in range(opt.B):\n iou = calc_IOU(groundtruth_box, boxes[b][:-1], device)\n\n if iou > max_iou:\n max_iou = iou\n select = torch.tensor(b).to(device)\n\n else:\n max_confidence = torch.tensor(0.).to(device)\n\n for b in range(opt.B):\n confidence = boxes[b][-1]\n\n if confidence > max_confidence:\n max_confidence = confidence\n select = torch.tensor(b).to(device)\n\n Q[i, j, :5] = boxes[select]\n Q[i, j, 5:] = inp[i, j, -opt.C:]\n return Q\n\ndef calc_loss_single(inp, target, opt):\n\n loss = torch.zeros(1)\n for i in range(opt.S):\n for j in range(opt.S):\n # case 1: grid cell HAS object\n if len(target[i, j, :].nonzero()) > 1:\n # localization\n loss = loss + LAMBDA_COORD * (torch.pow(inp[i, j, 0] - target[i, j, 0], 2) + torch.pow(inp[i, j, 1] - target[i, j, 1], 2))\n\n loss = loss + LAMBDA_COORD * (torch.pow(torch.sqrt(torch.abs(inp[i, j, 2])) - torch.sqrt(torch.abs(target[i, j,2])), 2) \\\n + torch.pow(torch.sqrt(torch.abs(inp[i, j, 3])) - torch.sqrt(torch.abs(target[i, j, 3])), 2)) # org\n # loss = loss + LAMBDA_COORD * (torch.sqrt(torch.abs(P[i, j, 2] - G[i, j, 2])) +\n # torch.sqrt(torch.abs(P[i, j, 3] - G[i, j, 3]))) # ZZ\n\n loss = loss + torch.pow(inp[i, j, 4]-1, 2) # Ground truth confidence is constant 1\n # classification\n true_cls = target[i, j, -1].type(torch.int64)\n true_cls_vec = torch.zeros(opt.C)\n true_cls_vec[true_cls] = torch.tensor(1)\n pred_cls_vec = inp[i, j, -opt.C:]\n loss = loss + torch.sum(torch.pow(pred_cls_vec - true_cls_vec, 2))\n\n # case 2: grid cell NO object\n # classification\n else:\n loss = loss + LAMBDA_NOOBJ * torch.pow(inp[i, j, 4] - 0, 2) # Ground truth confidence is constant 0\n\n return loss\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef calc_IOU(box_1, box_2, device=torch.device('cpu'), use_float64=False):\n \"\"\"\n Tensor version of calc_IOU()\n compute IOU between two bounding boxes\n :param box_1: Detection x, y, w, h image coordinates in [0, 1]\n :param box_2: GroundTruth x, y, w, h image coordinates in [0, 1]\n :return:\n \"\"\"\n '''\n x_min_1 = torch.clamp((box_1[0] - box_1[2] / 2), 0, 1).to(device)\n x_max_1 = torch.clamp((box_1[0] + box_1[2] / 2), 0, 1).to(device)\n y_min_1 = torch.clamp((box_1[1] - box_1[3] / 2), 0, 1).to(device)\n y_max_1 = torch.clamp((box_1[1] + box_1[3] / 2), 0, 1).to(device)\n '''\n\n x_min_1 = torch.clamp((abs(box_1[0]) - abs(box_1[2]) / 2), 0, 1).to(device)\n x_max_1 = torch.clamp((abs(box_1[0]) + abs(box_1[2]) / 2), 0, 1).to(device)\n y_min_1 = torch.clamp((abs(box_1[1]) - abs(box_1[3]) / 2), 0, 1).to(device)\n 
y_max_1 = torch.clamp((abs(box_1[1]) + abs(box_1[3]) / 2), 0, 1).to(device)\n\n x_min_2 = torch.clamp((box_2[0] - box_2[2] / 2), 0, 1).to(device)\n x_max_2 = torch.clamp((box_2[0] + box_2[2] / 2), 0, 1).to(device)\n y_min_2 = torch.clamp((box_2[1] - box_2[3] / 2), 0, 1).to(device)\n y_max_2 = torch.clamp((box_2[1] + box_2[3] / 2), 0, 1).to(device)\n\n\n # z = torch.tensor(0, dtype=torch.float).to(device)\n z = torch.tensor(0.).to(device)\n\n a = torch.min(x_max_1, x_max_2)\n b = torch.max(x_min_1, x_min_2)\n c = torch.min(y_max_1, y_max_2)\n d = torch.max(y_min_1, y_min_2)\n\n overlap_width = torch.max(a-b, z)\n overlap_height = torch.max(c-d, z)\n overlap_area = overlap_width * overlap_height\n\n union_area = (x_max_1 - x_min_1) * (y_max_1 - y_min_1) \\\n + (x_max_2 - x_min_2) * (y_max_2 - y_min_2) \\\n - overlap_area\n intersection_over_union = overlap_area / union_area\n return intersection_over_union\n\n\n"
] | [
[
"torch.abs",
"torch.max",
"torch.zeros",
"torch.min",
"torch.tensor",
"torch.cuda.is_available",
"torch.device",
"torch.clamp",
"torch.pow",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
EQt/graphidx | [
"9716488cf29f6235072fc920fa1a473bf88e954f"
] | [
"python/test/test_biadjacent.py"
] | [
"import numpy as np\nfrom graphidx.idx import BiAdjacent\n\n\ndef square():\n head = np.array([0, 0, 1, 2])\n tail = np.array([1, 2, 3, 3])\n return BiAdjacent(head, tail)\n\n\ndef test_sqare():\n neigh = square()\n assert repr(neigh) == \"BiAdjacent[m = 4, n = 4]\"\n assert set(neigh[0]) == {1, 2}\n assert set(neigh[1]) == {0, 3}\n assert set(neigh[2]) == {0, 3}\n assert set(neigh[3]) == {1, 2}\n\n\ndef test_1():\n head = np.array([0, 1, 2, 3], dtype=np.int32)\n tail = np.array([1, 3, 1, 2], dtype=np.int32)\n index = BiAdjacent(head, tail)\n assert repr(index) == \"BiAdjacent[m = 4, n = 4]\"\n\n i2 = index[2]\n assert len(i2) == 2\n\n assert list(i2) == [1, 3]\n assert list(index[0]) == [1]\n assert list(index[1]) == [0, 3, 2]\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tza0035/RMG-Py | [
"38c49f7107d1b19e4a534408a1040ddd313b8596",
"38c49f7107d1b19e4a534408a1040ddd313b8596",
"38c49f7107d1b19e4a534408a1040ddd313b8596"
] | [
"arkane/encorr/ae.py",
"rmgpy/thermo/nasaTest.py",
"arkane/statmechTest.py"
] | [
"#!/usr/bin/env python3\n\n###############################################################################\n# #\n# RMG - Reaction Mechanism Generator #\n# #\n# Copyright (c) 2002-2021 Prof. William H. Green ([email protected]), #\n# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a #\n# copy of this software and associated documentation files (the 'Software'), #\n# to deal in the Software without restriction, including without limitation #\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, #\n# and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in #\n# all copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. #\n# #\n###############################################################################\n\n\"\"\"\nThis module provides classes for fitting atom energies based on a very\nsmall, predetermined set of molecules.\n\"\"\"\n\nimport importlib\nimport json\nimport logging\nfrom collections import Counter\nfrom typing import Dict, Hashable, List, Union\n\nimport numpy as np\nfrom scipy.stats import distributions\n\nfrom rmgpy import constants\nfrom rmgpy.molecule import get_element, Molecule\n\nimport arkane.encorr.data as data\nfrom arkane.encorr.reference import ReferenceDatabase\nfrom arkane.modelchem import LevelOfTheory, CompositeLevelOfTheory\n\n# List of species labels that will be used for fitting (labels should match reference database)\nSPECIES_LABELS = [\n 'Dihydrogen',\n 'Dinitrogen',\n 'Dioxygen',\n 'Disulfur',\n 'Difluorine',\n 'Dichlorine',\n 'Dibromine',\n 'Hydrogen fluoride',\n 'Hydrogen chloride',\n 'Hydrogen bromide',\n 'Hydrogen sulfide',\n 'Water',\n 'Methane',\n 'Methyl',\n 'Ammonia',\n 'Chloromethane'\n]\n\n\nclass AEJob:\n \"\"\"\n A job for fitting atom energies.\n \"\"\"\n\n def __init__(self,\n species_energies: Dict[str, float],\n level_of_theory: Union[LevelOfTheory, CompositeLevelOfTheory] = None,\n write_to_database: bool = False,\n overwrite: bool = False):\n \"\"\"\n Initialize an AEJob instance.\n\n Notes:\n The species energies should be provided as a dictionary\n containing the species labels as keys and their single-\n point electronic energies in Hartree as values. 
The\n energies should be calculated using the experimental\n geometry provided for the species in the reference\n database, and the zero-point energy should not be included\n in the electronic energy.\n\n Args:\n species_energies: Dictionary of species labels with single-point electronic energies (Hartree).\n level_of_theory: Dictionary key for saving atom energies to the database.\n write_to_database: Save the fitted atom energies directly to the RMG database.\n overwrite: Overwrite atom energies in the RMG database if they already exist.\n \"\"\"\n self.spcs_energies = species_energies\n self.level_of_theory = level_of_theory\n self.write_to_database = write_to_database\n self.overwrite = overwrite\n self.ae = AE(species_energies)\n\n def execute(self, output_file: str = None):\n \"\"\"\n Execute the atom energy job.\n\n Args:\n output_file: Write the fitted energies to this file.\n \"\"\"\n if self.level_of_theory is None:\n logging.info('Fitting atom energies')\n else:\n logging.info(f'Fitting atom energies for {self.level_of_theory}')\n self.ae.fit()\n\n if output_file is not None:\n with open(output_file, 'a') as f:\n if self.level_of_theory is not None:\n f.write(f'# {self.level_of_theory}\\n')\n for element, energy in self.ae.atom_energies.items():\n f.write(f'# {element:2}: {energy:15.8f} +/- {self.ae.confidence_intervals[element]:.8f} Hartree\\n')\n f.writelines(self.ae.format_atom_energies(\n 'atom_energies' if self.level_of_theory is None else self.level_of_theory))\n\n if self.write_to_database:\n if self.level_of_theory is None:\n raise Exception('Level of theory is required for writing to database')\n try:\n self.ae.write_to_database(self.level_of_theory, overwrite=self.overwrite)\n except ValueError as e:\n logging.warning('Could not write atom energies to database. 
Captured error:')\n logging.warning(str(e))\n\n\nclass AE:\n \"\"\"\n A class for fitting atom energies.\n \"\"\"\n\n ref_data_src = 'CCCBDB' # Use CCCBDB data\n ref_data = None # Dictionary of reference data entries\n\n def __init__(self, species_energies: Dict[str, float]):\n self.species_energies = species_energies # Hartree\n self.atom_energies = None\n self.confidence_intervals = None\n\n for lbl in SPECIES_LABELS:\n if lbl not in self.species_energies:\n logging.warning(f'{lbl} missing from provided species energies!')\n\n @classmethod\n def _load_refdata(cls):\n if cls.ref_data is None:\n logging.info('Loading reference database')\n db = ReferenceDatabase()\n db.load()\n cls.ref_data = {lbl: spc for lbl, spc in zip(SPECIES_LABELS, db.get_species_from_label(SPECIES_LABELS))}\n\n def fit(self):\n \"\"\"\n Fit atom energies using the provided species energies and\n corresponding atomization energies from the reference data.\n \"\"\"\n self._load_refdata()\n\n mols = [\n Molecule().from_adjacency_list(\n self.ref_data[lbl].adjacency_list,\n raise_atomtype_exception=False,\n raise_charge_exception=False\n ) for lbl in self.species_energies\n ]\n atom_counts = [Counter(atom.element.symbol for atom in mol.atoms) for mol in mols]\n elements = sorted({element for ac in atom_counts for element in ac}, key=lambda s: get_element(s).number)\n x = np.array([[ac[element] for element in elements] for ac in atom_counts]) # Nmols x Nelements\n\n atomization_energies = np.array([\n self.ref_data[lbl].reference_data[self.ref_data_src].atomization_energy.value_si\n / constants.E_h / constants.Na for lbl in self.species_energies\n ])\n zpes = np.array([\n self.ref_data[lbl].reference_data[self.ref_data_src].zpe.value_si\n / constants.E_h / constants.Na for lbl in self.species_energies\n ])\n elec_energies = np.array(list(self.species_energies.values())) # Should already be in Hartree\n y = atomization_energies + elec_energies + zpes\n\n w = np.linalg.solve(x.T @ x, x.T @ y)\n self.atom_energies = dict(zip(elements, w))\n\n # Get confidence intervals\n n = len(y) # Ndata\n k = len(w) # Nparam\n ypred = x @ w\n sigma2 = np.sum((y - ypred)**2) / (n - k - 1) # MSE\n cov = sigma2 * np.linalg.inv(x.T @ x) # covariance matrix\n se = np.sqrt(np.diag(cov)) # standard error\n alpha = 0.05 # 95% confidence level\n tdist = distributions.t.ppf(1 - alpha/2, n - k - 1) # student-t\n ci = tdist * se # confidence interval half-width\n self.confidence_intervals = dict(zip(elements, ci)) # Parameter estimates are w +/- ci\n\n def write_to_database(self, key: Hashable, overwrite: bool = False, alternate_path: str = None):\n \"\"\"\n Write atom energies to database.\n\n Args:\n key: Dictionary key to use for atom energies in database.\n overwrite: Overwrite existing atom energies.\n alternate_path: Write atom energies and existing database to this path instead.\n \"\"\"\n if self.atom_energies is None:\n raise ValueError('No atom energies available for writing')\n\n data_path = data.quantum_corrections_path\n with open(data_path) as f:\n lines = f.readlines()\n\n ae_formatted = self.format_atom_energies(key, indent=True)\n\n # Add new atom energies to file without changing existing formatting\n for i, line in enumerate(lines):\n if 'atom_energies' in line:\n if key in data.atom_energies:\n if overwrite:\n # Does not overwrite comments\n del_idx_start = del_idx_end = None\n for j, line2 in enumerate(lines[i:]):\n if repr(key) in line2:\n del_idx_start = i + j\n del_idx_end = None\n elif line2.rstrip() == ' },': # Can't have a 
comment after final brace\n del_idx_end = i + j + 1\n if del_idx_start is not None and del_idx_end is not None:\n if (lines[del_idx_start - 1].lstrip().startswith('#')\n or lines[del_idx_end + 1].lstrip().startswith('#')):\n logging.warning('There may be left over comments from previous atom energies')\n lines[del_idx_start:del_idx_end] = ae_formatted\n break\n else:\n raise ValueError(f'{key} already exists. Set `overwrite` to True.')\n else:\n lines[(i+1):(i+1)] = ['\\n'] + ae_formatted\n break\n\n with open(data_path if alternate_path is None else alternate_path, 'w') as f:\n f.writelines(lines)\n\n # Reload data to update atom energy dictionary\n if alternate_path is None:\n importlib.reload(data)\n\n def format_atom_energies(self, key: Hashable, indent: bool = False) -> List[str]:\n \"\"\"\n Obtain a list of nicely formatted atom energies suitable for\n writelines.\n\n Args:\n key: Dictionary key to use for formatting dictionary.\n indent: Indent each line.\n\n Returns:\n Formatted list of atom energies.\n \"\"\"\n ae_formatted = json.dumps(self.atom_energies, indent=4).replace('\"', \"'\").split('\\n')\n ae_formatted[0] = f'\"{key}\": ' + ae_formatted[0]\n ae_formatted[-1] += ','\n ae_formatted = [e + '\\n' for e in ae_formatted]\n if indent:\n ae_formatted = [' ' + e for e in ae_formatted]\n return ae_formatted\n",
"#!/usr/bin/env python3\n\n###############################################################################\n# #\n# RMG - Reaction Mechanism Generator #\n# #\n# Copyright (c) 2002-2021 Prof. William H. Green ([email protected]), #\n# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a #\n# copy of this software and associated documentation files (the 'Software'), #\n# to deal in the Software without restriction, including without limitation #\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, #\n# and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in #\n# all copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. #\n# #\n###############################################################################\n\n\"\"\"\nThis script contains unit tests of the :mod:`rmgpy.thermo.nasa` module.\n\"\"\"\n\nimport os.path\nimport unittest\n\nimport numpy as np\n\nimport rmgpy.constants as constants\nfrom rmgpy.quantity import ScalarQuantity\nfrom rmgpy.thermo.nasa import NASA, NASAPolynomial\n\n\n################################################################################\n\n\nclass TestNASA(unittest.TestCase):\n \"\"\"\n Contains unit tests of the MultiNASA class.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n A function run before each unit test in this class.\n \"\"\"\n self.coeffs_low = [4.03055, -0.00214171, 4.90611e-05, -5.99027e-08, 2.38945e-11, -11257.6, 3.5613]\n self.coeffs_high = [-0.307954, 0.0245269, -1.2413e-05, 3.07724e-09, -3.01467e-13, -10693, 22.628]\n self.Tmin = 300.\n self.Tmax = 3000.\n self.Tint = 650.73\n self.E0 = -782292. 
# J/mol.\n self.comment = \"C2H6\"\n self.nasa = NASA(\n polynomials=[\n NASAPolynomial(coeffs=self.coeffs_low, Tmin=(self.Tmin, \"K\"), Tmax=(self.Tint, \"K\")),\n NASAPolynomial(coeffs=self.coeffs_high, Tmin=(self.Tint, \"K\"), Tmax=(self.Tmax, \"K\")),\n ],\n Tmin=(self.Tmin, \"K\"),\n Tmax=(self.Tmax, \"K\"),\n E0=(self.E0, \"J/mol\"),\n comment=self.comment,\n )\n\n def tearDown(self):\n \"\"\"\n Reset the database & liquid parameters for solution\n \"\"\"\n import rmgpy.data.rmg\n rmgpy.data.rmg.database = None\n\n def test_poly_low(self):\n \"\"\"\n Test that the NASA low-temperature polynomial was properly set.\n \"\"\"\n self.assertEqual(len(self.nasa.poly1.coeffs), len(self.coeffs_low))\n for coeff0, coeff in zip(self.nasa.poly1.coeffs, self.coeffs_low):\n self.assertAlmostEqual(coeff / coeff0, 1.0, 6)\n self.assertEqual(self.nasa.poly1.Tmin.value_si, self.Tmin)\n self.assertEqual(self.nasa.poly1.Tmax.value_si, self.Tint)\n\n def test_poly_high(self):\n \"\"\"\n Test that the NASA high-temperature polynomial was properly set.\n \"\"\"\n self.assertEqual(len(self.nasa.poly2.coeffs), len(self.coeffs_high))\n for coeff0, coeff in zip(self.nasa.poly2.coeffs, self.coeffs_high):\n self.assertAlmostEqual(coeff / coeff0, 1.0, 6)\n self.assertEqual(self.nasa.poly2.Tmin.value_si, self.Tint)\n self.assertEqual(self.nasa.poly2.Tmax.value_si, self.Tmax)\n\n def test_temperature_min(self):\n \"\"\"\n Test that the NASA Tmin property was properly set.\n \"\"\"\n self.assertAlmostEqual(self.nasa.Tmin.value_si / self.Tmin, 1.0, 6,\n '{0} != {1} within 6 places'.format(self.nasa.Tmin, self.Tmin))\n\n def test_temperature_max(self):\n \"\"\"\n Test that the NASA Tmax property was properly set.\n \"\"\"\n self.assertAlmostEqual(self.nasa.Tmax.value_si / self.Tmax, 1.0, 6,\n '{0} != {1} within 6 places'.format(self.nasa.Tmax, self.Tmax))\n\n def test_e0(self):\n \"\"\"\n Test that the NASA E0 property was properly set.\n \"\"\"\n self.assertAlmostEqual(self.nasa.E0.value_si / self.E0, 1.0, 6,\n '{0} != {1} within 6 places'.format(self.nasa.Tmax, self.Tmax))\n\n def test_comment(self):\n \"\"\"\n Test that the NASA comment property was properly set.\n \"\"\"\n self.assertEqual(self.nasa.comment, self.comment)\n\n def test_is_temperature_valid(self):\n \"\"\"\n Test the NASA.is_temperature_valid() method.\n \"\"\"\n Tdata = [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]\n valid_data = [False, True, True, True, True, True, True, True, True, True]\n for T, valid in zip(Tdata, valid_data):\n valid0 = self.nasa.is_temperature_valid(T)\n self.assertEqual(valid0, valid)\n\n def test_get_heat_capacity(self):\n \"\"\"\n Test the NASA.get_heat_capacity() method.\n \"\"\"\n Tlist = np.array([400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])\n cp_exp_list = np.array([7.80157, 10.5653, 12.8213, 14.5817, 15.9420,\n 16.9861, 17.78645, 18.4041, 18.8883]) * constants.R\n for T, cp_exp in zip(Tlist, cp_exp_list):\n cp_act = self.nasa.get_heat_capacity(T)\n self.assertAlmostEqual(cp_exp / cp_act, 1.0, 4, '{0} != {1}'.format(cp_exp, cp_act))\n\n def test_get_enthalpy(self):\n \"\"\"\n Test the NASA.get_enthalpy() method.\n \"\"\"\n Tlist = np.array([400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])\n h_exp_list = np.array([-22.7613, -12.1027, -6.14236, -2.16615, 0.743456,\n 2.99256, 4.79397, 6.27334, 7.51156]) * constants.R * Tlist\n for T, h_exp in zip(Tlist, h_exp_list):\n h_act = self.nasa.get_enthalpy(T)\n self.assertAlmostEqual(h_exp / h_act, 1.0, 3, '{0} != {1}'.format(h_exp, h_act))\n\n def 
test_get_entropy(self):\n \"\"\"\n Test the NASA.get_entropy() method.\n \"\"\"\n Tlist = np.array([400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])\n s_exp_list = np.array([29.6534, 33.3516, 36.7131, 39.7715, 42.5557,\n 45.0952, 47.4179, 49.5501, 51.5152]) * constants.R\n for T, s_exp in zip(Tlist, s_exp_list):\n s_act = self.nasa.get_entropy(T)\n self.assertAlmostEqual(s_exp / s_act, 1.0, 4, '{0} != {1}'.format(s_exp, s_act))\n\n def test_get_free_energy(self):\n \"\"\"\n Test the NASA.get_free_energy() method.\n \"\"\"\n Tlist = np.array([400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])\n for T in Tlist:\n g_exp = self.nasa.get_enthalpy(T) - T * self.nasa.get_entropy(T)\n g_act = self.nasa.get_free_energy(T)\n self.assertAlmostEqual(g_exp / g_act, 1.0, 4, '{0} != {1}'.format(g_exp, g_act))\n\n def test_pickle(self):\n \"\"\"\n Test that a NASA object can be pickled and unpickled with no loss of\n information.\n \"\"\"\n import pickle\n nasa = pickle.loads(pickle.dumps(self.nasa))\n self.assertEqual(len(self.nasa.poly1.coeffs), len(nasa.poly1.coeffs))\n for coeff0, coeff in zip(self.nasa.poly1.coeffs, nasa.poly1.coeffs):\n self.assertAlmostEqual(coeff / coeff0, 1.0, 6)\n self.assertEqual(self.nasa.poly1.Tmin.value, nasa.poly1.Tmin.value)\n self.assertEqual(self.nasa.poly1.Tmin.units, nasa.poly1.Tmin.units)\n self.assertEqual(self.nasa.poly1.Tmax.value, nasa.poly1.Tmax.value)\n self.assertEqual(self.nasa.poly1.Tmax.units, nasa.poly1.Tmax.units)\n self.assertEqual(self.nasa.poly1.comment, nasa.poly1.comment)\n self.assertEqual(len(self.nasa.poly2.coeffs), len(nasa.poly2.coeffs))\n for coeff0, coeff in zip(self.nasa.poly2.coeffs, nasa.poly2.coeffs):\n self.assertAlmostEqual(coeff / coeff0, 1.0, 6)\n self.assertEqual(self.nasa.poly2.Tmin.value, nasa.poly2.Tmin.value)\n self.assertEqual(self.nasa.poly2.Tmin.units, nasa.poly2.Tmin.units)\n self.assertEqual(self.nasa.poly2.Tmax.value, nasa.poly2.Tmax.value)\n self.assertEqual(self.nasa.poly2.Tmax.units, nasa.poly2.Tmax.units)\n self.assertEqual(self.nasa.poly2.comment, nasa.poly2.comment)\n self.assertEqual(self.nasa.Tmin.value, nasa.Tmin.value)\n self.assertEqual(self.nasa.Tmin.units, nasa.Tmin.units)\n self.assertEqual(self.nasa.Tmax.value, nasa.Tmax.value)\n self.assertEqual(self.nasa.Tmax.units, nasa.Tmax.units)\n self.assertEqual(self.nasa.E0.value, nasa.E0.value)\n self.assertEqual(self.nasa.E0.units, nasa.E0.units)\n self.assertEqual(self.nasa.comment, nasa.comment)\n\n def test_repr(self):\n \"\"\"\n Test that a NASA object can be reconstructed from its repr() output\n with no loss of information.\n \"\"\"\n namespace = {}\n exec('nasa = {0!r}'.format(self.nasa), globals(), namespace)\n self.assertIn('nasa', namespace)\n nasa = namespace['nasa']\n self.assertEqual(len(self.nasa.poly1.coeffs), len(nasa.poly1.coeffs))\n for coeff0, coeff in zip(self.nasa.poly1.coeffs, nasa.poly1.coeffs):\n self.assertAlmostEqual(coeff / coeff0, 1.0, 6)\n self.assertEqual(self.nasa.poly1.Tmin.value, nasa.poly1.Tmin.value)\n self.assertEqual(self.nasa.poly1.Tmin.units, nasa.poly1.Tmin.units)\n self.assertEqual(self.nasa.poly1.Tmax.value, nasa.poly1.Tmax.value)\n self.assertEqual(self.nasa.poly1.Tmax.units, nasa.poly1.Tmax.units)\n self.assertEqual(self.nasa.poly1.comment, nasa.poly1.comment)\n self.assertEqual(len(self.nasa.poly2.coeffs), len(nasa.poly2.coeffs))\n for coeff0, coeff in zip(self.nasa.poly2.coeffs, nasa.poly2.coeffs):\n self.assertAlmostEqual(coeff / coeff0, 1.0, 6)\n self.assertEqual(self.nasa.poly2.Tmin.value, nasa.poly2.Tmin.value)\n 
self.assertEqual(self.nasa.poly2.Tmin.units, nasa.poly2.Tmin.units)\n self.assertEqual(self.nasa.poly2.Tmax.value, nasa.poly2.Tmax.value)\n self.assertEqual(self.nasa.poly2.Tmax.units, nasa.poly2.Tmax.units)\n self.assertEqual(self.nasa.poly2.comment, nasa.poly2.comment)\n self.assertEqual(self.nasa.Tmin.value, nasa.Tmin.value)\n self.assertEqual(self.nasa.Tmin.units, nasa.Tmin.units)\n self.assertEqual(self.nasa.Tmax.value, nasa.Tmax.value)\n self.assertEqual(self.nasa.Tmax.units, nasa.Tmax.units)\n self.assertEqual(self.nasa.E0.value, nasa.E0.value)\n self.assertEqual(self.nasa.E0.units, nasa.E0.units)\n self.assertEqual(self.nasa.comment, nasa.comment)\n\n def test_to_cantera(self):\n \"\"\"\n Test that conversion to a Cantera NasaPoly2 object works\n \"\"\"\n nasapoly2 = self.nasa.to_cantera()\n # NasaPoly2 units use J/kmol rather than J/mol\n self.assertAlmostEqual(self.nasa.get_enthalpy(900), nasapoly2.h(900) / 1000, 1)\n self.assertAlmostEqual(self.nasa.get_entropy(700), nasapoly2.s(700) / 1000, 1)\n\n def test_to_nasa(self):\n \"\"\"\n Test if the entropy computed from other thermo implementations is close to what NASA computes.\n \"\"\"\n\n from rmgpy import settings\n from rmgpy.data.rmg import RMGDatabase\n from rmgpy.species import Species\n\n # Load databases\n database = RMGDatabase()\n database.load_thermo(os.path.join(settings['database.directory'], 'thermo'), thermo_libraries=['Narayanaswamy'])\n database.load_solvation(os.path.join(settings['database.directory'], 'solvation'))\n\n spc = Species().from_smiles('CC')\n spc.get_thermo_data()\n\n T = 1350. # not 298K!\n\n # nasa to thermodata\n nasa = spc.thermo\n s_nasa = nasa.get_entropy(T)\n\n td = nasa.to_thermo_data()\n s_td = td.get_entropy(T)\n\n self.assertAlmostEqual(s_nasa, s_td, -1)\n self.assertEqual(td.comment, nasa.comment)\n\n # thermodata to nasa\n nasa = td.to_nasa(Tmin=100.0, Tmax=5000.0, Tint=1000.0)\n s_nasa = nasa.get_entropy(T)\n\n self.assertAlmostEqual(s_nasa, s_td, -1)\n self.assertEqual(td.comment, nasa.comment)\n\n # wilhoit to nasa\n wilhoit = nasa.to_wilhoit()\n nasa = wilhoit.to_nasa(Tmin=100.0, Tmax=5000.0, Tint=1000.0)\n s_nasa = nasa.get_entropy(T)\n\n self.assertAlmostEqual(s_nasa, s_td, -1)\n self.assertEqual(wilhoit.comment, nasa.comment)\n\n # nasa to wilhoi performed in wilhoitTest\n\n def test_nasa_as_dict_full(self):\n \"\"\"\n Test that NASA.as_dict functions properly with all attributes\n \"\"\"\n nasa_dict = self.nasa.as_dict()\n self.assertEqual(nasa_dict['E0']['value'], self.E0)\n self.assertEqual(nasa_dict['Tmin']['value'], self.Tmin)\n self.assertEqual(nasa_dict['Tmax']['value'], self.Tmax)\n self.assertEqual(nasa_dict['comment'], self.comment)\n self.assertTupleEqual(tuple(nasa_dict['polynomials']['polynomial1']['coeffs']['object']),\n tuple(self.coeffs_low))\n self.assertTupleEqual(tuple(nasa_dict['polynomials']['polynomial2']['coeffs']['object']),\n tuple(self.coeffs_high))\n self.assertEqual(nasa_dict['polynomials']['polynomial1']['Tmin']['value'], self.Tmin)\n self.assertEqual(nasa_dict['polynomials']['polynomial1']['Tmax']['value'], self.Tint)\n self.assertEqual(nasa_dict['polynomials']['polynomial2']['Tmin']['value'], self.Tint)\n self.assertEqual(nasa_dict['polynomials']['polynomial2']['Tmax']['value'], self.Tmax)\n\n def test_nasa_as_dict_minimal(self):\n \"\"\"\n Test that NASA.as_dict does not contain empty, optional attributes\n \"\"\"\n nasa_dict = NASA().as_dict()\n keys = list(nasa_dict.keys())\n self.assertNotIn('Tmin', keys)\n self.assertNotIn('Tmax', keys)\n 
self.assertNotIn('E0', keys)\n self.assertNotIn('Cp0', keys)\n self.assertNotIn('CpInf', keys)\n self.assertNotIn('label', keys)\n self.assertNotIn('comment', keys)\n\n def test_nasa_polynomial_as_dict(self):\n \"\"\"\n Test that NASAPolynomial.as_dict returns all non-empty, non-redundant attributes properly.\n \"\"\"\n nasa_poly_dict = self.nasa.polynomials[0].as_dict()\n self.assertEqual(nasa_poly_dict, {'coeffs': {'object': [4.03055, -0.00214171, 4.90611e-05, -5.99027e-08,\n 2.38945e-11, -11257.6, 3.5613],\n 'class': 'np_array'},\n 'Tmax': {'units': 'K', 'class': 'ScalarQuantity', 'value': 650.73},\n 'Tmin': {'units': 'K', 'class': 'ScalarQuantity', 'value': 300.0},\n 'class': 'NASAPolynomial'}\n )\n\n def test_make_nasa(self):\n \"\"\"\n Test that a NASA object can be reconstructed from a dictionary (also test NASAPolynomial by extension)\n \"\"\"\n nasa_dict = self.nasa.as_dict()\n new_nasa = NASA.__new__(NASA)\n class_dictionary = {'ScalarQuantity': ScalarQuantity,\n 'np_array': np.array,\n 'NASA': NASA,\n 'NASAPolynomial': NASAPolynomial,\n }\n\n new_nasa.make_object(nasa_dict, class_dictionary)\n\n\n################################################################################\n\n\nif __name__ == '__main__':\n unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))\n",
"#!/usr/bin/env python3\n\n###############################################################################\n# #\n# RMG - Reaction Mechanism Generator #\n# #\n# Copyright (c) 2002-2021 Prof. William H. Green ([email protected]), #\n# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a #\n# copy of this software and associated documentation files (the 'Software'), #\n# to deal in the Software without restriction, including without limitation #\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, #\n# and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in #\n# all copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. #\n# #\n###############################################################################\n\n\"\"\"\nThis module contains unit tests of the :mod:`arkane.statmech` module.\n\"\"\"\n\nimport os\nimport unittest\n\nimport numpy as np\n\nfrom rmgpy.species import Species\nfrom rmgpy.exceptions import InputError\n\nfrom arkane import Arkane\nfrom arkane.ess.qchem import QChemLog\nfrom arkane.modelchem import LevelOfTheory\nfrom arkane.statmech import StatMechJob, determine_rotor_symmetry, is_linear\n\n################################################################################\n\n\nclass TestStatmech(unittest.TestCase):\n \"\"\"\n Contains unit tests of the StatmechJob class.\n \"\"\"\n\n @classmethod\n def setUp(cls):\n \"\"\"A method that is run before each unit test in this class\"\"\"\n arkane = Arkane()\n cls.job_list = arkane.load_input_file(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'data', 'Benzyl', 'input.py'))\n\n def test_gaussian_log_file_error(self):\n \"\"\"Test that the proper error is raised if gaussian geometry and frequency file paths are not the same\"\"\"\n job = self.job_list[-2]\n self.assertTrue(isinstance(job, StatMechJob))\n with self.assertRaises(InputError):\n job.load()\n\n def test_rotor_symmetry_determination(self):\n \"\"\"\n Test that the correct symmetry number is determined for rotor potential scans.\n \"\"\"\n path1 = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'NCC_NRotor.out')\n path2 = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'NCC_CRotor.out')\n scan_log1 = QChemLog(path1)\n scan_log2 = QChemLog(path2)\n v_list1, angle = scan_log1.load_scan_energies()\n v_list2, angle = scan_log2.load_scan_energies()\n symmetry1 = determine_rotor_symmetry(energies=v_list1, label='NCC', pivots=[])\n symmetry2 = determine_rotor_symmetry(energies=v_list2, label='NCC', pivots=[])\n self.assertEqual(symmetry1, 1)\n self.assertEqual(symmetry2, 3)\n\n def test_is_linear(self):\n \"\"\"Test that we can determine the linearity of a molecule from it's coordinates\"\"\"\n xyz1 = np.array([\n [0.000000, 0.000000, 0.000000],\n 
[0.000000, 0.000000, 1.159076],\n [0.000000, 0.000000, -1.159076]]) # a trivial case\n xyz2 = np.array([\n [-0.06618943, -0.12360663, -0.07631983],\n [-0.79539707, 0.86755487, 1.02675668],\n [-0.68919931, 0.25421823, -1.34830853],\n [0.01546439, -1.54297548, 0.44580391],\n [1.94428095, 0.40772394, 1.03719428],\n [2.20318015, -0.14715186, -0.64755729],\n [1.59252246, 1.51178950, -0.33908352],\n [-0.87856890, -2.02453514, 0.38494433],\n [-1.34135876, 1.49608206, 0.53295071]]) # a non-linear multi-atom molecule\n xyz3 = np.array([\n [0.0000000000, 0.0000000000, 0.3146069129],\n [-1.0906813653, 0.0000000000, -0.1376405244],\n [1.0906813653, 0.0000000000, -0.1376405244]]) # NO2, a non-linear 3-atom molecule\n xyz4 = np.array([\n [0.0000000000, 0.0000000000, 0.1413439534],\n [-0.8031792912, 0.0000000000, -0.4947038368],\n [0.8031792912, 0.0000000000, -0.4947038368]]) # NH2, a non-linear 3-atom molecule\n xyz5 = np.array([\n [-0.5417345330, 0.8208150346, 0.0000000000],\n [0.9206183692, 1.6432038228, 0.0000000000],\n [-1.2739176462, 1.9692549926, 0.0000000000]]) # HSO, a non-linear 3-atom molecule\n xyz6 = np.array([\n [1.18784533, 0.98526702, 0.00000000],\n [0.04124533, 0.98526702, 0.00000000],\n [-1.02875467, 0.98526702, 0.00000000]]) # HCN, a linear 3-atom molecule\n xyz7 = np.array([\n [-4.02394116, 0.56169428, 0.00000000],\n [-5.09394116, 0.56169428, 0.00000000],\n [-2.82274116, 0.56169428, 0.00000000],\n [-1.75274116, 0.56169428, 0.00000000]]) # C2H2, a linear 4-atom molecule\n xyz8 = np.array([\n [-1.02600933, 2.12845307, 0.00000000],\n [-0.77966935, 0.95278385, 0.00000000],\n [-1.23666197, 3.17751246, 0.00000000],\n [-0.56023545, -0.09447399, 0.00000000]]) # C2H2, just 0.5 degree off from linearity, so NOT linear\n xyz9 = np.array([\n [-1.1998, 0.1610, 0.0275],\n [-1.4021, 0.6223, -0.8489],\n [-1.48302, 0.80682, -1.19946]]) # just 3 points in space on a straight line (not a physical molecule)\n xyz10 = np.array([\n [-1.1998, 0.1610, 0.0275]]) # mono-atomic species, non-linear\n xyz11 = np.array([\n [1.06026500, -0.07706800, 0.03372800],\n [3.37340700, -0.07706800, 0.03372800],\n [2.21683600, -0.07706800, 0.03372800]]) # CO2 at wb97xd/6-311+g(d,p), linear\n xyz12 = np.array([\n [1.05503600, -0.00335000, 0.09823600],\n [2.42816800, -0.00335000, 0.09823600],\n [-0.14726400, -0.00335000, 0.09823600],\n [3.63046800, -0.00335000, 0.09823600],\n [-1.21103500, -0.00335000, 0.09823600],\n [4.69423900, -0.00335000, 0.09823600]]) # C#CC#C at wb97xd/6-311+g(d,p), linear\n\n self.assertTrue(is_linear(xyz1))\n self.assertTrue(is_linear(xyz6))\n self.assertTrue(is_linear(xyz7))\n self.assertTrue(is_linear(xyz9))\n self.assertTrue(is_linear(xyz11))\n self.assertTrue(is_linear(xyz12))\n self.assertFalse(is_linear(xyz2))\n self.assertFalse(is_linear(xyz3))\n self.assertFalse(is_linear(xyz4))\n self.assertFalse(is_linear(xyz5))\n self.assertFalse(is_linear(xyz8))\n self.assertFalse(is_linear(xyz10))\n\n def test_specifying_absolute_file_paths(self):\n \"\"\"Test specifying absolute file paths of statmech files\"\"\"\n h2o2_input = \"\"\"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nbonds = {{'H-O': 2, 'O-O': 1}}\n\nexternalSymmetry = 2\n\nspinMultiplicity = 1\n\nopticalIsomers = 1\n\nenergy = {{'b3lyp/6-311+g(3df,2p)': Log('{energy}')}}\n\ngeometry = Log('{freq}')\n\nfrequencies = Log('{freq}')\n\nrotors = [HinderedRotor(scanLog=Log('{scan}'), pivots=[1, 2], top=[1, 3], symmetry=1, fit='fourier')]\n\n\"\"\"\n abs_arkane_path = os.path.abspath(os.path.dirname(__file__)) # this is the absolute path to 
`.../RMG-Py/arkane`\n energy_path = os.path.join('arkane', 'data', 'H2O2', 'sp_a19032.out')\n freq_path = os.path.join('arkane', 'data', 'H2O2', 'freq_a19031.out')\n scan_path = os.path.join('arkane', 'data', 'H2O2', 'scan_a19034.out')\n h2o2_input = h2o2_input.format(energy=energy_path, freq=freq_path, scan=scan_path)\n h2o2_path = os.path.join(abs_arkane_path, 'data', 'H2O2', 'H2O2.py')\n if not os.path.exists(os.path.dirname(h2o2_path)):\n os.makedirs(os.path.dirname(h2o2_path))\n with open(h2o2_path, 'w') as f:\n f.write(h2o2_input)\n h2o2 = Species(label='H2O2', smiles='OO')\n self.assertIsNone(h2o2.conformer)\n statmech_job = StatMechJob(species=h2o2, path=h2o2_path)\n statmech_job.level_of_theory = LevelOfTheory('b3lyp', '6-311+g(3df,2p)')\n statmech_job.load(pdep=False, plot=False)\n self.assertAlmostEqual(h2o2.conformer.E0.value_si, -146031.49933673252)\n os.remove(h2o2_path)\n\n################################################################################\n\n\nif __name__ == '__main__':\n unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))\n"
] | [
[
"numpy.diag",
"numpy.linalg.solve",
"numpy.linalg.inv",
"scipy.stats.distributions.t.ppf",
"numpy.array",
"numpy.sum"
],
[
"numpy.array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
intel/lp-opt-tool | [
"130eefa3586b38df6c0ff78cc8807ae273f6a63f",
"130eefa3586b38df6c0ff78cc8807ae273f6a63f"
] | [
"test/test_adaptor_pytorch.py",
"examples/pytorch/nlp/huggingface_models/text-classification/optimization_pipeline/prune_once_for_all/fx/run_glue_no_trainer_pruneOFA.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.quantized as nnq\nfrom torch.quantization import QuantStub, DeQuantStub\nimport torchvision\nimport unittest\nimport os\nfrom neural_compressor.adaptor import FRAMEWORKS\nfrom neural_compressor.model import MODELS\nfrom neural_compressor.adaptor.pytorch import PyTorchVersionMode\nimport neural_compressor.adaptor.pytorch as nc_torch\nfrom neural_compressor.experimental import Quantization, common\nfrom neural_compressor.conf.config import Quantization_Conf\nfrom neural_compressor.utils.pytorch import load\nfrom neural_compressor.utils.utility import recover\nimport shutil\nimport copy\nimport numpy as np\nimport yaml\n\ntry:\n try:\n import intel_pytorch_extension as ipex\n except:\n import intel_extension_for_pytorch as ipex\n TEST_IPEX = True\nexcept:\n TEST_IPEX = False\n\nPT_VERSION = nc_torch.get_torch_version()\nif PT_VERSION >= PyTorchVersionMode.PT18.value:\n FX_MODE = True\nelse:\n FX_MODE = False\n\n\nfake_dyn_yaml = '''\n model:\n name: imagenet\n framework: pytorch\n\n quantization:\n approach: post_training_dynamic_quant\n op_wise: {\n 'decoder': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n }\n }\n evaluation:\n accuracy:\n metric:\n topk: 1\n performance:\n warmup: 5\n iteration: 10\n\n tuning:\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n timeout: 0\n random_seed: 9527\n workspace:\n path: saved\n '''\n\n\nfake_ptq_yaml = '''\n model:\n name: imagenet\n framework: pytorch\n\n quantization:\n op_wise: {\n 'quant': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n },\n 'layer1.0.conv1': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n },\n 'layer1.0.conv2': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n },\n 'layer2.0.conv1': {\n 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},\n 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}\n },\n 'layer3.0.conv1': {\n 'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},\n 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}\n },\n 'layer1.0.add_relu': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n },\n }\n evaluation:\n accuracy:\n metric:\n topk: 1\n performance:\n warmup: 1\n iteration: 10\n\n tuning:\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n timeout: 0\n random_seed: 9527\n workspace:\n path: saved\n '''\n\nfake_ptq_yaml_for_fx = '''\n model:\n name: imagenet\n framework: pytorch_fx\n\n quantization:\n op_wise: {\n 'quant': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n },\n 'layer1.0.conv1': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n },\n 'layer1.0.conv2': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n },\n 'layer2.0.conv1': {\n 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},\n 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}\n },\n 'layer3.0.conv1': {\n 'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},\n 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}\n },\n 'layer1.0.add_relu': {\n 'activation': {'dtype': 
['fp32']},\n 'weight': {'dtype': ['fp32']}\n },\n 'default_qconfig': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n }\n }\n evaluation:\n accuracy:\n metric:\n topk: 1\n performance:\n warmup: 5\n iteration: 10\n\n tuning:\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n timeout: 0\n random_seed: 9527\n workspace:\n path: saved\n '''\n\n\nfake_qat_yaml = '''\n model:\n name: imagenet\n framework: pytorch\n\n quantization:\n approach: quant_aware_training\n train:\n end_epoch: 1\n iteration: 1\n optimizer:\n SGD:\n learning_rate: 0.0001\n criterion:\n CrossEntropyLoss:\n reduction: mean\n op_wise: {\n 'quant': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n },\n 'layer1.0.conv1': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n },\n 'layer1.0.conv2': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n },\n 'layer2.0.conv1': {\n 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},\n 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}\n },\n 'layer3.0.conv1': {\n 'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},\n 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}\n },\n 'layer1.0.add_relu': {\n 'activation': {'dtype': ['fp32']},\n 'weight': {'dtype': ['fp32']}\n }\n }\n evaluation:\n accuracy:\n metric:\n topk: 1\n\n tuning:\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n timeout: 0\n random_seed: 9527\n workspace:\n path: saved\n '''\n\n\ndef build_pytorch_yaml():\n with open('ptq_yaml.yaml', 'w', encoding=\"utf-8\") as f:\n f.write(fake_ptq_yaml)\n\n with open('dynamic_yaml.yaml', 'w', encoding=\"utf-8\") as f:\n f.write(fake_dyn_yaml)\n\n with open('qat_yaml.yaml', 'w', encoding=\"utf-8\") as f:\n f.write(fake_qat_yaml)\n\n\ndef build_pytorch_fx_yaml():\n if PT_VERSION >= PyTorchVersionMode.PT19.value:\n fake_fx_ptq_yaml = fake_ptq_yaml_for_fx\n else:\n fake_fx_ptq_yaml = fake_ptq_yaml.replace('pytorch', 'pytorch_fx')\n with open('fx_ptq_yaml.yaml', 'w', encoding=\"utf-8\") as f:\n f.write(fake_fx_ptq_yaml)\n\n fake_fx_dyn_yaml = fake_dyn_yaml.replace('pytorch', 'pytorch_fx')\n with open('fx_dynamic_yaml.yaml', 'w', encoding=\"utf-8\") as f:\n f.write(fake_fx_dyn_yaml)\n\n fake_fx_qat_yaml = fake_qat_yaml.replace('pytorch', 'pytorch_fx')\n with open('fx_qat_yaml.yaml', 'w', encoding=\"utf-8\") as f:\n f.write(fake_fx_qat_yaml)\n\n\ndef build_ipex_yaml():\n fake_yaml = '''\n model:\n name: imagenet\n framework: pytorch_ipex\n\n evaluation:\n accuracy:\n metric:\n topk: 1\n performance:\n warmup: 5\n iteration: 10\n\n tuning:\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n timeout: 0\n random_seed: 9527\n workspace:\n path: saved\n '''\n with open('ipex_yaml.yaml', 'w', encoding=\"utf-8\") as f:\n f.write(fake_yaml)\n\n\ndef build_dump_tensors_yaml():\n fake_yaml = '''\n model:\n name: imagenet\n framework: pytorch\n\n evaluation:\n accuracy:\n metric:\n topk: 1\n\n tuning:\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n timeout: 0\n random_seed: 9527\n workspace:\n path: saved\n tensorboard: true\n '''\n with open('dump_yaml.yaml', 'w', encoding=\"utf-8\") as f:\n f.write(fake_yaml)\n\n\nclass M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.quant = QuantStub()\n self.conv = nn.Conv2d(3, 1, 1)\n self.linear = nn.Linear(224 * 
224, 5)\n self.dequant = DeQuantStub()\n\n def forward(self, x):\n x = self.quant(x)\n x = self.conv(x)\n x = x.view(1, -1)\n x = self.linear(x)\n x = self.dequant(x)\n return x\n\n\nclass FP32Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n times = x.size(1)\n if times == 1:\n return x + x\n return x\n\n\nclass DynamicModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = nn.Conv2d(1, 1, 1)\n def forward(self, x):\n if x is not None:\n x = self.conv(x)\n return x\n\n\nclass SubModel(torch.nn.Module):\n def __init__(self, bypass=True):\n super().__init__()\n self.quant = QuantStub()\n self.conv = nn.Conv2d(1, 1, 1)\n self.conv1 = nn.Conv2d(1, 1, 1)\n self.bn = nn.BatchNorm2d(1)\n self.relu = nn.ReLU()\n self.fp32 = FP32Model()\n self.norm = nn.LayerNorm([1, 224, 224])\n self.dequant = DeQuantStub()\n self.bypass = bypass\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.quant(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.dequant(x)\n if not self.bypass:\n x = self.fp32(x)\n x = self.norm(x)\n return x\n\n\nclass PartialQuantModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.quant = QuantStub()\n self.conv = nn.Conv2d(3, 1, 1)\n self.bn = nn.BatchNorm2d(1)\n self.conv1 = nn.Conv2d(1, 1, 1)\n self.bn1 = nn.BatchNorm2d(1)\n self.conv2 = nn.Conv2d(1, 1, 1)\n self.linear = nn.Linear(224 * 224, 1)\n self.dequant = DeQuantStub()\n self.sub = SubModel(bypass=False)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.sub(x)\n x = self.quant(x)\n x = self.conv2(x)\n x = x.view(1, -1)\n x = self.linear(x)\n x = self.dequant(x)\n return x\n\nclass DynamicControlModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = nn.Conv2d(3, 1, 1)\n self.bn = nn.BatchNorm2d(1)\n self.linear = nn.Linear(224 * 224, 1)\n self.sub = SubModel()\n self.fp32 = FP32Model()\n self.dyn = DynamicModel()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.dyn(x)\n x = self.bn(x)\n x = self.sub(x)\n x = self.fp32(x)\n x = x.view(1, -1)\n x = self.linear(x)\n return x\n\n\ndef eval_func(model):\n # switch to evaluate mode\n model.eval()\n with torch.no_grad():\n input = torch.randn(1, 3, 224, 224)\n # compute output\n output = model(input)\n return 0.0\n\n\ndef q_func(model):\n optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)\n # switch to evaluate mode\n model.train()\n input = torch.randn(1, 3, 224, 224)\n # compute output\n output = model(input)\n loss = output.mean()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return model\n\n\nclass TestPytorchAdaptor(unittest.TestCase):\n framework_specific_info = {\"device\": \"cpu\",\n \"approach\": \"post_training_static_quant\",\n \"random_seed\": 1234,\n \"q_dataloader\": None,\n \"workspace_path\": \"./\"}\n framework = \"pytorch\"\n adaptor = FRAMEWORKS[framework](framework_specific_info)\n model = torchvision.models.quantization.resnet18()\n nc_model = MODELS['pytorch'](model)\n\n @classmethod\n def setUpClass(self):\n build_pytorch_yaml()\n build_dump_tensors_yaml()\n\n @classmethod\n def tearDownClass(self):\n os.remove('ptq_yaml.yaml')\n os.remove('dynamic_yaml.yaml')\n os.remove('qat_yaml.yaml')\n os.remove('dump_yaml.yaml')\n shutil.rmtree('./saved', ignore_errors=True)\n shutil.rmtree('runs', ignore_errors=True)\n\n def test_get_all_weight_name(self):\n assert len(list(self.nc_model.get_all_weight_names())) == 62\n\n def 
test_get_weight(self):\n for name, param in self.model.named_parameters():\n if name == \"layer4.1.conv2.weight\":\n param.data.fill_(0.0)\n if name == \"fc.bias\":\n param.data.fill_(0.1)\n assert int(torch.sum(self.nc_model.get_weight(\"layer4.1.conv2.weight\"))) == 0\n assert torch.allclose(\n torch.sum(\n self.nc_model.get_weight(\"fc.bias\")),\n torch.tensor(100.))\n\n def test_get_input(self):\n model = MODELS['pytorch'](torchvision.models.quantization.resnet18())\n model.model.eval().fuse_model()\n model.register_forward_pre_hook()\n rand_input = torch.rand(100, 3, 224, 224).float()\n model.model(rand_input)\n assert torch.equal(model.get_inputs('x'), rand_input)\n model.remove_hooks()\n\n def test_update_weights(self):\n self.nc_model.update_weights('fc.bias', torch.zeros([1000]))\n assert int(torch.sum(self.nc_model.get_weight(\"fc.bias\"))) == 0\n\n def test_get_gradient(self):\n with self.assertRaises(AssertionError):\n self.nc_model.get_gradient('fc.bias')\n\n for name, tensor in self.nc_model._model.named_parameters():\n if name == 'fc.bias':\n tensor.grad = torch.zeros_like(tensor)\n break\n assert torch.equal(torch.Tensor(self.nc_model.get_gradient('fc.bias')), torch.zeros_like(tensor))\n\n rand_input = torch.rand(100, 3, 224, 224).float()\n rand_input.grad = torch.ones_like(rand_input)\n assert torch.equal(torch.Tensor(self.nc_model.get_gradient(rand_input)),\n torch.ones_like(rand_input))\n\n def test_report_sparsity(self):\n df, total_sparsity = self.nc_model.report_sparsity()\n self.assertTrue(total_sparsity > 0)\n self.assertTrue(len(df) == 22)\n\n def test_quantization_saved(self):\n for fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:\n model = M()\n quantizer = Quantization(fake_yaml)\n quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True\n dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)\n quantizer.model = model\n quantizer.calib_dataloader = common.DataLoader(dataset)\n quantizer.eval_dataloader = common.DataLoader(dataset)\n q_model = quantizer.fit()\n eval_func(q_model)\n q_model.save('./saved')\n # Load configure and weights by neural_compressor.utils\n saved_model = load(\"./saved\", model)\n eval_func(saved_model)\n # recover int8 model from history\n history_file = './saved/history.snapshot'\n model_recover = recover(model, history_file, 0)\n eval_func(model_recover)\n self.assertEqual(type(saved_model.conv), \\\n type(model_recover.conv))\n shutil.rmtree('./saved', ignore_errors=True)\n from neural_compressor.experimental import Benchmark\n evaluator = Benchmark('ptq_yaml.yaml')\n # Load configure and weights by neural_compressor.model\n evaluator.model = model\n evaluator.b_dataloader = common.DataLoader(dataset)\n evaluator()\n evaluator.model = model\n evaluator()\n\n for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:\n model = copy.deepcopy(self.model)\n if fake_yaml == 'ptq_yaml.yaml':\n model.eval().fuse_model()\n conf = Quantization_Conf(fake_yaml)\n quantizer = Quantization(conf)\n dataset = quantizer.dataset('dummy', (100, 3, 224, 224))\n quantizer.model = model\n if fake_yaml == 'qat_yaml.yaml':\n quantizer.q_func = q_func\n else:\n quantizer.calib_dataloader = common.DataLoader(dataset)\n quantizer.eval_func = eval_func\n q_model = quantizer.fit()\n q_model.save('./saved')\n # Load configure and weights by neural_compressor.utils\n saved_model = load(\"./saved\", model)\n eval_func(saved_model)\n shutil.rmtree('./saved', ignore_errors=True)\n\n def test_quantization_new_saved(self):\n for 
fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:\n model = M()\n quantizer = Quantization(fake_yaml)\n quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True\n dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)\n quantizer.model = model\n quantizer.calib_dataloader = common.DataLoader(dataset)\n quantizer.eval_dataloader = common.DataLoader(dataset)\n q_model = quantizer.fit()\n eval_func(q_model)\n torch.save(q_model.quantized_state_dict(), './saved/model.pt')\n # Load configure and weights by neural_compressor.utils\n from neural_compressor.experimental.common import Model\n common_model = Model(model)\n common_model.load_quantized_state_dict(torch.load('./saved/model.pt'))\n eval_func(common_model)\n self.assertEqual(type(q_model._model.linear), \\\n type(common_model._model.linear))\n shutil.rmtree('./saved', ignore_errors=True)\n\n def test_non_quant_module(self):\n for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:\n model = PartialQuantModel()\n conf = Quantization_Conf(fake_yaml)\n quantizer = Quantization(conf)\n dataset = quantizer.dataset('dummy', (1, 3, 224, 224))\n non_quant_dict = {'non_quant_module_name': ['conv', 'conv1', 'sub.conv'], \\\n 'non_quant_module_class': ['BatchNorm2d', 'FP32Model']}\n quantizer.model = common.Model(model, **non_quant_dict)\n if fake_yaml == 'qat_yaml.yaml':\n quantizer.q_func = q_func\n else:\n quantizer.calib_dataloader = common.DataLoader(dataset)\n quantizer.eval_func = eval_func\n q_model = quantizer.fit()\n q_model.save('./saved')\n saved_model = load(\"./saved\", model, **non_quant_dict)\n eval_func(saved_model)\n shutil.rmtree('./saved', ignore_errors=True)\n\n def test_workspace_path(self):\n model = M()\n quantizer = Quantization('ptq_yaml.yaml')\n quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True\n dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)\n quantizer.model = model\n quantizer.calib_dataloader = common.DataLoader(dataset)\n quantizer.eval_dataloader = common.DataLoader(dataset)\n q_model = quantizer.fit()\n eval_func(q_model)\n torch.save(q_model.quantized_state_dict(), './saved/best_model.pt')\n # Load configure and weights by workspace_path\n from neural_compressor.experimental.common import Model\n common_model = Model(model)\n common_model.workspace_path = './saved'\n eval_func(common_model)\n self.assertEqual(type(q_model._model.linear), \\\n type(common_model._model.linear))\n shutil.rmtree('./saved', ignore_errors=True)\n\n def test_get_graph_info(self):\n from neural_compressor.model.torch_model import PyTorchModel\n model = PyTorchModel(self.model)\n op_map = model.graph_info\n self.assertTrue(op_map['conv1'] == 'Conv2d')\n\n def test_tensorboard(self):\n model = copy.deepcopy(self.nc_model)\n model.model.eval().fuse_model()\n quantizer = Quantization('dump_yaml.yaml')\n dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)\n quantizer.model = model.model\n quantizer.calib_dataloader = common.DataLoader(dataset)\n quantizer.eval_func = eval_func\n quantizer.fit()\n self.assertTrue(True if os.path.exists('runs/eval/baseline_acc0.0') else False)\n quantizer.eval_dataloader = common.DataLoader(dataset)\n quantizer.eval_func = None\n quantizer.fit()\n self.assertTrue(True if os.path.exists('runs/eval/baseline_acc0.0') else False)\n\n def test_tensor_dump_and_set(self):\n model = copy.deepcopy(self.nc_model)\n model.model.eval().fuse_model()\n quantizer = Quantization('ptq_yaml.yaml')\n dataset = 
quantizer.dataset('dummy', (100, 3, 224, 224), label=True)\n dataloader = common.DataLoader(dataset)\n dataloader = common._generate_common_dataloader(dataloader, 'pytorch')\n quantizer.eval_dataloader = dataloader\n quantizer.calib_dataloader = dataloader\n quantizer.model = model.model\n q_model = quantizer.fit()\n quantizer.strategy.adaptor.inspect_tensor(\n model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'],\n iteration_list=[1, 2], inspect_type='all', save_to_disk=True)\n load_array = lambda *a, **k: np.load(*a, allow_pickle=True, **k)\n a = load_array('saved/dump_tensor/activation_iter1.npz')\n w = load_array('saved/dump_tensor/weight.npz')\n if PT_VERSION >= PyTorchVersionMode.PT18.value:\n self.assertTrue(w['conv1.0'].item()['conv1.0.weight'].shape[0] ==\n a['conv1.0'].item()['conv1.0.output0'].shape[1])\n else:\n self.assertTrue(w['conv1.0'].item()['conv1.0.weight'].shape[0] ==\n a['conv1.0'].item()['conv1.1.output0'].shape[1])\n data = np.random.random(w['conv1.0'].item()['conv1.0.weight'].shape).astype(np.float32)\n quantizer.strategy.adaptor.set_tensor(q_model, {'conv1.0.weight': data})\n changed_tensor = q_model.get_weight('conv1.weight')\n scales = changed_tensor.q_per_channel_scales()\n changed_tensor_fp32 = torch.dequantize(changed_tensor)\n self.assertTrue(np.allclose(data, changed_tensor_fp32.numpy(), atol=2 / np.min(scales.numpy())))\n quantizer.strategy.adaptor.inspect_tensor(\n q_model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'],\n iteration_list=[1, 2], inspect_type='all', save_to_disk=False)\n\n def test_get_graph_info(self):\n from neural_compressor.adaptor.pytorch import get_ops_recursively\n model = copy.deepcopy(self.model)\n op_map = {}\n get_ops_recursively(model, '', op_map)\n self.assertTrue(op_map['conv1'] == 'Conv2d')\n\n def test_forward_wrapper(self):\n vision_model = torchvision.models.resnet18()\n class dummymodel(torch.nn.Module):\n def __init__(self, model):\n super(dummymodel, self).__init__()\n self._model = model\n def forward(self,input=None):\n return self._model(input)\n\n data = [[{'input': torch.rand(3,224,224)}, torch.ones(1,1)], ]\n # dataloader.batch_size=100\n dataloader = common.DataLoader(data, batch_size=1)\n\n quantizer = Quantization('dynamic_yaml.yaml')\n model = dummymodel(vision_model)\n quantizer.model = model\n quantizer.calib_dataloader = dataloader\n quantizer.eval_dataloader = dataloader\n quantizer.fit()\n\n def test_floatfunctions_fallback(self):\n class ModelWithFunctionals(torch.nn.Module):\n def __init__(self):\n super(ModelWithFunctionals, self).__init__()\n self.mycat = nnq.FloatFunctional()\n self.myadd = nnq.FloatFunctional()\n self.myadd_relu = nnq.FloatFunctional()\n # Tracing doesnt work yet for c10 ops with scalar inputs\n # https://github.com/pytorch/pytorch/issues/27097\n self.my_scalar_add = nnq.FloatFunctional()\n self.mymul = nnq.FloatFunctional()\n self.my_scalar_mul = nnq.FloatFunctional()\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n\n def forward(self, x):\n x = self.quant(x)\n y = self.mycat.cat([x, x, x])\n z = self.myadd.add(y, y)\n w = self.myadd_relu.add_relu(z, z)\n # Tracing doesnt work yet for c10 ops with scalar inputs\n # https://github.com/pytorch/pytorch/issues/27097\n w = self.my_scalar_add.add_scalar(w, -0.5)\n w = self.mymul.mul(w, w)\n w = self.my_scalar_mul.mul_scalar(w, 0.5)\n w = self.dequant(w)\n return w\n\n model = ModelWithFunctionals()\n model = MODELS['pytorch'](model)\n x = torch.rand(10, 1, dtype=torch.float)\n y = model.model(x)\n fallback_ops = 
[]\n q_capability = self.adaptor.query_fw_capability(model)\n for k, v in q_capability[\"opwise\"].items():\n if k[0] != \"quant\" and k[0] != \"dequant\":\n fallback_ops.append(k[0])\n model.model.qconfig = torch.quantization.default_qconfig\n model.model.quant.qconfig = torch.quantization.default_qconfig\n if PT_VERSION >= PyTorchVersionMode.PT18.value:\n model.model.dequant.qconfig = torch.quantization.default_qconfig\n nc_torch._fallback_quantizable_ops_recursively(\n model.model, '', fallback_ops, op_qcfgs={})\n torch.quantization.add_observer_(model.model)\n model.model(x)\n torch.quantization.convert(model.model, self.adaptor.q_mapping, inplace=True)\n qy = model.model(x)\n tol = {'atol': 1e-01, 'rtol': 1e-03}\n self.assertTrue(np.allclose(y, qy, **tol))\n\n\[email protected](not TEST_IPEX, \"Unsupport Intel PyTorch Extension\")\nclass TestPytorchIPEXAdaptor(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n build_ipex_yaml()\n\n @classmethod\n def tearDownClass(self):\n os.remove('ipex_yaml.yaml')\n shutil.rmtree('./saved', ignore_errors=True)\n shutil.rmtree('runs', ignore_errors=True)\n\n def test_tuning_ipex(self):\n from neural_compressor.experimental import Quantization\n model = M()\n quantizer = Quantization('ipex_yaml.yaml')\n quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True\n dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)\n quantizer.model = model\n quantizer.calib_dataloader = common.DataLoader(dataset)\n quantizer.eval_dataloader = common.DataLoader(dataset)\n nc_model = quantizer.fit()\n nc_model.save('./saved')\n try:\n script_model = torch.jit.script(model.to(ipex.DEVICE))\n except:\n script_model = torch.jit.trace(model.to(ipex.DEVICE), torch.randn(10, 3, 224, 224).to(ipex.DEVICE))\n from neural_compressor.experimental import Benchmark\n evaluator = Benchmark('ipex_yaml.yaml')\n evaluator.model = script_model\n evaluator.b_dataloader = common.DataLoader(dataset)\n results = evaluator()\n\n\[email protected](not FX_MODE, \"Unsupport Fx Mode with PyTorch Version Below 1.8\")\nclass TestPytorchFXAdaptor(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n build_pytorch_fx_yaml()\n\n @classmethod\n def tearDownClass(self):\n os.remove('fx_ptq_yaml.yaml')\n os.remove('fx_dynamic_yaml.yaml')\n shutil.rmtree('./saved', ignore_errors=True)\n shutil.rmtree('runs', ignore_errors=True)\n\n def test_fx_quant(self):\n for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:\n model_origin = torchvision.models.resnet18()\n # run fx_quant in neural_compressor and save the quantized GraphModule\n quantizer = Quantization(fake_yaml)\n dataset = quantizer.dataset('dummy', (10, 3, 224, 224), label=True)\n quantizer.eval_func = eval_func\n if fake_yaml == 'fx_qat_yaml.yaml':\n quantizer.q_func = q_func\n else:\n quantizer.calib_dataloader = common.DataLoader(dataset)\n quantizer.model = common.Model(model_origin,\n **{'prepare_custom_config_dict': \\\n {'non_traceable_module_name': ['a']},\n 'convert_custom_config_dict': \\\n {'preserved_attributes': []}\n })\n q_model = quantizer.fit()\n q_model.save('./saved')\n # Load configure and weights with neural_compressor.utils\n model_fx = load('./saved', model_origin,\n **{'prepare_custom_config_dict': \\\n {'non_traceable_module_name': ['a']},\n 'convert_custom_config_dict': \\\n {'preserved_attributes': []}\n })\n self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))\n\n # recover int8 model with only tune_cfg\n history_file = './saved/history.snapshot'\n 
model_fx_recover = recover(model_origin, history_file, 0,\n **{'prepare_custom_config_dict': \\\n {'non_traceable_module_name': ['a']},\n 'convert_custom_config_dict': \\\n {'preserved_attributes': []}\n })\n self.assertEqual(model_fx.code, model_fx_recover.code)\n shutil.rmtree('./saved', ignore_errors=True)\n\n for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:\n model_origin = M()\n # run fx_quant in neural_compressor and save the quantized GraphModule\n quantizer = Quantization(fake_yaml)\n quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True\n dataset = quantizer.dataset('dummy', (10, 3, 224, 224), label=True)\n quantizer.calib_dataloader = common.DataLoader(dataset)\n quantizer.eval_dataloader = common.DataLoader(dataset)\n quantizer.model = common.Model(model_origin,\n **{'prepare_custom_config_dict': \\\n {'non_traceable_module_name': ['a']},\n 'convert_custom_config_dict': \\\n {'preserved_attributes': []}\n })\n q_model = quantizer.fit()\n q_model.save('./saved')\n # Load configure and weights with neural_compressor.utils\n model_fx = load('./saved', model_origin,\n **{'prepare_custom_config_dict': \\\n {'non_traceable_module_name': ['a']},\n 'convert_custom_config_dict': \\\n {'preserved_attributes': []}\n })\n self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))\n shutil.rmtree('./saved', ignore_errors=True)\n\n @unittest.skipIf(PT_VERSION < PyTorchVersionMode.PT19.value,\n \"Please use PyTroch 1.9 or higher version for dynamic quantization with pytorch_fx backend\")\n def test_fx_dynamic_quant(self):\n # Model Definition\n class LSTMModel(nn.Module):\n '''Container module with an encoder, a recurrent module, and a decoder.'''\n\n def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5):\n super(LSTMModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n self.init_weights()\n self.nhid = nhid\n self.nlayers = nlayers\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.zero_()\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output)\n return decoded, hidden\n\n model = LSTMModel(\n ntoken = 10,\n ninp = 512,\n nhid = 256,\n nlayers = 5,\n )\n\n # run fx_quant in neural_compressor and save the quantized GraphModule\n model.eval()\n quantizer = Quantization('fx_dynamic_yaml.yaml')\n quantizer.model = common.Model(model,\n **{'prepare_custom_config_dict': \\\n {'non_traceable_module_name': ['a']},\n 'convert_custom_config_dict': \\\n {'preserved_attributes': []}\n })\n q_model = quantizer.fit()\n q_model.save('./saved')\n\n # Load configure and weights by neural_compressor.utils\n model_fx = load(\"./saved\", model,\n **{'prepare_custom_config_dict': \\\n {'non_traceable_module_name': ['a']},\n 'convert_custom_config_dict': \\\n {'preserved_attributes': []}\n })\n self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))\n # recover int8 model with only tune_cfg\n history_file = './saved/history.snapshot'\n model_fx_recover = recover(model, history_file, 0,\n **{'prepare_custom_config_dict': \\\n {'non_traceable_module_name': ['a']},\n 'convert_custom_config_dict': \\\n {'preserved_attributes': []}\n })\n 
self.assertEqual(model_fx.code, model_fx_recover.code)\n shutil.rmtree('./saved', ignore_errors=True)\n\n def test_fx_sub_module_quant(self):\n for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:\n model_origin = DynamicControlModel()\n # run fx_quant in neural_compressor and save the quantized GraphModule\n quantizer = Quantization(fake_yaml)\n dataset = quantizer.dataset('dummy', (1, 3, 224, 224), label=True)\n quantizer.eval_func = eval_func\n if fake_yaml == 'fx_qat_yaml.yaml':\n quantizer.q_func = q_func\n else:\n quantizer.calib_dataloader = common.DataLoader(dataset)\n quantizer.model = common.Model(model_origin,\n **{'prepare_custom_config_dict': \\\n {'non_traceable_module_name': ['a']},\n 'convert_custom_config_dict': \\\n {'preserved_attributes': []}\n })\n q_model = quantizer.fit()\n q_model.save('./saved')\n # Load configure and weights with neural_compressor.utils\n model_fx = load('./saved/best_model.pt', model_origin,\n **{'prepare_custom_config_dict': \\\n {'non_traceable_module_name': ['a']},\n 'convert_custom_config_dict': \\\n {'preserved_attributes': []}\n })\n self.assertTrue(isinstance(model_fx.sub, torch.fx.graph_module.GraphModule))\n\n # recover int8 model with only tune_cfg\n history_file = './saved/history.snapshot'\n model_fx_recover = recover(model_origin, history_file, 0,\n **{'prepare_custom_config_dict': \\\n {'non_traceable_module_name': ['a']},\n 'convert_custom_config_dict': \\\n {'preserved_attributes': []}\n })\n self.assertEqual(model_fx.sub.code, model_fx_recover.sub.code)\n shutil.rmtree('./saved', ignore_errors=True)\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning a 🤗 Transformers model for sequence classification on GLUE.\"\"\"\nimport argparse\nimport logging\nfrom neural_compressor.utils.logger import log\nimport math\nimport os\nimport random\nimport copy\nimport datasets\nfrom datasets import load_dataset, load_metric\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torch.distributed as dist\nfrom tqdm.auto import tqdm\n\nimport numpy as np\n\nimport transformers\nfrom transformers import (\n AdamW,\n AutoConfig,\n AutoModelForSequenceClassification,\n AutoTokenizer,\n DataCollatorWithPadding,\n PretrainedConfig,\n SchedulerType,\n default_data_collator,\n get_scheduler,\n set_seed,\n)\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\nlogger = logging.getLogger(__name__)\n\ntask_to_keys = {\n \"cola\": (\"sentence\", None),\n \"mnli\": (\"premise\", \"hypothesis\"),\n \"mrpc\": (\"sentence1\", \"sentence2\"),\n \"qnli\": (\"question\", \"sentence\"),\n \"qqp\": (\"question1\", \"question2\"),\n \"rte\": (\"sentence1\", \"sentence2\"),\n \"sst2\": (\"sentence\", None),\n \"stsb\": (\"sentence1\", \"sentence2\"),\n \"wnli\": (\"sentence1\", \"sentence2\"),\n}\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a text classification task\")\n parser.add_argument(\n \"--task_name\",\n type=str,\n default=None,\n help=\"The name of the glue task to train on.\",\n choices=list(task_to_keys.keys()),\n )\n parser.add_argument(\n \"--train_file\", type=str, default=None, help=\"A csv or a json file containing the training data.\"\n )\n parser.add_argument(\n \"--validation_file\", type=str, default=None, help=\"A csv or a json file containing the validation data.\"\n )\n parser.add_argument(\n \"--max_seq_length\",\n type=int,\n default=128,\n help=(\n \"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,\"\n \" sequences shorter will be padded if `--pad_to_max_lengh` is passed.\"\n ),\n )\n parser.add_argument(\n \"--pad_to_max_length\",\n action=\"store_true\",\n help=\"If passed, pad all samples to `max_length`. 
Otherwise, dynamic padding is used.\",\n )\n parser.add_argument(\n \"--model_name_or_path\",\n type=str,\n help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n required=True,\n )\n parser.add_argument(\n \"--use_slow_tokenizer\",\n action=\"store_true\",\n help=\"If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).\",\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=8,\n help=\"Batch size (per device) for the training dataloader.\",\n )\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=5e-5,\n help=\"Initial learning rate (after the potential warmup period) to use.\",\n )\n parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\n parser.add_argument(\"--num_train_epochs\", type=int, default=3, help=\"Total number of training epochs to perform.\")\n parser.add_argument(\n \"--max_train_steps\",\n type=int,\n default=None,\n help=\"Total number of training steps to perform. If provided, overrides num_train_epochs.\",\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\n \"--lr_scheduler_type\",\n type=SchedulerType,\n default=\"linear\",\n help=\"The scheduler type to use.\",\n choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\n )\n parser.add_argument(\n \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\n )\n parser.add_argument('--use_auth_token', action='store_true', help=\"use authentic token\")\n parser.add_argument(\"--resume\", type=str, default=None, help=\"Where to resume from the provided model.\")\n parser.add_argument(\"--output_dir\", type=str, default=None, help=\"Where to store the final model.\")\n parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n parser.add_argument('--do_prune', action='store_true',\n help=\"prune model\")\n parser.add_argument('--do_eval', action='store_true',\n help=\"evaluate model\")\n parser.add_argument('--do_quantization', action='store_true',\n help=\"do quantization aware training on model\")\n parser.add_argument('--do_distillation', action='store_true',\n help=\"do distillation with pre-trained teacher model\")\n parser.add_argument(\"--prune_config\", default='prune.yaml', help=\"pruning config\")\n parser.add_argument(\"--quantization_config\", default='qat.yaml', help=\"quantization config\")\n parser.add_argument(\"--distillation_config\", default='distillation.yaml', help=\"pruning config\")\n parser.add_argument(\n \"--teacher_model_name_or_path\",\n type=str,\n help=\"Path to pretrained model or model identifier from huggingface.co/models\"\n \" to be the teacher model.\",\n required=True,\n )\n parser.add_argument(\"--core_per_instance\", type=int, default=-1, help=\"cores per instance.\")\n \n parser.add_argument(\"--temperature\", default=1, type=float,\n help='temperature parameter of distillation')\n parser.add_argument(\"--loss_types\", default=['CE', 'KL'], type=str, nargs='+',\n help='loss types of distillation, should be a list of length 2, '\n 'first for student targets loss, second for teacher student loss.')\n parser.add_argument(\"--loss_weights\", default=[0.5, 0.5], type=float, nargs='+',\n help='loss weights of distillation, should be a list of length 2, '\n 'and 
sum to 1.0, first for student targets loss weight, '\n 'second for teacher student loss weight.')\n args = parser.parse_args()\n\n # Sanity checks\n if args.task_name is None and args.train_file is None and args.validation_file is None:\n raise ValueError(\"Need either a task name or a training/validation file.\")\n else:\n if args.train_file is not None:\n extension = args.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if args.validation_file is not None:\n extension = args.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n\n if args.output_dir is not None:\n os.makedirs(args.output_dir, exist_ok=True)\n\n return args\n\ndef gather_results(predictions, gt):\n if rank != -1:\n pred_list = [predictions.clone() for _ in range(world)] if rank == 0 else []\n gt_list = [gt.clone() for _ in range(world)] if rank == 0 else []\n dist.gather(predictions, gather_list=pred_list)\n dist.gather(gt, gather_list=gt_list)\n return pred_list[0], gt_list[0]\n else:\n return predictions, gt\n\ndef evaluation(model, eval_dataloader, metric):\n logger.info(\"***** Running eval *****\")\n logger.info(f\" Num examples = {len(eval_dataloader) }\")\n model.eval()\n eval_dataloader = tqdm(eval_dataloader, desc=\"Evaluating\")\n for step, batch in enumerate(eval_dataloader):\n outputs = model(**batch)['logits']\n predictions = outputs.argmax(dim=-1)\n metric.add_batch(\n predictions=predictions,\n references=batch[\"labels\"],\n )\n\n eval_metric = metric.compute()\n logger.info(f\"eval_metric : {eval_metric}\")\n return eval_metric['accuracy']\n\ndef train(args, model, train_dataloader, lr_scheduler, criterion, optimizer, agent, eval_dataloader, metric):\n # Train!\n total_batch_size = args.batch_size * args.gradient_accumulation_steps\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataloader)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.batch_size}\")\n logger.info(f\" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n # Only show the progress bar once on each machine.\n completed_steps = 0\n\n agent.pre_epoch_begin()\n model = agent.model.model\n for epoch in range(args.num_train_epochs):\n model.train()\n train_dataloader = tqdm(train_dataloader, desc=\"Training\")\n agent.on_epoch_begin(epoch)\n for step, batch in enumerate(train_dataloader):\n agent.on_batch_begin(step)\n teacher_logits = None\n if 'teacher_logits' in batch:\n teacher_logits = batch['teacher_logits']\n del batch['teacher_logits']\n outputs = model(**batch)\n if criterion is None:\n loss = outputs.loss\n else:\n criterion.teacher_outputs = teacher_logits\n loss = criterion(outputs['logits'], batch[\"labels\"])\n loss = loss / args.gradient_accumulation_steps\n loss.backward()\n if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n optimizer.step()\n agent.on_post_grad()\n lr_scheduler.step()\n optimizer.zero_grad()\n completed_steps += 1\n agent.on_batch_end()\n if completed_steps >= args.max_train_steps:\n break\n agent.on_epoch_end()\n evaluation(model, eval_dataloader, metric)\n agent.post_epoch_end()\n\ndef main():\n args = parse_args()\n\n # Make one log on every process with the configuration for debugging.\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n )\n\n # If passed along, set the training seed now.\n if args.seed is not None:\n set_seed(args.seed)\n\n # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)\n # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).\n\n # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the\n # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named\n # label if at least two columns are provided.\n\n # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this\n # single column. 
You can easily tweak this behavior (see below)\n\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if args.task_name is not None:\n # Downloading and loading a dataset from the hub.\n raw_datasets = load_dataset(\"glue\", args.task_name)\n else:\n # Loading the dataset from local csv or json file.\n data_files = {}\n if args.train_file is not None:\n data_files[\"train\"] = args.train_file\n if args.validation_file is not None:\n data_files[\"validation\"] = args.validation_file\n extension = (args.train_file if args.train_file is not None else args.valid_file).split(\".\")[-1]\n raw_datasets = load_dataset(extension, data_files=data_files)\n # See more about loading any type of standard or custom dataset at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Labels\n if args.task_name is not None:\n is_regression = args.task_name == \"stsb\"\n if not is_regression:\n label_list = raw_datasets[\"train\"].features[\"label\"].names\n num_labels = len(label_list)\n else:\n num_labels = 1\n else:\n # Trying to have good defaults here, don't hesitate to tweak to your needs.\n is_regression = datasets[\"train\"].features[\"label\"].dtype in [\"float32\", \"float64\"]\n if is_regression:\n num_labels = 1\n else:\n # A useful fast method:\n # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique\n label_list = datasets[\"train\"].unique(\"label\")\n label_list.sort() # Let's sort it for determinism\n num_labels = len(label_list)\n\n # Load pretrained model and tokenizer\n #\n # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(args.model_name_or_path, \n num_labels=num_labels, \n finetuning_task=args.task_name, \n use_auth_token=args.use_auth_token)\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, \n use_fast=not args.use_slow_tokenizer, \n use_auth_token=args.use_auth_token)\n model = AutoModelForSequenceClassification.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config, use_auth_token=args.use_auth_token\n )\n if args.resume:\n try:\n model.load_state_dict(torch.load(args.resume))\n logger.info('Resumed model from {}'.format(args.resume))\n except:\n raise TypeError('Provided {} is not a valid checkpoint file, '\n 'please provide .pt file'.format(args.resume))\n\n # Preprocessing the datasets\n if args.task_name is not None:\n sentence1_key, sentence2_key = task_to_keys[args.task_name]\n else:\n # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.\n non_label_column_names = [name for name in datasets[\"train\"].column_names if name != \"label\"]\n if \"sentence1\" in non_label_column_names and \"sentence2\" in non_label_column_names:\n sentence1_key, sentence2_key = \"sentence1\", \"sentence2\"\n else:\n if len(non_label_column_names) >= 2:\n sentence1_key, sentence2_key = non_label_column_names[:2]\n else:\n sentence1_key, sentence2_key = non_label_column_names[0], None\n\n # Some models have set the order of the labels to use, so let's make sure we do use it.\n label_to_id = None\n if (\n model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id\n and args.task_name is not None\n and not is_regression\n ):\n # Some have all caps in their config, some don't.\n label_name_to_id = {k.lower(): v 
for k, v in model.config.label2id.items()}\n if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):\n logger.info(\n f\"The configuration of the model provided the following label correspondence: {label_name_to_id}. \"\n \"Using it!\"\n )\n label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}\n else:\n logger.warn(\n \"Your model seems to have been trained with labels, but they don't match the dataset: \",\n f\"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.\"\n \"\\nIgnoring the model labels as a result.\",\n )\n elif args.task_name is None:\n label_to_id = {v: i for i, v in enumerate(label_list)}\n\n padding = \"max_length\" if args.pad_to_max_length else False\n\n def preprocess_function(examples):\n # Tokenize the texts\n texts = (\n (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])\n )\n result = tokenizer(*texts, padding=padding, max_length=args.max_seq_length, truncation=True)\n\n if \"label\" in examples:\n if label_to_id is not None:\n # Map labels to IDs (not necessary for GLUE tasks)\n result[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n else:\n # In all cases, rename the column to labels because the model will expect that.\n result[\"labels\"] = examples[\"label\"]\n return result\n\n processed_datasets = raw_datasets.map(\n preprocess_function, batched=True, remove_columns=raw_datasets[\"train\"].column_names\n )\n\n train_dataset = processed_datasets[\"train\"]\n eval_dataset = processed_datasets[\"validation_matched\" if args.task_name == \"mnli\" else \"validation\"]\n\n # Log a few random samples from the training set:\n for index in random.sample(range(len(train_dataset)), 3):\n logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n # DataLoaders creation:\n if args.pad_to_max_length:\n # If padding was already done ot max length, we use the default data collator that will just convert everything\n # to tensors.\n data_collator = default_data_collator\n else:\n # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of\n # the samples passed). 
When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple\n # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).\n data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=None)\n\n if args.do_distillation:\n teacher_config = AutoConfig.from_pretrained(args.teacher_model_name_or_path, \\\n num_labels=num_labels, finetuning_task=args.task_name)\n teacher_tokenizer = AutoTokenizer.from_pretrained(args.teacher_model_name_or_path, \\\n use_fast=not args.use_slow_tokenizer)\n assert teacher_tokenizer.vocab == tokenizer.vocab, \\\n 'teacher model and student model should have same tokenizer.'\n teacher_model = AutoModelForSequenceClassification.from_pretrained(\n args.teacher_model_name_or_path,\n from_tf=bool(\".ckpt\" in args.teacher_model_name_or_path),\n config=teacher_config,\n )\n para_counter = lambda model:sum(p.numel() for p in model.parameters())\n logger.info(\"***** Number of teacher model parameters: {:.2f}M *****\".format(\\\n para_counter(teacher_model)/10**6))\n logger.info(\"***** Number of student model parameters: {:.2f}M *****\".format(\\\n para_counter(model)/10**6))\n \n # get logits of teacher model\n if args.loss_weights[1] > 0:\n def get_logits(teacher_model, train_dataset):\n logger.info(\"***** Getting logits of teacher model *****\")\n logger.info(f\" Num examples = {len(train_dataset) }\")\n teacher_model.eval()\n npy_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n '{}.{}.npy'.format(args.task_name, args.teacher_model_name_or_path.replace('/', '.')))\n if os.path.exists(npy_file):\n teacher_logits = [x for x in np.load(npy_file)]\n else:\n train_dataloader = DataLoader(train_dataset, collate_fn=data_collator, \\\n batch_size=args.batch_size)\n train_dataloader = tqdm(train_dataloader, desc=\"Evaluating\")\n teacher_logits = []\n for step, batch in enumerate(train_dataloader):\n outputs = teacher_model(**batch)\n teacher_logits += [x for x in outputs['logits'].numpy()]\n np.save(npy_file, np.array(teacher_logits))\n return train_dataset.add_column('teacher_logits', teacher_logits)\n with torch.no_grad():\n train_dataset = get_logits(teacher_model, train_dataset)\n\n # Dataloader\n train_dataloader = DataLoader(\n train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.batch_size, drop_last=True\n )\n eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.batch_size, drop_last=True)\n\n # Optimizer\n # Split weights in two groups, one with weight decay and the other not.\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n\n # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be\n # shorter in multiprocess)\n\n # Scheduler and math around the number of training steps.\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n if args.max_train_steps is None:\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n else:\n args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n lr_scheduler 
= get_scheduler(\n name=args.lr_scheduler_type,\n optimizer=optimizer,\n num_warmup_steps=args.num_warmup_steps,\n num_training_steps=args.max_train_steps,\n )\n\n # Get the metric function\n if args.task_name is not None:\n metric = load_metric(\"glue\", args.task_name)\n \n def train_func(model):\n return train(args, model, train_dataloader, lr_scheduler, \\\n criterion, optimizer, agent, eval_dataloader, metric)\n\n def eval_func(model):\n return evaluation(model, eval_dataloader, metric)\n\n if args.do_prune:\n # Pruning!\n from neural_compressor.experimental import Pruning, common\n agent = Pruning(args.prune_config)\n criterion = None # use huggingface's loss\n if args.do_distillation:\n logger.info('='*30 + 'Teacher model on validation set' + '='*30)\n evaluation(teacher_model, eval_dataloader, metric) \n\n # from neural_compressor.experimental import Distillation\n from neural_compressor.experimental.common.criterion import PyTorchKnowledgeDistillationLoss\n criterion = PyTorchKnowledgeDistillationLoss(\n temperature=args.temperature,\n loss_types=args.loss_types,\n loss_weights=args.loss_weights)\n criterion.teacher_model = teacher_model\n \n if args.do_quantization:\n # transforming the student model to fx mode for QAT\n from transformers.utils.fx import symbolic_trace\n for input in eval_dataloader:\n input_names = input.keys()\n break\n model = symbolic_trace(model, input_names=input_names, \\\n batch_size=args.batch_size, \\\n sequence_length=args.max_seq_length)\n \n from neural_compressor.experimental.scheduler import Scheduler\n from neural_compressor.experimental import Quantization\n combs = [agent, Quantization(args.quantization_config)]\n scheduler = Scheduler() \n scheduler.model = common.Model(model)\n agent = scheduler.combine(*combs)\n agent.train_func = train_func\n agent.eval_func = eval_func\n print(agent)\n scheduler.append(agent)\n model = scheduler.fit()\n else:\n agent.model = common.Model(model)\n agent.pruning_func = train_func\n agent.eval_func = eval_func\n model = agent()\n model.save(args.output_dir)\n # change to framework model for further use\n model = model.model\n\n if args.do_eval:\n eval_dataloader = tqdm(eval_dataloader, desc=\"Evaluating\")\n model.eval()\n for step, batch in enumerate(eval_dataloader):\n outputs = model(**batch)\n predictions = outputs['logits'].argmax(dim=-1)\n metric.add_batch(\n predictions=predictions,\n references=batch[\"labels\"],\n )\n\n eval_metric = metric.compute()\n logger.info(f\"eval_metric: {eval_metric}\")\n\n if args.task_name == \"mnli\":\n # Final evaluation on mismatched validation set\n eval_dataset = processed_datasets[\"validation_mismatched\"]\n eval_dataloader = DataLoader(\n eval_dataset, collate_fn=data_collator, batch_size=args.batch_size, drop_last=True\n )\n\n model.eval()\n for step, batch in enumerate(eval_dataloader):\n outputs = model(**batch)\n predictions = outputs['logits'].argmax(dim=-1)\n pred, gt = gather_results(predictions, batch[\"labels\"])\n metric.add_batch(predictions=pred, references=gt)\n\n eval_metric = metric.compute()\n logger.info(f\"mnli-mm: {eval_metric}\")\n\nif __name__ == \"__main__\":\n rank, world = int(os.environ.get('PMI_RANK', -1)), int(os.environ.get('PMI_SIZE', 1))\n if rank != -1:\n logger.warning('start distributed training...')\n dist.init_process_group(backend='mpi')\n main()\n"
] | [
[
"torch.zeros",
"torch.load",
"torch.quantization.DeQuantStub",
"torch.nn.Embedding",
"torch.no_grad",
"torch.nn.Dropout",
"torch.ones",
"numpy.allclose",
"torch.randn",
"torch.tensor",
"torch.rand",
"numpy.load",
"torch.ones_like",
"torch.quantization.QuantStub",
"torch.quantization.add_observer_",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.Linear",
"torch.quantization.convert",
"torch.nn.BatchNorm2d",
"torch.nn.LSTM",
"torch.dequantize",
"torch.nn.LayerNorm",
"torch.nn.ReLU",
"torch.nn.quantized.FloatFunctional"
],
[
"torch.distributed.init_process_group",
"torch.load",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.distributed.gather",
"numpy.load",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
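The row above pairs two neural_compressor example files (a PyTorch adaptor test suite and a GLUE fine-tuning script) with the torch/numpy APIs they call. The core pattern both files repeat is the Quantization fit loop; a minimal sketch of that flow, assuming a `ptq_yaml.yaml` config such as the tests generate and a throwaway module in place of the tests' `M()`:

    import torch
    from neural_compressor.experimental import Quantization, common

    class TinyNet(torch.nn.Module):            # stand-in for the tests' M()
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 1, 1)
        def forward(self, x):
            return self.conv(x)

    quantizer = Quantization('ptq_yaml.yaml')  # yaml path assumed, as in the tests
    dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
    quantizer.model = common.Model(TinyNet())
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    q_model = quantizer.fit()                  # tuned, quantized model
    q_model.save('./saved')                    # config + weights, reloadable via load()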
0wu/mlflow | [
"2b5a21af05defcfa80255c081b5d9f07443f3f64"
] | [
"tests/tensorflow/test_tensorflow_model_export.py"
] | [
"# pep8: disable=E501\n\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport pandas\nimport shutil\nimport unittest\n\nimport pandas as pd\nimport sklearn.datasets as datasets\nimport tensorflow as tf\n\nfrom mlflow import tensorflow, pyfunc\nfrom mlflow import tracking\nfrom mlflow.utils.file_utils import TempDir\n\n\nclass TestModelExport(unittest.TestCase):\n\n def helper(self, feature_spec, tmp, estimator, df):\n \"\"\"\n This functions handles exporting, logging, loading back, and predicting on an estimator for\n testing purposes.\n \"\"\"\n receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)\n saved_estimator_path = tmp.path(\"model\")\n os.makedirs(saved_estimator_path)\n # Saving TensorFlow model.\n saved_estimator_path = estimator.export_savedmodel(saved_estimator_path,\n receiver_fn).decode(\"utf-8\")\n # Logging the TensorFlow model just saved.\n tensorflow.log_saved_model(saved_model_dir=saved_estimator_path,\n signature_def_key=\"predict\",\n artifact_path=tmp.path(\"hello\"))\n # Loading the saved TensorFlow model as a pyfunc.\n x = pyfunc.load_pyfunc(saved_estimator_path)\n # Predicting on the dataset using the pyfunc.\n return x.predict(df)\n\n def test_log_saved_model(self):\n # This tests model logging capabilities on the sklearn.iris dataset.\n iris = datasets.load_iris()\n X = iris.data[:, :2] # we only take the first two features.\n y = iris.target\n trainingFeatures = {}\n for i in range(0, 2):\n # TensorFlow is fickle about feature names, so we remove offending characters\n iris.feature_names[i] = iris.feature_names[i].replace(\" \", \"\")\n iris.feature_names[i] = iris.feature_names[i].replace(\"(\", \"\")\n iris.feature_names[i] = iris.feature_names[i].replace(\")\", \"\")\n trainingFeatures[iris.feature_names[i]] = iris.data[:, i:i+1]\n tf_feat_cols = []\n feature_names = iris.feature_names[:2]\n # Creating TensorFlow-specific numeric columns for input.\n for col in iris.feature_names[:2]:\n tf_feat_cols.append(tf.feature_column.numeric_column(col))\n # Creating input training function.\n input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures,\n y,\n shuffle=False,\n batch_size=1)\n # Creating Deep Neural Network Regressor.\n estimator = tf.estimator.DNNRegressor(feature_columns=tf_feat_cols,\n hidden_units=[1])\n # Training and creating expected predictions on training dataset.\n estimator.train(input_train, steps=10)\n # Saving the estimator's prediction on the training data; assume the DNNRegressor\n # produces a single output column named 'predictions'\n pred_col = \"predictions\"\n estimator_preds = [s[pred_col] for s in estimator.predict(input_train)]\n estimator_preds_df = pd.DataFrame({pred_col: estimator_preds})\n\n old_tracking_uri = tracking.get_tracking_uri()\n # should_start_run tests whether or not calling log_model() automatically starts a run.\n for should_start_run in [False, True]:\n with TempDir(chdr=True, remove_on_exit=True) as tmp:\n try:\n # Creating dict of features names (str) to placeholders (tensors)\n feature_spec = {}\n for name in feature_names:\n feature_spec[name] = tf.placeholder(\"float\", name=name, shape=[150])\n tracking.set_tracking_uri(\"test\")\n if should_start_run:\n tracking.start_run()\n pyfunc_preds_df = self.helper(feature_spec, tmp, estimator,\n pandas.DataFrame(data=X, columns=feature_names))\n\n # Asserting that the loaded model predictions are as expected.\n assert estimator_preds_df.equals(pyfunc_preds_df)\n finally:\n # Restoring the old 
logging location.\n tracking.end_run()\n tracking.set_tracking_uri(old_tracking_uri)\n\n def test_categorical_columns(self):\n \"\"\"\n This tests logging capabilities on datasets with categorical columns.\n See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/get_started/\\\n regression/imports85.py\n for reference code.\n \"\"\"\n with TempDir(chdr=False, remove_on_exit=True) as tmp:\n path = os.path.abspath(\"tests/data/uci-autos-imports-85.data\")\n # Order is important for the csv-readers, so we use an OrderedDict here.\n defaults = collections.OrderedDict([\n (\"body-style\", [\"\"]),\n (\"curb-weight\", [0.0]),\n (\"highway-mpg\", [0.0]),\n (\"price\", [0.0])\n ])\n\n types = collections.OrderedDict((key, type(value[0]))\n for key, value in defaults.items())\n df = pandas.read_csv(path, names=types.keys(), dtype=types, na_values=\"?\")\n df = df.dropna()\n\n # Extract the label from the features dataframe.\n y_train = df.pop(\"price\")\n\n # Creating the input training function required.\n trainingFeatures = {}\n\n for i in df:\n trainingFeatures[i] = df[i].values\n\n input_train = tf.estimator.inputs.numpy_input_fn(trainingFeatures,\n y_train.values,\n shuffle=False,\n batch_size=1)\n\n # Creating the feature columns required for the DNNRegressor.\n body_style_vocab = [\"hardtop\", \"wagon\", \"sedan\", \"hatchback\", \"convertible\"]\n body_style = tf.feature_column.categorical_column_with_vocabulary_list(\n key=\"body-style\", vocabulary_list=body_style_vocab)\n feature_columns = [\n tf.feature_column.numeric_column(key=\"curb-weight\"),\n tf.feature_column.numeric_column(key=\"highway-mpg\"),\n # Since this is a DNN model, convert categorical columns from sparse\n # to dense.\n # Wrap them in an `indicator_column` to create a\n # one-hot vector from the input.\n tf.feature_column.indicator_column(body_style)\n ]\n\n # Build a DNNRegressor, with 2x20-unit hidden layers, with the feature columns\n # defined above as input.\n estimator = tf.estimator.DNNRegressor(\n hidden_units=[20, 20], feature_columns=feature_columns)\n\n # Training the estimator.\n estimator.train(input_fn=input_train, steps=10)\n # Saving the estimator's prediction on the training data; assume the DNNRegressor\n # produces a single output column named 'predictions'\n pred_col = \"predictions\"\n estimator_preds = [s[pred_col] for s in estimator.predict(input_train)]\n estimator_preds_df = pd.DataFrame({pred_col: estimator_preds})\n # Setting the logging such that it is in the temp folder and deleted after the test.\n old_tracking_dir = tracking.get_tracking_uri()\n tracking_dir = os.path.abspath(tmp.path(\"mlruns\"))\n tracking.set_tracking_uri(\"file://%s\" % tracking_dir)\n tracking.start_run()\n try:\n # Creating dict of features names (str) to placeholders (tensors)\n feature_spec = {}\n feature_spec[\"body-style\"] = tf.placeholder(\"string\",\n name=\"body-style\",\n shape=[None])\n feature_spec[\"curb-weight\"] = tf.placeholder(\"float\",\n name=\"curb-weight\",\n shape=[None])\n feature_spec[\"highway-mpg\"] = tf.placeholder(\"float\",\n name=\"highway-mpg\",\n shape=[None])\n\n pyfunc_preds_df = self.helper(feature_spec, tmp, estimator, df)\n # Asserting that the loaded model predictions are as expected. 
Allow for some\n # imprecision as this is expected with TensorFlow.\n pandas.testing.assert_frame_equal(\n pyfunc_preds_df, estimator_preds_df, check_less_precise=6)\n finally:\n # Restoring the old logging location.\n tracking.end_run()\n tracking.set_tracking_uri(old_tracking_dir)\n"
] | [
[
"tensorflow.feature_column.categorical_column_with_vocabulary_list",
"tensorflow.estimator.export.build_raw_serving_input_receiver_fn",
"sklearn.datasets.load_iris",
"tensorflow.placeholder",
"tensorflow.estimator.DNNRegressor",
"pandas.DataFrame",
"tensorflow.feature_column.numeric_column",
"tensorflow.feature_column.indicator_column",
"pandas.testing.assert_frame_equal",
"tensorflow.estimator.inputs.numpy_input_fn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
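For orientation, the mlflow test above drives one round trip: export a tf.estimator as a SavedModel, log it, then reload it as a generic pyfunc and predict. A condensed sketch using the pre-1.0 mlflow API visible at this pinned commit (the directory and the sanitized iris feature names are assumptions carried over from the test):

    import pandas as pd
    from mlflow import tensorflow, pyfunc

    saved_dir = '/tmp/exported_model'   # assumed: output of estimator.export_savedmodel()
    tensorflow.log_saved_model(saved_model_dir=saved_dir,
                               signature_def_key='predict',
                               artifact_path='model')
    model = pyfunc.load_pyfunc(saved_dir)
    # column names mirror the test's iris features with spaces/parens stripped
    print(model.predict(pd.DataFrame({'sepallengthcm': [5.1],
                                      'sepalwidthcm': [3.5]})))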
helene-todd/XPPAUT_code | [
"e4caf112c03889a68eed0f4e5fa9d9d436918914"
] | [
"g_function_weak_coupling/G_function.py"
] | [
"from matplotlib import cm, rcParams\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math as math\nimport random as rand\n\n\"\"\" G(phi) function in Rinzel & Lewis' article (2003) under weak coupling \"\"\"\n\"\"\" This is under weak coupling theory, although one can note that gamma only serves to scale the function \"\"\"\n\nc = ['#aa3863', '#d97020', '#ef9f07', '#449775', '#3b7d86']\nrcParams.update({'figure.autolayout': True})\n\ndef T(I):\n return math.log(I/(I-1))\n\ndef G(phi, I, gamma):\n if phi != 0 and phi != 1:\n return gamma*(2/T(I))*(phi*math.sinh((1-phi)*T(I)) - (1-phi)*math.sinh(phi*T(I))) + gamma*(beta/(I*T(I)*T(I)))*(math.exp(phi*T(I)) - math.exp((1-phi)*T(I)))\n else :\n return 0\n\n\"\"\" Varying Gamma \"\"\"\n\ngamma = [0.4, 0.3, 0.2, 0.1, 0.01]\nbeta = 0.1\nI = 1.8\n\nplt.figure(figsize=(8,5))\nvector_phi = np.linspace(0,1,1000)\nzero_line = np.zeros(len(vector_phi))\nplt.plot(vector_phi, zero_line, color='black', linestyle='--')\n\nk = 0\nfor g in gamma :\n vector_G = []\n for el in vector_phi:\n vector_G.append(G(el, I, g))\n vector_G = np.array(vector_G)\n plt.plot(vector_phi, vector_G, label=f'$\\gamma = {g}$', color = c[k])\n k += 1\n\nplt.xlabel('$\\phi$', size=14)\nplt.ylabel('$G(\\phi)$', size=14)\nplt.title(f'G function for $I={I}, \\\\beta={beta}$')\n\nzero_crossings = np.where(np.diff(np.sign(vector_G-zero_line)))[0]\nprint(zero_crossings)\n\nplt.legend(loc='upper left')\nplt.savefig(f'G_function_range_gammas_I={I}.png', dpi=600)\nplt.show()\nplt.close()\n\n\"\"\" Varying I \"\"\"\n\"\"\"\ngamma = 1\nbeta = 0.2\nI = [1.15, 1.2, 1.4]\n\nplt.figure(figsize=(8,5))\nvector_phi = np.linspace(0,1,1000)\nzero_line = np.zeros(len(vector_phi))\nplt.plot(vector_phi, zero_line, linestyle='--', color='k')\n\nk = 0\nfor current in I :\n vector_G = []\n for el in vector_phi:\n vector_G.append(G(el, current, gamma))\n vector_G = np.array(vector_G)\n plt.plot(vector_phi, vector_G, label=f'$I = {current}$', color = c[k])\n k += 1\n\n\nplt.xlabel('$\\phi$', size=14)\nplt.ylabel('$G(\\phi)$', size=14)\n\nzero_crossings = np.where(np.diff(np.sign(vector_G-zero_line)))[0]\nprint(zero_crossings)\n\nplt.legend()\nplt.show()\n\"\"\"\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.sign",
"matplotlib.pyplot.ylabel",
"matplotlib.rcParams.update",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
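The plotting script above locates phase-locked solutions as sign changes of G(phi) via numpy. That zero-crossing idiom in isolation, with a made-up curve standing in for the sampled G values:

    import numpy as np

    phi = np.linspace(0, 1, 1000)
    g = np.sin(2 * np.pi * phi)                    # placeholder for vector_G
    crossings = np.where(np.diff(np.sign(g)))[0]   # indices just before a sign flip
    print(phi[crossings])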
nevertheless-ui/TelegramData_Analyst | [
"6c7b33560a2b8b26bce99c9a82efa6b4796d5828"
] | [
"run_analyst.py"
] | [
"# Filename: analyst.py\n\"\"\"Analyst is a tool to look up (and export selected) data and insights\nfrom exported data from chats and channels in Telegram\nusing Python and PyQt5.\"\"\"\n\nimport sys\n\nimport pandas as pd\nfrom pathlib import Path\n\nfrom PyQt5 import QtWidgets, QtCore\nfrom PyQt5 import uic\n\nfrom backend import (\n converter,\n handler,\n)\n\n__version__ = '0.1'\n__author__ = 'Artyom Filippenko'\n\ndf = pd.DataFrame({'a': ['Mary', 'Jim', 'John'],\n 'b': [100, 200, 300],\n 'c': ['a', 'b', 'c']})\n\n\n# VARS SECTION\n# IMPORT LOCALE\nIMPORT_WINDOW_TITLE = 'TelegramData Analyst - Import'\nIMPORT_WINDOW_MSG = 'This software is designed for analysis of Telegram channels and chats.'\nIMPORT_BROWSE_MSG = 'Open file'\nIMPORT_PATHLINE_MSG = 'Please, add path to JSON file, exported from Telegram Application...'\nIMPORT_BROWSE_BTN_NAME = 'Browse'\nIMPORT_ANALYSE_BTN_NAME = 'Analyze'\nIMPORT_PATH_MSG = 'File'\n\n# ANALYST LOCALE\nANALYST_WINDOW_TITLE = 'TelegramData Analyst - Explorer'\nANALYST_STATUSBAR_PREFIX_MSG = 'Exploring data from json-file:'\nANALYST_WINDOW_MSG = 'Analyzing file'\nANALYST_RETURN_BTN_NAME = 'Return to import...'\nANALYST_EXPORT_BTN_NAME = 'Export results...'\n\n# ANALYST LOCALE\n#ALERT_WINDOW_TITLE = 'Alert!'\n\n# UI path\nIMPORT_UI_PATH = './frontend/import_data.ui'\nMAIN_UI_PATH = './frontend/workspace.ui'\n#ALERT_UI_PATH = './frontend/alert.ui'\n\nclass ImportWindow(QtWidgets.QDialog):\n def __init__(self, parent=None):\n super().__init__(parent)\n self._build()\n self.ui.show()\n\n def _build(self):\n self.ui = uic.loadUi(IMPORT_UI_PATH)\n\n # Locale\n self.ui.setWindowTitle(IMPORT_WINDOW_TITLE)\n self.ui.import_description_message.setText(IMPORT_WINDOW_MSG)\n self.ui.browse_files_btn.setText(IMPORT_BROWSE_BTN_NAME)\n self.ui.analyse_file_btn.setText(IMPORT_ANALYSE_BTN_NAME)\n self.ui.import_file_pathline.setText(IMPORT_PATHLINE_MSG)\n\n # Loading UI logic\n self.ui.browse_files_btn.clicked.connect(self._browse_files)\n self.ui.analyse_file_btn.clicked.connect(self._open_analyst)\n\n def _browse_files(self):\n import_file = QtWidgets.QFileDialog.getOpenFileName(self, IMPORT_BROWSE_MSG,\n './', \"Json file (*.json)\")\n self.ui.import_file_pathline.setText(import_file[0])\n\n def _open_analyst(self):\n if self.ui.import_file_pathline.text() == IMPORT_PATHLINE_MSG:\n json_file_path = ''\n\n else:\n json_file_path = Path(self.ui.import_file_pathline.text())\n self.analyst = AnalysisWindow(self)\n self.analyst.import_json_file(json_file_path)\n self.analyst.update_table_view\n self.analyst.ui.statusbar.showMessage(ANALYST_STATUSBAR_PREFIX_MSG + ' ' + \\\n str(json_file_path))\n self.ui.hide()\n\n\nclass AnalysisWindow(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n super().__init__(parent, QtCore.Qt.Window)\n self._build()\n self.ui.show()\n #self.import_json_file()\n #self.update_table_view()\n\n def _build(self):\n self.ui = uic.loadUi(MAIN_UI_PATH)\n # Locale\n self.ui.setWindowTitle(ANALYST_WINDOW_TITLE)\n self.ui.return_btn.setText(ANALYST_RETURN_BTN_NAME)\n self.ui.export_btn.setText(ANALYST_EXPORT_BTN_NAME)\n # Loading UI logic\n self.ui.return_btn.clicked.connect(self._return_to_import)\n\n def _return_to_import(self):\n self.ui.close()\n self.parent().ui.show()\n\n def import_json_file(self, json_file_path):\n self._data = converter.convert_tg_json(json_file_path)\n\n def update_table_view(self):\n self.ui.test_msg.setText(str(df.columns))\n self.model = handler.pandasModel(self._data)\n self.ui.table_view.setModel(self.model)\n 
self.ui.table_view.show()\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n window = ImportWindow()\n #window.show()\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
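The PyQt5 tool above hands its DataFrame to the table view through `backend.handler.pandasModel`, whose source is not included in this row. A plausible minimal version of such a model, written against the standard QAbstractTableModel pattern (this class body is a guess at the idiom, not the repo's actual implementation):

    import pandas as pd
    from PyQt5 import QtCore

    class PandasModel(QtCore.QAbstractTableModel):
        """Read-only Qt table model over a pandas DataFrame."""
        def __init__(self, df, parent=None):
            super().__init__(parent)
            self._df = df
        def rowCount(self, parent=QtCore.QModelIndex()):
            return len(self._df.index)
        def columnCount(self, parent=QtCore.QModelIndex()):
            return len(self._df.columns)
        def data(self, index, role=QtCore.Qt.DisplayRole):
            if index.isValid() and role == QtCore.Qt.DisplayRole:
                return str(self._df.iat[index.row(), index.column()])
            return None
        def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
            if role == QtCore.Qt.DisplayRole and orientation == QtCore.Qt.Horizontal:
                return str(self._df.columns[section])
            return None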
shallowyuan/cosegmentor-crf | [
"c84a9418b70f3f3c7c6a7e998de5835182619f30"
] | [
"tlib/networks/VGGnet_train.py"
] | [
"import tensorflow as tf\nfrom networks.network import Network\n\n\n#define\n\nn_classes = 21\n_feat_stride = [16,]\nanchor_scales = [8, 16, 32]\n\nclass VGGnet_train(Network):\n def __init__(self, trainable=True):\n self.inputs = []\n self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])\n #self.im_info = tf.placeholder(tf.float32, shape=[None, 3])\n #self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])\n self.keep_prob = tf.placeholder(tf.float32)\n self.segmentation=tf.placeholder(tf.float32,shape=[None,900])\n self.rois=tf.placeholder(tf.float32,shape=[None,5])\n #self.mweights=tf.placeholder(tf.float32,shape=[None,2])\n self.sweights=tf.placeholder(tf.bool,shape=[None])\n self.labels=tf.placeholder(tf.int32,shape=[None])\n self.layers = dict({'data':self.data, 'segmentation':self.segmentation, 'sweight':self.sweights, 'labels': self.labels, \"rois\": self.rois})\n self.trainable = trainable\n self.setup()\n\n\n def setup(self):\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3'))\n #=========ROIPOOLING=======\n (self.feed('conv4_3','rois')\n .roi_pool(7, 7, 1.0/16, name='pool_4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool5'))\n\n\n #========= RPN ============\n# (self.feed('conv5_3')\n# .conv(3,3,512,1,1,name='rpn_conv/3x3')\n# .conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, name='rpn_cls_score'))#\n\n# (self.feed('rpn_cls_score','gt_boxes','im_info','data')\n# .anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' ))#\n\n# # Loss of rpn_cls & rpn_boxes\n\n# (self.feed('rpn_conv/3x3')\n# .conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))\n\n #========= RoI Proposal ============\n# (self.feed('rpn_cls_score')\n# .reshape_layer(2,name = 'rpn_cls_score_reshape')\n# .softmax(name='rpn_cls_prob'))\n#\n# (self.feed('rpn_cls_prob')\n# .reshape_layer(len(anchor_scales)*3*2,name = 'rpn_cls_prob_reshape'))\n#\n# (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')\n# .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois'))\n#\n# (self.feed('rpn_rois','gt_boxes')\n# .proposal_target_layer(n_classes,name = 'roi-data'))\n\n\n #========= RCNN ============\n (self.feed('pool5')\n .fc(1024, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(1024, name='fc7')\n .dropout(0.5, name='drop7')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n\n # (self.feed('drop7')\n # .fc(n_classes*4, relu=False, name='bbox_pred'))\n\n #==========segment network===\n (self.feed('conv5_3')\n .conv(1,1,512,1 , 1, padding='VALID', name='conv5_4')\n .fc(512, name='fc8')\n .fc(900, relu=False, name='seg_score'))\n\n"
] | [
[
"tensorflow.placeholder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
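The VGGnet_train class above is pure graph-mode TensorFlow 1.x: every input is a tf.placeholder, so nothing executes until a session binds them through feed_dict. A toy illustration of that contract, with a trivial op standing in for the conv/ROI-pooling stack:

    import numpy as np
    import tensorflow as tf   # 1.x graph mode, matching the versions listed above

    data = tf.placeholder(tf.float32, shape=[None, None, None, 3])
    rois = tf.placeholder(tf.float32, shape=[None, 5])
    out = tf.reduce_mean(data) + tf.reduce_sum(rois)   # stand-in for the network

    with tf.Session() as sess:
        print(sess.run(out, feed_dict={
            data: np.zeros((1, 224, 224, 3), np.float32),
            rois: np.array([[0, 0, 0, 16, 16]], np.float32),
        }))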